ArmNN 21.02
RefStridedSliceWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefStridedSliceWorkload.hpp"
#include "RefWorkloadUtils.hpp"
#include "StridedSlice.hpp"

namespace armnn
{

RefStridedSliceWorkload::RefStridedSliceWorkload(const StridedSliceQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info)
    : BaseWorkload(descriptor, info)
{}

void RefStridedSliceWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStridedSliceWorkload_Execute");

    // Tensor metadata for the layer's single input and single output.
    const TensorInfo& inputInfo  = GetTensorInfo(m_Data.m_Inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);

    DataType inputDataType  = inputInfo.GetDataType();
    DataType outputDataType = outputInfo.GetDataType();

    // Strided slice never changes the element type, so input and output must match.
    ARMNN_ASSERT(inputDataType == outputDataType);
    IgnoreUnused(outputDataType);

    // Run the reference strided-slice kernel directly on the mapped CPU buffers.
    StridedSlice(inputInfo,
                 m_Data.m_Parameters,
                 m_Data.m_Inputs[0]->Map(),
                 m_Data.m_Outputs[0]->Map(),
                 GetDataTypeSize(inputDataType));
}

} // namespace armnn
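
This workload is created by the reference backend's workload factory whenever a network containing a StridedSlice layer runs on CpuRef. Below is a minimal sketch, assuming the standard ArmNN public API, of how the Execute() path above gets exercised end to end; the tensor shapes, slice bounds (begin/end/stride), and variable names are illustrative assumptions, not taken from this file.

#include <armnn/ArmNN.hpp>

#include <utility>
#include <vector>

int main()
{
    using namespace armnn;

    // Illustrative slice: take the middle two elements of a 1x4 float tensor.
    // begin = {0, 1}, end = {1, 3}, stride = {1, 1}  ->  output shape {1, 2}.
    StridedSliceDescriptor sliceDesc;
    sliceDesc.m_Begin  = { 0, 1 };
    sliceDesc.m_End    = { 1, 3 };
    sliceDesc.m_Stride = { 1, 1 };

    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input  = network->AddInputLayer(0);
    IConnectableLayer* slice  = network->AddStridedSliceLayer(sliceDesc, "slice");
    IConnectableLayer* output = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(slice->GetInputSlot(0));
    slice->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 4 }), DataType::Float32));
    slice->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 2 }), DataType::Float32));

    // Optimizing for CpuRef selects the reference workloads, including RefStridedSliceWorkload.
    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    NetworkId netId = 0;
    runtime->LoadNetwork(netId, std::move(optNet));

    std::vector<float> inputData  = { 0.0f, 1.0f, 2.0f, 3.0f };
    std::vector<float> outputData(2, 0.0f);

    InputTensors  inputTensors  = { { 0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data()) } };
    OutputTensors outputTensors = { { 0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) } };

    // This call dispatches to RefStridedSliceWorkload::Execute() for the slice layer;
    // with the bounds above, outputData should hold {1.0f, 2.0f} afterwards.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    return 0;
}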