ArmNN
 21.08
RefStridedSliceWorkload.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
#include "RefStridedSliceWorkload.hpp"

#include "RefWorkloadUtils.hpp"
#include "StridedSlice.hpp"
9 
10 namespace armnn
11 {
12 
14  const WorkloadInfo& info)
15  : BaseWorkload(descriptor, info)
16 {}
17 
19 {
21 }
22 
24 {
25  Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
26 }
27 
28 void RefStridedSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
29 {
30  ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefStridedSliceWorkload_Execute");
31 
32  const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
33  const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
34 
35  DataType inputDataType = inputInfo.GetDataType();
36  DataType outputDataType = outputInfo.GetDataType();
37 
38  ARMNN_ASSERT(inputDataType == outputDataType);
39  IgnoreUnused(outputDataType);
40 
41  StridedSlice(inputInfo,
43  inputs[0]->Map(),
44  outputs[0]->Map(),
45  GetDataTypeSize(inputDataType));
46 }
47 
48 } // namespace armnn
CPU Execution: Reference C++ kernels.
Copyright (c) 2021 ARM Limited and Contributors.
void IgnoreUnused(Ts &&...)
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition: Profiling.hpp:219
DataType
Definition: Types.hpp:35
RefStridedSliceWorkload(const StridedSliceQueueDescriptor &descriptor, const WorkloadInfo &info)
void ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor) override
DataType GetDataType() const
Definition: Tensor.hpp:198
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
std::vector< ITensorHandle * > m_Outputs
Contains information about TensorInfos of a layer.
std::vector< ITensorHandle * > m_Inputs
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:139