8 #include <DataTypeUtils.hpp> 17 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
24 const std::vector<float>& inputData,
25 const std::vector<float>& outputData,
26 const std::vector<int32_t> vAxis,
28 bool keepDims =
false)
31 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
33 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
35 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
36 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
39 std::vector<uint32_t> updated_idx;
40 uint32_t resolvedAxis = 0;
41 for (uint32_t i = 0; i < vAxis.size(); ++i)
45 resolvedAxis = inputTensorInfo.
GetNumDimensions() +
static_cast<uint32_t
>(vAxis[i]);
48 resolvedAxis =
static_cast<uint32_t
>(vAxis[i]);
51 updated_idx.push_back(resolvedAxis);
59 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
60 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
66 inputHandle->Allocate();
67 outputHandle->Allocate();
77 outputHandle->GetShape(),
83 template<armnn::DataType ArmnnType,
typename T>
94 if (armnn::IsQuantizedType<T>())
102 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
103 std::vector<float> outputValues({ 34.0f });
105 return ReduceTestCommon<ArmnnType>(workloadFactory,
116 template<armnn::DataType ArmnnType,
typename T>
127 if (armnn::IsQuantizedType<T>())
135 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
136 5.0f, 6.0f, 7.0f, 8.0f,
138 10.0f, 20.0f, 30.0f, 40.0f,
139 50.0f, 60.0f, 70.0f, 80.0f,
141 100.0f, 200.0f, 300.0f, 400.0f,
142 500.0f, 600.0f, 700.0f, 800.0f });
143 std::vector<float> outputValues({ 111.0f, 222.0f, 333.0f, 444.0f,
144 555.0f, 666.0f, 777.0f, 888.0f });
146 return ReduceTestCommon<ArmnnType>(workloadFactory,
157 template<armnn::DataType ArmnnType,
typename T>
168 if (armnn::IsQuantizedType<T>())
176 std::vector<float> inputValues( {7, 8, 6, 1,
201 std::vector<float> outputValues({ 28.0f, 35.0f, 30.0f, 27.0f,
202 27.0f, 31.0f, 31.0f, 24.0f,
203 35.0f, 32.0f, 29.0f, 44.0f});
205 return ReduceTestCommon<ArmnnType>(workloadFactory,
216 template<armnn::DataType ArmnnType,
typename T>
227 if (armnn::IsQuantizedType<T>())
235 std::vector<float> inputValues( {7, 8, 6, 1,
260 std::vector<float> outputValues({ 22.0f, 17.0f, 24.0f,
267 13.0f, 17.0f, 23.0f});
269 return ReduceTestCommon<ArmnnType>(workloadFactory,
281 template<armnn::DataType ArmnnType,
typename T>
292 if (armnn::IsQuantizedType<T>())
300 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
301 5.0f, 6.0f, 7.0f, 8.0f,
303 10.0f, 20.0f, 30.0f, 40.0f,
304 50.0f, 60.0f, 70.0f, 80.0f,
306 100.0f, 200.0f, 300.0f, 400.0f,
307 500.0f, 600.0f, 700.0f, 800.0f });
308 std::vector<float> outputValues({ 666.0f, 888.0f, 1110.0f, 1332.0f });
310 return ReduceTestCommon<ArmnnType>(workloadFactory,
324 ReduceSumSimpleTest<armnn::DataType::Float32>(
330 ReduceSumSingleAxisTest1<armnn::DataType::Float32>(
336 ReduceSumSingleAxisTest2<armnn::DataType::Float32>(
342 ReduceSumSingleAxisTest3<armnn::DataType::Float32>(
348 ReduceSumMultipleAxisTest<armnn::DataType::Float32>(
const TensorShape & GetShape() const
LayerTestResult< float, 4 > ReduceSumMultipleAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
bool m_KeepDims
If true, the reduced dimensions are retained in the output shape with size 1, so the output rank is unchanged.
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void SetQuantizationScale(float scale)
LayerTestResult< float, 4 > ReduceSumSingleAxisTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
Contains the input and output TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
unsigned int GetNumDimensions() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< float, 4 > ReduceSumSingleAxisTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
unsigned int GetNumElements() const