17 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
24 const std::vector<float>& inputData,
25 const std::vector<float>& outputData,
26 const std::vector<int32_t> vAxis,
28 bool keepDims =
false)
31 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
33 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
35 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
36 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
39 std::vector<uint32_t> updated_idx;
40 uint32_t resolvedAxis = 0;
41 for (uint32_t i = 0; i < vAxis.size(); ++i)
45 resolvedAxis = inputTensorInfo.
GetNumDimensions() +
static_cast<uint32_t
>(vAxis[i]);
48 resolvedAxis =
static_cast<uint32_t
>(vAxis[i]);
51 updated_idx.push_back(resolvedAxis);
59 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
60 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
62 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateReduce(descriptor, info);
64 inputHandle->Allocate();
65 outputHandle->Allocate();
75 outputHandle->GetShape(),
81 template<armnn::DataType ArmnnType,
typename T>
92 if (armnn::IsQuantizedType<T>())
100 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
101 std::vector<float> outputValues({ 7200.0f });
103 return ReduceTestCommon<ArmnnType>(workloadFactory,
114 template<armnn::DataType ArmnnType,
typename T>
125 if (armnn::IsQuantizedType<T>())
133 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
134 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f,
135 100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f, 700.0f, 800.0f
137 std::vector<float> outputValues({ 1000.0f, 8000.0f, 27000.0f, 64000.0f, 125000.0f, 216000.0f, 343000.0f, 512000.0f
140 return ReduceTestCommon<ArmnnType>(workloadFactory,
151 template<armnn::DataType ArmnnType,
typename T>
162 if (armnn::IsQuantizedType<T>())
170 std::vector<float> inputValues( {7, 8, 6, 1,
195 std::vector<float> outputValues({ 2940.f, 18432.f, 9408.f, 1568.f,
196 2520.f, 4608.f, 10080.f, 1512.f,
197 30240.f, 8064.f, 3584.f, 150528.f });
199 return ReduceTestCommon<ArmnnType>(workloadFactory,
210 template<armnn::DataType ArmnnType,
typename T>
221 if (armnn::IsQuantizedType<T>())
229 std::vector<float> inputValues({ 7, 8, 6, 1,
254 std::vector<float> outputValues({ 336.f, 56.f, 1029.f,
255 1344.f, 504.f, 2560.f,
261 56.f, 270.f, 384.f });
263 return ReduceTestCommon<ArmnnType>(workloadFactory,
275 template<armnn::DataType ArmnnType,
typename T>
286 if (armnn::IsQuantizedType<T>())
294 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
295 5.0f, 6.0f, 7.0f, 8.0f,
297 10.0f, 20.0f, 30.0f, 40.0f,
298 50.0f, 60.0f, 70.0f, 80.0f,
300 11.0f, 22.0f, 33.0f, 44.0f,
301 55.0f, 66.0f, 77.0f, 88.0f });
302 std::vector<float> outputValues({ 1512500.f, 20908800.f, 112058100.f, 396492800.f });
304 return ReduceTestCommon<ArmnnType>(workloadFactory,
318 ReduceProdSimpleTest<armnn::DataType::Float32>(
324 ReduceProdSingleAxisTest1<armnn::DataType::Float32>(
330 ReduceProdSingleAxisTest2<armnn::DataType::Float32>(
336 ReduceProdSingleAxisTest3<armnn::DataType::Float32>(
342 ReduceProdMultipleAxisTest<armnn::DataType::Float32>(
const TensorShape & GetShape() const
bool m_KeepDims
If true, the output keeps the same rank as the input: the reduced dimensions are retained with size 1 instead of being removed.
LayerTestResult< float, 4 > ReduceProdMultipleAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
LayerTestResult< float, 4 > ReduceProdSingleAxisTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerDescriptor m_Parameters
LayerTestResult< float, 4 > ReduceProdSingleAxisTest1(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 4 > ReduceProdSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
virtual std::unique_ptr< IWorkload > CreateReduce(const ReduceQueueDescriptor &descriptor, const WorkloadInfo &info) const
void SetQuantizationScale(float scale)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
unsigned int GetNumDimensions() const
LayerTestResult< float, 4 > ReduceProdSingleAxisTest3(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)