// Common driver for the Reduce-layer unit tests: converts the float reference
// data to the target data type, creates input/output tensor handles, resolves
// the (possibly negative) reduction axes, builds a Reduce workload and reads
// the result back for comparison.
// NOTE(review): extraction fragment — several original source lines are
// missing between the numbered statements below (tensor-info setup, the
// descriptor construction, Execute/CopyData calls, braces). Comments describe
// only the code that is visible here.
19 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
26 const std::vector<float>& inputData,
27 const std::vector<float>& outputData,
28 const std::vector<int32_t> vAxis,
30 bool keepDims =
false)
// Convert the float reference input into the tensor's actual data type
// (quantizes when ArmnnType is a quantized type — see ConvertToDataType).
33 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
35 std::vector<float> actualOutput(outputTensorInfo.
GetNumElements());
37 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
38 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Resolve each requested axis: a negative axis indexes from the back
// (numDimensions + axis); a non-negative axis is used as-is. The resolved
// axes are collected into updated_idx (presumably copied into the
// descriptor's m_vAxis in a line not visible in this fragment — TODO confirm).
41 std::vector<uint32_t> updated_idx;
42 uint32_t resolvedAxis = 0;
43 for (uint32_t i = 0; i < vAxis.size(); ++i)
47 resolvedAxis = inputTensorInfo.
GetNumDimensions() +
static_cast<uint32_t
>(vAxis[i]);
50 resolvedAxis =
static_cast<uint32_t
>(vAxis[i]);
53 updated_idx.push_back(resolvedAxis);
// Wire the tensors into the workload descriptor and create the workload.
61 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
62 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
64 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateReduce(descriptor, info);
66 inputHandle->Allocate();
67 outputHandle->Allocate();
77 outputHandle->GetShape(),
// ReduceMax test with a small 2x3 data set: the expected outputs are the
// element-wise maxima of the two visible rows (max(1001,10)=1001,
// max(11,1002)=1002, max(1003,12)=1003), i.e. reduction across that axis.
// NOTE(review): extraction fragment — tensor shapes and the quantization
// parameters set inside the IsQuantizedType branch are not visible here.
83 template<armnn::DataType ArmnnType,
typename T>
// Quantized types get scale/offset configured (body not visible in fragment).
94 if (armnn::IsQuantizedType<T>())
102 std::vector<float> inputValues
104 1001.0f, 11.0f, 1003.0f,
105 10.0f, 1002.0f, 12.0f
107 std::vector<float> outputValues
109 1001.0f, 1002.0f, 1003.0f
// Delegate to the shared driver with the Max reduce operation
// (remaining arguments not visible in this fragment).
112 return ReductionTestCommon<ArmnnType>(workloadFactory,
// ReduceMax test exercising a negative axis index (resolved to
// numDimensions + axis by ReductionTestCommon).
// NOTE(review): extraction fragment — the expected output values and the
// axis/keepDims arguments are in lines not visible here.
123 template<armnn::DataType ArmnnType,
typename T>
// Quantized types get scale/offset configured (body not visible in fragment).
134 if (armnn::IsQuantizedType<T>())
142 std::vector<float> inputValues
144 1001.0f, 11.0f, 1003.0f,
145 10.0f, 1002.0f, 12.0f
147 std::vector<float> outputValues
// Delegate to the shared driver (remaining arguments not visible).
152 return ReductionTestCommon<ArmnnType>(workloadFactory,
// Second ReduceMax variant (different shape/axis configuration from
// ReduceMaxSimpleTest — exact data not visible in this fragment).
// NOTE(review): extraction fragment — input/output values and the arguments
// passed to ReductionTestCommon are in lines not visible here.
164 template<armnn::DataType ArmnnType,
typename T>
// Quantized types get scale/offset configured (body not visible in fragment).
175 if (armnn::IsQuantizedType<T>())
183 std::vector<float> inputValues
189 std::vector<float> outputValues
// Delegate to the shared driver (remaining arguments not visible).
194 return ReductionTestCommon<ArmnnType>(workloadFactory,
// ReduceMin test over the same 2x3 data set used by the Max tests.
// NOTE(review): extraction fragment — the expected output values and the
// reduce-operation/axis arguments are in lines not visible here.
206 template<armnn::DataType ArmnnType,
typename T>
// Quantized types get scale/offset configured (body not visible in fragment).
217 if (armnn::IsQuantizedType<T>())
225 std::vector<float> inputValues
227 1001.0f, 11.0f, 1003.0f,
228 10.0f, 1002.0f, 12.0f
230 std::vector<float> outputValues
// Delegate to the shared driver (remaining arguments not visible).
235 return ReductionTestCommon<ArmnnType>(workloadFactory,
// ReduceMin test exercising a negative axis index (resolved to
// numDimensions + axis by ReductionTestCommon).
// NOTE(review): extraction fragment — the expected output values and the
// axis/keepDims arguments are in lines not visible here.
246 template<armnn::DataType ArmnnType,
typename T>
// Quantized types get scale/offset configured (body not visible in fragment).
257 if (armnn::IsQuantizedType<T>())
265 std::vector<float> inputValues
267 1001.0f, 11.0f, 1003.0f,
268 10.0f, 1002.0f, 12.0f
270 std::vector<float> outputValues
// Delegate to the shared driver (remaining arguments not visible).
275 return ReductionTestCommon<ArmnnType>(workloadFactory,
// Float32 entry points: each calls the corresponding templated test with
// DataType::Float32. The enclosing wrapper signatures (presumably the
// LayerTestResult<float,4> functions listed in the Doxygen summaries below)
// are cut off in this fragment — TODO confirm against the full file.
289 ReduceMaxSimpleTest<armnn::DataType::Float32>(
295 ReduceMaxNegativeAxisTest<armnn::DataType::Float32>(
301 ReduceMaxSimpleTest2<armnn::DataType::Float32>(
307 ReduceMinSimpleTest<armnn::DataType::Float32>(
313 ReduceMinNegativeAxisTest<armnn::DataType::Float32>(
const TensorShape & GetShape() const
bool m_KeepDims
If true, then the output shape has no change.
LayerTestResult< float, 4 > ReduceMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
ReduceOperation m_ReduceOperation
Specifies the reduction operation to execute.
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< float, 4 > ReduceMinNegativeAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
virtual std::unique_ptr< IWorkload > CreateReduce(const ReduceQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > ReduceMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void SetQuantizationScale(float scale)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< float, 4 > ReduceMaxNegativeAxisTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< uint32_t > m_vAxis
The indices of the dimensions to reduce.
LayerTestResult< float, 4 > ReduceMaxSimpleTest2(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
void SetQuantizationOffset(int32_t offset)
unsigned int GetNumDimensions() const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
unsigned int GetNumElements() const
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)