18 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
25 const std::vector<float>& inputData,
26 const std::vector<int32_t>& outputData,
30 auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
33 result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
35 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
36 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
43 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
44 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
46 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
48 inputHandle->Allocate();
49 outputHandle->Allocate();
53 workload->PostAllocationConfigure();
63 template<armnn::DataType ArmnnType,
typename T>
73 if (armnn::IsQuantizedType<T>())
81 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
82 std::vector<int32_t> outputValues({ 3 });
84 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
86 inputTensorInfo, outputTensorInfo,
87 inputValues, outputValues, -1);
90 template<armnn::DataType ArmnnType,
typename T>
100 if (armnn::IsQuantizedType<T>())
108 std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
109 std::vector<int32_t> outputValues({ 1 });
111 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
113 inputTensorInfo, outputTensorInfo,
114 inputValues, outputValues, 3);
117 template<armnn::DataType ArmnnType,
typename T>
127 if (armnn::IsQuantizedType<T>())
135 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
136 5.0f, 6.0f, 7.0f, 8.0f,
138 10.0f, 20.0f, 30.0f, 40.0f,
139 50.0f, 60.0f, 70.0f, 80.0f,
141 100.0f, 200.0f, 300.0f, 400.0f,
142 500.0f, 600.0f, 700.0f, 800.0f });
143 std::vector<int32_t> outputValues({ 0, 0, 0, 0,
146 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
148 inputTensorInfo, outputTensorInfo,
149 inputValues, outputValues, 1);
152 template<armnn::DataType ArmnnType,
typename T>
162 if (armnn::IsQuantizedType<T>())
170 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
171 5.0f, 6.0f, 7.0f, 8.0f,
173 10.0f, 20.0f, 30.0f, 40.0f,
174 50.0f, 60.0f, 70.0f, 80.0f,
176 100.0f, 200.0f, 300.0f, 400.0f,
177 500.0f, 600.0f, 700.0f, 800.0f });
178 std::vector<int32_t> outputValues({ 2, 2, 2, 2,
181 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
183 inputTensorInfo, outputTensorInfo,
184 inputValues, outputValues, 1);
187 template<armnn::DataType ArmnnType,
typename T>
198 if (armnn::IsQuantizedType<T>())
204 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
205 5.0f, 6.0f, 7.0f, 8.0f,
207 10.0f, 20.0f, 30.0f, 40.0f,
208 50.0f, 60.0f, 70.0f, 80.0f,
210 100.0f, 200.0f, 300.0f, 400.0f,
211 500.0f, 600.0f, 700.0f, 800.0f });
212 std::vector<int32_t> outputValues({ 1, 1, 1, 1,
216 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
218 inputTensorInfo, outputTensorInfo,
219 inputValues, outputValues, 2);
222 template<armnn::DataType ArmnnType,
typename T>
233 if (armnn::IsQuantizedType<T>())
239 std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
240 5.0f, 6.0f, 7.0f, 8.0f,
242 10.0f, 20.0f, 30.0f, 40.0f,
243 50.0f, 60.0f, 70.0f, 80.0f,
245 100.0f, 200.0f, 300.0f, 400.0f,
246 500.0f, 600.0f, 700.0f, 800.0f });
247 std::vector<int32_t> outputValues({ 0, 0,
251 return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
253 inputTensorInfo, outputTensorInfo,
254 inputValues, outputValues, 3);
261 ArgMaxSimpleTest<armnn::DataType::Float32>(
266 ArgMaxSimpleTest<armnn::DataType::QAsymmU8>(
271 ArgMaxSimpleTest<armnn::DataType::QSymmS16>(
276 ArgMaxSimpleTest<armnn::DataType::Signed32>(
281 ArgMinSimpleTest<armnn::DataType::Float32>(
286 ArgMinSimpleTest<armnn::DataType::QAsymmU8>(
291 ArgMinSimpleTest<armnn::DataType::QSymmS16>(
296 ArgMinSimpleTest<armnn::DataType::Signed32>(
301 ArgMinChannelTest<armnn::DataType::Float32>(
306 ArgMinChannelTest<armnn::DataType::QAsymmU8>(
311 ArgMinChannelTest<armnn::DataType::QSymmS16>(
316 ArgMinChannelTest<armnn::DataType::Signed32>(
321 ArgMaxChannelTest<armnn::DataType::Float32>(
326 ArgMaxChannelTest<armnn::DataType::QAsymmU8>(
331 ArgMaxChannelTest<armnn::DataType::QSymmS16>(
336 ArgMaxChannelTest<armnn::DataType::Signed32>(
341 ArgMaxHeightTest<armnn::DataType::Float32>(
346 ArgMaxHeightTest<armnn::DataType::Signed32>(
351 ArgMaxHeightTest<armnn::DataType::QAsymmU8>(
356 ArgMinWidthTest<armnn::DataType::Float32>(
361 ArgMinWidthTest<armnn::DataType::Signed32>(
366 ArgMinWidthTest<armnn::DataType::QAsymmU8>(
LayerTestResult< int32_t, 3 > ArgMinSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
ArgMinMaxFunction m_Function
Specifies whether the function is to find the Min or the Max.
LayerTestResult< int32_t, 3 > ArgMinChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int32_t, 3 > ArgMaxChannelTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
LayerDescriptor m_Parameters
LayerTestResult< int32_t, 3 > ArgMaxHeightTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int32_t, 3 > ArgMinWidthTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int32_t, 3 > ArgMaxSimpleTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
int m_Axis
Axis to reduce across the input tensor.
void SetQuantizationOffset(int32_t offset)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)