From 68c2c903e94a96b1663cffa3e5981a5869dc8b92 Mon Sep 17 00:00:00 2001
From: Nikhil Raj
Date: Thu, 19 Sep 2019 11:21:11 +0100
Subject: IVGCVSW-3723 Adding reference workload support for ArgMinMax

Change-Id: I65209ecec4e3abf808163239748d6e830568c2e3
Signed-off-by: Nikhil Raj
---
 src/backends/backendsCommon/WorkloadData.cpp       |   7 +-
 src/backends/backendsCommon/common.mk              |   1 +
 src/backends/backendsCommon/test/CMakeLists.txt    |   2 +
 src/backends/backendsCommon/test/LayerTests.hpp    |   1 +
 .../test/layerTests/ArgMinMaxTestImpl.cpp          | 252 +++++++++++++++++++++
 .../test/layerTests/ArgMinMaxTestImpl.hpp          |  29 +++
 src/backends/reference/RefLayerSupport.cpp         |  23 ++
 src/backends/reference/RefLayerSupport.hpp         |   5 +
 src/backends/reference/RefWorkloadFactory.cpp      |   6 +
 src/backends/reference/RefWorkloadFactory.hpp      |   3 +
 src/backends/reference/backend.mk                  |   2 +
 src/backends/reference/test/RefLayerTests.cpp      |  16 ++
 src/backends/reference/workloads/CMakeLists.txt    |   4 +
 .../reference/workloads/RefArgMinMaxWorkload.cpp   |  38 ++++
 .../reference/workloads/RefArgMinMaxWorkload.hpp   |  21 ++
 src/backends/reference/workloads/RefWorkloads.hpp  |   2 +
 16 files changed, 410 insertions(+), 2 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
 create mode 100644 src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
 create mode 100644 src/backends/reference/workloads/RefArgMinMaxWorkload.hpp
(limited to 'src/backends')

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 136c196e1b..c8c4f9aae4 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -471,6 +471,11 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
     const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
 
+    if (outputTensorInfo.GetDataType() != DataType::Signed32)
+    {
+        throw InvalidArgumentException(descriptorName + ": Output of ArgMinMax layer must be Int32.");
+    }
+
     std::vector<DataType> supportedTypes =
     {
         DataType::Float16,
@@ -480,8 +485,6 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     };
 
     ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
-    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
-    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 0943a83eb1..abf924aca5 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -37,6 +37,7 @@ COMMON_TEST_SOURCES := \
     test/layerTests/AbsTestImpl.cpp \
     test/layerTests/ActivationTestImpl.cpp \
     test/layerTests/AdditionTestImpl.cpp \
+    test/layerTests/ArgMinMaxTestImpl.cpp \
    test/layerTests/BatchNormalizationTestImpl.cpp \
    test/layerTests/ConcatTestImpl.cpp \
    test/layerTests/ConstantTestImpl.cpp \
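The WorkloadData.cpp hunk above captures the defining property of ArgMinMax: the
layer returns indices, so its output is always Signed32 and its shape is the
input shape with the reduced axis removed. The old input/output type- and
shape-match checks could never hold for this layer, which is why they are
deleted outright rather than relaxed. A minimal sketch of the shape relation,
with a helper name invented for illustration (this is not code from the patch):

    #include <cstdint>
    #include <vector>

    // An input shape {1, 1, 1, 5} reduced over axis 3 yields output shape
    // {1, 1, 1}, matching the ArgMaxSimpleTest/ArgMinSimpleTest cases later
    // in this patch.
    std::vector<uint32_t> ArgMinMaxOutputShape(const std::vector<uint32_t>& inputShape,
                                               unsigned int axis)
    {
        std::vector<uint32_t> outputShape;
        for (unsigned int i = 0; i < inputShape.size(); ++i)
        {
            if (i != axis) // the reduced dimension disappears from the output
            {
                outputShape.push_back(inputShape[i]);
            }
        }
        return outputShape;
    }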
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index ef293d490b..0cebf90319 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -46,6 +46,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     layerTests/ActivationTestImpl.hpp
     layerTests/AdditionTestImpl.cpp
     layerTests/AdditionTestImpl.hpp
+    layerTests/ArgMinMaxTestImpl.cpp
+    layerTests/ArgMinMaxTestImpl.hpp
     layerTests/BatchNormalizationTestImpl.cpp
     layerTests/BatchNormalizationTestImpl.hpp
     layerTests/BatchToSpaceNdTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index f48ae436cb..14ff26610f 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -8,6 +8,7 @@
 #include <backendsCommon/test/layerTests/AbsTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ActivationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/AdditionTestImpl.hpp>
+#include <backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp>
 #include <backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp>
 #include <backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ConcatTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
new file mode 100644
index 0000000000..9c07599387
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -0,0 +1,252 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArgMinMaxTestImpl.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace
+{
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        armnn::ArgMinMaxFunction argMinMaxFunction,
+        const armnn::TensorInfo inputTensorInfo,
+        const armnn::TensorInfo outputTensorInfo,
+        const std::vector<float>& inputData,
+        const std::vector<int32_t>& outputData,
+        int axis = 3)
+{
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
+
+    LayerTestResult<int32_t, 3> result(outputTensorInfo);
+
+    result.outputExpected = MakeTensor<int32_t, 3>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ArgMinMaxQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Function = argMinMaxFunction;
+    descriptor.m_Parameters.m_Axis     = axis;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateArgMinMax(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+    return result;
+}
+
+} // namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMaxSimpleTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float>   inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> outputValues({ 3 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Max,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 3);
+}
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMinSimpleTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float>   inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> outputValues({ 1 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Min,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 3);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMinChannel4dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    // Reducing over C (axis 1) gives { 1, 2, 4 }; over H (axis 2) it would be
+    // { 1, 3, 4 }; over W (axis 3) it would be { 1, 3, 2 }.
+    const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float> inputValues({   1.0f,   2.0f,   3.0f,   4.0f,
+                                       5.0f,   6.0f,   7.0f,   8.0f,
+
+                                      10.0f,  20.0f,  30.0f,  40.0f,
+                                      50.0f,  60.0f,  70.0f,  80.0f,
+
+                                     100.0f, 200.0f, 300.0f, 400.0f,
+                                     500.0f, 600.0f, 700.0f, 800.0f });
+
+    std::vector<int32_t> outputValues({ 0, 0, 0, 0,
+                                        0, 0, 0, 0 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Min,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 1);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 3> ArgMaxChannel4dTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    std::vector<float> inputValues({   1.0f,   2.0f,   3.0f,   4.0f,
+                                       5.0f,   6.0f,   7.0f,   8.0f,
+
+                                      10.0f,  20.0f,  30.0f,  40.0f,
+                                      50.0f,  60.0f,  70.0f,  80.0f,
+
+                                     100.0f, 200.0f, 300.0f, 400.0f,
+                                     500.0f, 600.0f, 700.0f, 800.0f });
+
+    std::vector<int32_t> outputValues({ 2, 2, 2, 2,
+                                        2, 2, 2, 2 });
+
+    return ArgMinMaxTestCommon<ArmnnType>(workloadFactory, memoryManager,
+                                          armnn::ArgMinMaxFunction::Max,
+                                          inputTensorInfo, outputTensorInfo,
+                                          inputValues, outputValues, 1);
+}
+
+
+// Explicit template specializations
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxSimpleTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinSimpleTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMinChannel4dTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::QuantisedAsymm8>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 3>
+ArgMaxChannel4dTest<armnn::DataType::QuantisedSymm16>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
new file mode 100644
index 0000000000..79d77d41b2
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <backendsCommon/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMaxSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMinSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                             const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMinChannel4dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 3> ArgMaxChannel4dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 572f617636..14183a7b68 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -192,6 +192,29 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
     return supported;
 }
 
+bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
+                                           const armnn::ArgMinMaxDescriptor &descriptor,
+                                           armnn::Optional<std::string &> reasonIfUnsupported) const
+{
+    ignore_unused(descriptor);
+
+    std::array<DataType, 3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    bool supported = true;
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference ArgMinMax: input is not a supported type.");
+    supported &= CheckSupportRule(TypeIs(output, DataType::Signed32), reasonIfUnsupported,
+                                  "Reference ArgMinMax: output type not supported.");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& mean,
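A note on the IsArgMinMaxSupported overload above: the reference backend claims
Float32, QuantisedAsymm8 and QuantisedSymm16 inputs and insists on a Signed32
output; Float16, which the descriptor validation in WorkloadData.cpp tolerates,
is deliberately absent from this list. A usage sketch, assuming only the
signatures visible in this patch and that the internal RefLayerSupport header is
reachable, as it is from the backend unit tests:

    #include <reference/RefLayerSupport.hpp>

    #include <string>

    bool RefSupportsSimpleArgMax()
    {
        armnn::RefLayerSupport layerSupport;

        // Same configuration as the ArgMaxSimpleTest case in this patch.
        armnn::ArgMinMaxDescriptor descriptor;
        descriptor.m_Function = armnn::ArgMinMaxFunction::Max;
        descriptor.m_Axis     = 3;

        const armnn::TensorInfo input({ 1, 1, 1, 5 }, armnn::DataType::Float32);
        const armnn::TensorInfo output({ 1, 1, 1 }, armnn::DataType::Signed32);

        std::string reason;
        return layerSupport.IsArgMinMaxSupported(input, output, descriptor,
                                                 armnn::Optional<std::string&>(reason));
    }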
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 8200058633..9e8c914512 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -26,6 +26,11 @@ public:
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsArgMinMaxSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const ArgMinMaxDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsBatchNormalizationSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const TensorInfo& mean,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 055c8da600..346fd691f2 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -134,6 +134,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
     return std::make_unique<RefActivationWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                                               const WorkloadInfo& info) const
+{
+    return std::make_unique<RefArgMinMaxWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
 {
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 2c40053f73..606da82d32 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -67,6 +67,9 @@ public:
     std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                               const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index b2ec7488e2..fd0df27907 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -23,6 +23,7 @@ BACKEND_SOURCES := \
         RefTensorHandleFactory.cpp \
         workloads/Abs.cpp \
         workloads/Activation.cpp \
+        workloads/ArgMinMax.cpp \
        workloads/BatchNormImpl.cpp \
        workloads/BatchToSpaceNd.cpp \
        workloads/Broadcast.cpp \
@@ -40,6 +41,7 @@ BACKEND_SOURCES := \
        workloads/PreluImpl.cpp \
        workloads/RefAbsWorkload.cpp \
        workloads/RefActivationWorkload.cpp \
+        workloads/RefArgMinMaxWorkload.cpp \
        workloads/RefBatchNormalizationWorkload.cpp \
        workloads/RefBatchToSpaceNdWorkload.cpp \
        workloads/RefConcatWorkload.cpp \
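The test registrations just below reuse one set of expected indices across
Float32, QuantisedAsymm8 and QuantisedSymm16. That works because arg-min/arg-max
indices are invariant under affine quantisation with a positive scale: the
mapping preserves ordering, so the winning position never moves. A
self-contained illustration (not patch code; the quantisation mirrors the
scale-1.0, offset-0 settings the tests configure):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    int main()
    {
        const std::vector<float> values = { 5.0f, 2.0f, 8.0f, 10.0f, 9.0f };
        const float   scale  = 1.0f;
        const int32_t offset = 0;

        // Quantise each value as value / scale + offset, as an asymmetric
        // 8-bit backend would.
        std::vector<uint8_t> quantised;
        for (float v : values)
        {
            quantised.push_back(static_cast<uint8_t>(v / scale + offset));
        }

        const auto floatArgMax = std::distance(values.begin(),
                                               std::max_element(values.begin(), values.end()));
        const auto quantArgMax = std::distance(quantised.begin(),
                                               std::max_element(quantised.begin(), quantised.end()));

        assert(floatArgMax == 3 && quantArgMax == 3); // same winner either way
        return 0;
    }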
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index afeb8a458a..a5164f072b 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -1022,6 +1022,22 @@
 ARMNN_AUTO_TEST_CASE(MeanVts3QuantisedSymm16, MeanVts3Test<DataType::QuantisedSymm16>)
 
+ARMNN_AUTO_TEST_CASE(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannel4dFloat32, ArgMinChannel4dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannel4dFloat32, ArgMaxChannel4dTest<DataType::Float32>)
+
+ARMNN_AUTO_TEST_CASE(ArgMaxQuantisedAsymm8, ArgMaxSimpleTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMinQuantisedAsymm8, ArgMinSimpleTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannel4dQuantisedAsymm8, ArgMinChannel4dTest<DataType::QuantisedAsymm8>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannel4dQuantisedAsymm8, ArgMaxChannel4dTest<DataType::QuantisedAsymm8>)
+
+ARMNN_AUTO_TEST_CASE(ArgMaxQuantisedSymm16, ArgMaxSimpleTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(ArgMinQuantisedSymm16, ArgMinSimpleTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(ArgMinChannel4dQuantisedSymm16, ArgMinChannel4dTest<DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(ArgMaxChannel4dQuantisedSymm16, ArgMaxChannel4dTest<DataType::QuantisedSymm16>)
+
 // Space To Batch Nd
 ARMNN_AUTO_TEST_CASE(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
 ARMNN_AUTO_TEST_CASE(SpaceToBatchNdMultiChannelsFloat32, SpaceToBatchNdMultiChannelsFloat32Test)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 30770956ba..83444eda1f 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -6,6 +6,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     Abs.cpp
     Abs.hpp
+    ArgMinMax.cpp
+    ArgMinMax.hpp
     Activation.cpp
     Activation.hpp
     ArgMinMax.cpp
@@ -49,6 +51,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefAbsWorkload.hpp
     RefActivationWorkload.cpp
     RefActivationWorkload.hpp
+    RefArgMinMaxWorkload.cpp
+    RefArgMinMaxWorkload.hpp
     RefBatchNormalizationWorkload.cpp
     RefBatchNormalizationWorkload.hpp
     RefBatchToSpaceNdWorkload.cpp
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
new file mode 100644
index 0000000000..5f1eb73b61
--- /dev/null
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.cpp
@@ -0,0 +1,38 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefArgMinMaxWorkload.hpp"
+
+#include "ArgMinMax.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+#include "Profiling.hpp"
+
+namespace armnn
+{
+RefArgMinMaxWorkload::RefArgMinMaxWorkload(
+        const ArgMinMaxQueueDescriptor& descriptor,
+        const WorkloadInfo& info)
+        : BaseWorkload<ArgMinMaxQueueDescriptor>(descriptor, info) {}
+
+void RefArgMinMaxWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefArgMinMaxWorkload_Execute");
+
+    const TensorInfo &inputTensorInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputTensorInfo, m_Data.m_Inputs[0]->Map());
+    Decoder<float> &decoder = *decoderPtr;
+
+    const TensorInfo &outputTensorInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    int32_t* output = GetOutputTensorData<int32_t>(0, m_Data);
+
+    ArgMinMax(decoder, output, inputTensorInfo, outputTensorInfo, m_Data.m_Parameters.m_Function,
+              m_Data.m_Parameters.m_Axis);
+}
+
+} //namespace armnn
\ No newline at end of file
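RefArgMinMaxWorkload.cpp above delegates the actual search to ArgMinMax(), whose
implementation lives in workloads/ArgMinMax.cpp and is only wired into the build
lists by this commit, so it does not appear in the diff. The sketch below shows
the kind of inner/outer-stride walk such a reduction needs; it is an
illustration written against plain pointers rather than the Decoder<float>
abstraction the real workload uses, and the stand-in enum mirrors
armnn::ArgMinMaxFunction:

    #include <cstdint>

    enum class ArgMinMaxFunction { Min, Max }; // stand-in for armnn::ArgMinMaxFunction

    void ArgMinMaxSketch(const float* input, int32_t* output,
                         const unsigned int* inputShape, unsigned int numDims,
                         unsigned int axis, ArgMinMaxFunction function)
    {
        // Dimensions left of the axis form "outer" slices, dimensions right of
        // it form "inner" strides; one index is produced per (outer, inner) pair.
        unsigned int outerElements = 1;
        for (unsigned int i = 0; i < axis; ++i)
        {
            outerElements *= inputShape[i];
        }

        const unsigned int axisSize = inputShape[axis];

        unsigned int innerElements = 1;
        for (unsigned int i = axis + 1; i < numDims; ++i)
        {
            innerElements *= inputShape[i];
        }

        for (unsigned int outer = 0; outer < outerElements; ++outer)
        {
            for (unsigned int inner = 0; inner < innerElements; ++inner)
            {
                float    bestValue = input[outer * axisSize * innerElements + inner];
                uint32_t bestIndex = 0;

                for (unsigned int i = 1; i < axisSize; ++i)
                {
                    const float value = input[(outer * axisSize + i) * innerElements + inner];
                    const bool better = (function == ArgMinMaxFunction::Max) ? value > bestValue
                                                                             : value < bestValue;
                    if (better)
                    {
                        bestValue = value;
                        bestIndex = i;
                    }
                }
                output[outer * innerElements + inner] = static_cast<int32_t>(bestIndex);
            }
        }
    }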
diff --git a/src/backends/reference/workloads/RefArgMinMaxWorkload.hpp b/src/backends/reference/workloads/RefArgMinMaxWorkload.hpp
new file mode 100644
index 0000000000..97b70772d1
--- /dev/null
+++ b/src/backends/reference/workloads/RefArgMinMaxWorkload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+class RefArgMinMaxWorkload : public BaseWorkload<ArgMinMaxQueueDescriptor>
+{
+public:
+    explicit RefArgMinMaxWorkload(const ArgMinMaxQueueDescriptor& descriptor,
+                                  const WorkloadInfo& info);
+
+    virtual void Execute() const override;
+};
+} //namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 959226adf6..b4721b11aa 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -7,6 +7,7 @@
 
 #include "Abs.hpp"
 #include "Activation.hpp"
+#include "ArgMinMax.hpp"
 #include "BatchNormImpl.hpp"
 #include "ConvImpl.hpp"
 #include "Concatenate.hpp"
@@ -16,6 +17,7 @@
 #include "Pooling2d.hpp"
 #include "RefAbsWorkload.hpp"
 #include "RefActivationWorkload.hpp"
+#include "RefArgMinMaxWorkload.hpp"
 #include "RefBatchNormalizationWorkload.hpp"
 #include "RefBatchToSpaceNdWorkload.hpp"
 #include "RefConvolution2dWorkload.hpp"
-- 
cgit v1.2.1
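For context, here is how the new reference path is reached end to end. The
front-end ArgMinMax layer was added by the preceding IVGCVSW tickets, so
INetwork::AddArgMinMaxLayer is assumed to exist with the signature shown; the
runtime boilerplate is illustrative rather than prescriptive:

    #include <armnn/ArmNN.hpp>

    int main()
    {
        using namespace armnn;

        INetworkPtr network = INetwork::Create();

        ArgMinMaxDescriptor descriptor;
        descriptor.m_Function = ArgMinMaxFunction::Max;
        descriptor.m_Axis     = 3;

        IConnectableLayer* input  = network->AddInputLayer(0);
        IConnectableLayer* argMax = network->AddArgMinMaxLayer(descriptor, "argmax");
        IConnectableLayer* output = network->AddOutputLayer(0);

        input->GetOutputSlot(0).Connect(argMax->GetInputSlot(0));
        argMax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

        // Shapes follow ArgMaxSimpleTest: the axis-3 dimension is reduced away
        // and the output must be Signed32, per the validation added above.
        input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1, 5 }, DataType::Float32));
        argMax->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 1 }, DataType::Signed32));

        // Optimising for CpuRef selects RefWorkloadFactory, which now creates
        // a RefArgMinMaxWorkload for this layer.
        IRuntime::CreationOptions options;
        IRuntimePtr runtime = IRuntime::Create(options);
        IOptimizedNetworkPtr optNet =
            Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());

        return optNet != nullptr ? 0 : 1;
    }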