From f195f03e095a5c4dc6880be11af64cab83b5c94b Mon Sep 17 00:00:00 2001
From: David Beck
Date: Thu, 6 Sep 2018 16:46:34 +0100
Subject: IVGCVSW-1803 : add Ref Subtraction layer

Change-Id: I4c019d626f9369245eca6d549bbe7a28e141f198
---
 src/armnn/backends/RefLayerSupport.cpp             |   8 +-
 src/armnn/backends/RefWorkloadFactory.cpp          |   2 +-
 src/armnn/backends/RefWorkloads.hpp                |   2 +
 .../RefWorkloads/RefSubtractionFloat32Workload.cpp |  31 ++++
 .../RefWorkloads/RefSubtractionFloat32Workload.hpp |  21 +++
 .../RefWorkloads/RefSubtractionUint8Workload.cpp   |  41 ++++++
 .../RefWorkloads/RefSubtractionUint8Workload.hpp   |  21 +++
 src/armnn/backends/RefWorkloads/Subtraction.cpp    |  44 ++++++
 src/armnn/backends/RefWorkloads/Subtraction.hpp    |  20 +++
 src/armnn/backends/test/LayerTests.cpp             | 160 ++++++++++++++++++++-
 src/armnn/backends/test/LayerTests.hpp             |  10 +-
 src/armnn/backends/test/Reference.cpp              |   9 ++
 12 files changed, 364 insertions(+), 5 deletions(-)
 create mode 100644 src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
 create mode 100644 src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp
 create mode 100644 src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
 create mode 100644 src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp
 create mode 100644 src/armnn/backends/RefWorkloads/Subtraction.cpp
 create mode 100644 src/armnn/backends/RefWorkloads/Subtraction.hpp

diff --git a/src/armnn/backends/RefLayerSupport.cpp b/src/armnn/backends/RefLayerSupport.cpp
index 5437574789..41f57f1677 100644
--- a/src/armnn/backends/RefLayerSupport.cpp
+++ b/src/armnn/backends/RefLayerSupport.cpp
@@ -135,8 +135,12 @@ bool IsSubtractionSupportedRef(const TensorInfo& input0,
                                const TensorInfo& output,
                                std::string* reasonIfUnsupported)
 {
-    // At the moment subtraction is not supported
-    return false;
+    ignore_unused(input1);
+    ignore_unused(output);
+    return IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                     input0.GetDataType(),
+                                     &TrueFunc<>,
+                                     &TrueFunc<>);
 }
 
 bool IsFullyConnectedSupportedRef(const TensorInfo& input,
diff --git a/src/armnn/backends/RefWorkloadFactory.cpp b/src/armnn/backends/RefWorkloadFactory.cpp
index 4de9274eb8..92e2506935 100644
--- a/src/armnn/backends/RefWorkloadFactory.cpp
+++ b/src/armnn/backends/RefWorkloadFactory.cpp
@@ -230,7 +230,7 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateDivision(
 std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateSubtraction(
     const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkload<RefSubtractionFloat32Workload, RefSubtractionUint8Workload>(descriptor, info);
 }
 
 } // namespace armnn
diff --git a/src/armnn/backends/RefWorkloads.hpp b/src/armnn/backends/RefWorkloads.hpp
index 98385ad5ac..910610c72e 100644
--- a/src/armnn/backends/RefWorkloads.hpp
+++ b/src/armnn/backends/RefWorkloads.hpp
@@ -57,3 +57,5 @@
 #include "backends/RefWorkloads/RefConvertFp32ToFp16Workload.hpp"
 #include "backends/RefWorkloads/RefDivisionFloat32Workload.hpp"
 #include "backends/RefWorkloads/RefDivisionUint8Workload.hpp"
+#include "backends/RefWorkloads/RefSubtractionFloat32Workload.hpp"
+#include "backends/RefWorkloads/RefSubtractionUint8Workload.hpp"
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
new file mode 100644
index 0000000000..4440eedab7
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.cpp
@@ -0,0 +1,31 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSubtractionFloat32Workload.hpp"
+
+#include "Subtraction.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+namespace armnn
+{
+
+void RefSubtractionFloat32Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSubtractionFloat32Workload_Execute");
+
+    const TensorShape& inShape0 = GetTensorInfo(m_Data.m_Inputs[0]).GetShape();
+    const TensorShape& inShape1 = GetTensorInfo(m_Data.m_Inputs[1]).GetShape();
+    const TensorShape& outShape = GetTensorInfo(m_Data.m_Outputs[0]).GetShape();
+
+    const float* inData0 = GetInputTensorDataFloat(0, m_Data);
+    const float* inData1 = GetInputTensorDataFloat(1, m_Data);
+    float* outData = GetOutputTensorDataFloat(0, m_Data);
+
+    Subtraction(inShape0, inShape1, outShape, inData0, inData1, outData);
+}
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp
new file mode 100644
index 0000000000..b3f5ed9474
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefSubtractionFloat32Workload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "backends/Workload.hpp"
+#include "backends/WorkloadData.hpp"
+
+namespace armnn
+{
+
+class RefSubtractionFloat32Workload : public Float32Workload<SubtractionQueueDescriptor>
+{
+public:
+    using Float32Workload<SubtractionQueueDescriptor>::Float32Workload;
+    virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
new file mode 100644
index 0000000000..8066762e48
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.cpp
@@ -0,0 +1,41 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefSubtractionUint8Workload.hpp"
+
+#include "Subtraction.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include "Profiling.hpp"
+
+#include <vector>
+
+namespace armnn
+{
+
+void RefSubtractionUint8Workload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSubtractionUint8Workload_Execute");
+
+    const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    auto dequant0 = Dequantize(GetInputTensorDataU8(0, m_Data), inputInfo0);
+    auto dequant1 = Dequantize(GetInputTensorDataU8(1, m_Data), inputInfo1);
+
+    std::vector<float> results(outputInfo.GetNumElements());
+
+    Subtraction(inputInfo0.GetShape(),
+                inputInfo1.GetShape(),
+                outputInfo.GetShape(),
+                dequant0.data(),
+                dequant1.data(),
+                results.data());
+
+    Quantize(GetOutputTensorDataU8(0, m_Data), results.data(), outputInfo);
+}
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp
new file mode 100644
index 0000000000..582533253b
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/RefSubtractionUint8Workload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "backends/Workload.hpp"
+#include "backends/WorkloadData.hpp"
+
+namespace armnn
+{
+
+class RefSubtractionUint8Workload : public Uint8Workload<SubtractionQueueDescriptor>
+{
+public:
+    using Uint8Workload<SubtractionQueueDescriptor>::Uint8Workload;
+    virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Subtraction.cpp b/src/armnn/backends/RefWorkloads/Subtraction.cpp
new file mode 100644
index 0000000000..f25c8adb1c
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/Subtraction.cpp
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Subtraction.hpp"
+#include "Broadcast.hpp"
+
+#include <functional>
+
+namespace
+{
+
+void ElementwiseSubtraction(unsigned int numElements, const float* inData0, const float* inData1, float* outData)
+{
+    for (unsigned int i = 0; i < numElements; ++i)
+    {
+        outData[i] = inData0[i] - inData1[i];
+    }
+}
+
+} // namespace
+
+namespace armnn
+{
+
+void Subtraction(const TensorShape& inShape0,
+                 const TensorShape& inShape1,
+                 const TensorShape& outShape,
+                 const float* inData0,
+                 const float* inData1,
+                 float* outData)
+{
+    if (inShape0 == inShape1)
+    {
+        ElementwiseSubtraction(inShape0.GetNumElements(), inData0, inData1, outData);
+    }
+    else
+    {
+        BroadcastLoop(inShape0, inShape1, outShape).Unroll(std::minus<float>(), 0, inData0, inData1, outData);
+    }
+}
+
+} //namespace armnn
diff --git a/src/armnn/backends/RefWorkloads/Subtraction.hpp b/src/armnn/backends/RefWorkloads/Subtraction.hpp
new file mode 100644
index 0000000000..3956797185
--- /dev/null
+++ b/src/armnn/backends/RefWorkloads/Subtraction.hpp
@@ -0,0 +1,20 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+namespace armnn
+{
+
+void Subtraction(const TensorShape& inShape0,
+                 const TensorShape& inShape1,
+                 const TensorShape& outShape,
+                 const float* inData0,
+                 const float* inData1,
+                 float* outData);
+
+} //namespace armnn
diff --git a/src/armnn/backends/test/LayerTests.cpp b/src/armnn/backends/test/LayerTests.cpp
index 8683f116cf..b39daf6bbf 100644
--- a/src/armnn/backends/test/LayerTests.cpp
+++ b/src/armnn/backends/test/LayerTests.cpp
@@ -1002,7 +1002,7 @@ LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
 }
 
 LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
-    armnn::IWorkloadFactory& refWorkloadFactory)
+                                              armnn::IWorkloadFactory& refWorkloadFactory)
 {
     unsigned int batchSize = 4;
     unsigned int channels = 1;
@@ -3935,6 +3935,164 @@ LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
         0);
 }
 
+namespace
+{
+template <typename T>
+LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
+                                            const unsigned int shape0[4],
+                                            const std::vector<T>& values0,
+                                            float scale0,
+                                            int32_t offset0,
+                                            const unsigned int shape1[4],
+                                            const std::vector<T>& values1,
+                                            float scale1,
+                                            int32_t offset1,
+                                            const unsigned int outShape[4],
+                                            const std::vector<T>& outValues,
+                                            float outScale,
+                                            int32_t outOffset)
+{
+    auto dataType = (std::is_same<T, uint8_t>::value ?
+                     armnn::DataType::QuantisedAsymm8 :
+                     armnn::DataType::Float32);
+
+    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
+    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
+    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
+
+    inputTensorInfo0.SetQuantizationScale(scale0);
+    inputTensorInfo0.SetQuantizationOffset(offset0);
+
+    inputTensorInfo1.SetQuantizationScale(scale1);
+    inputTensorInfo1.SetQuantizationOffset(offset1);
+
+    outputTensorInfo.SetQuantizationScale(outScale);
+    outputTensorInfo.SetQuantizationOffset(outOffset);
+
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::SubtractionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+} // anonymous namespace
+
+LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 2 };
+
+    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
+    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
+    std::vector<uint8_t> output({ 3, 3, 5, 5 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 0.5f, 2,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
+    std::vector<uint8_t> input1({ 2 });
+    std::vector<uint8_t> output({ 5, 6, 7, 8 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 0.5f, 2,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 3);
+}
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 1 };
+
+    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
+    std::vector<uint8_t> input1({ 2, 1 });
+    std::vector<uint8_t> output({ 8, 11, 12, 15 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 2 };
+
+    std::vector<float> input0({ 1, 2, 3, 4 });
+    std::vector<float> input1({ 1, -1, 0, 2 });
+    std::vector<float> output({ 0, 3, 3, 2 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0({ 1, 2, 3, 4 });
+    std::vector<float> input1({ 10 });
+    std::vector<float> output({ -9, -8, -7, -6 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<float> input0({ 1, 2, 3, 4 });
+    std::vector<float> input1({ 10, -5 });
+    std::vector<float> output({ -9, 7, -7, 9 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
 LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
 {
     constexpr unsigned int inputWidth = 4;
diff --git a/src/armnn/backends/test/LayerTests.hpp b/src/armnn/backends/test/LayerTests.hpp
index 06d789e783..5ca4c49c88 100644
--- a/src/armnn/backends/test/LayerTests.hpp
+++ b/src/armnn/backends/test/LayerTests.hpp
@@ -185,7 +185,11 @@ LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory);
 
 LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
-    armnn::IWorkloadFactory& refWorkloadFactory);
+                                              armnn::IWorkloadFactory& refWorkloadFactory);
+
+LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory);
 
 LayerTestResult<float, 4> CompareActivationTest(armnn::IWorkloadFactory& workloadFactory,
                                                 armnn::IWorkloadFactory& refWorkloadFactory,
@@ -264,6 +268,10 @@ LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory);
 LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory);
 
+LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
 LayerTestResult<uint8_t, 4> CompareActivationUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                        armnn::IWorkloadFactory& refWorkloadFactory,
                                                        armnn::ActivationFunction f);
diff --git a/src/armnn/backends/test/Reference.cpp b/src/armnn/backends/test/Reference.cpp
index 5b17bf3012..5a5f79d965 100644
--- a/src/armnn/backends/test/Reference.cpp
+++ b/src/armnn/backends/test/Reference.cpp
@@ -146,6 +146,15 @@ ARMNN_AUTO_TEST_CASE(AdditionUint8, AdditionUint8Test)
 ARMNN_AUTO_TEST_CASE(AddBroadcastUint8, AdditionBroadcastUint8Test)
 ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test)
 
+// Sub
+ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest)
+ARMNN_AUTO_TEST_CASE(SubBroadcast1Element, SubtractionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(SubBroadcast, SubtractionBroadcastTest)
+
+ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test)
+ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test)
+ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
+
 // Div
 ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
 ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
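
Note on the semantics exercised by the tests above: the reference Subtraction path follows the same pattern as the existing Addition, Multiplication and Division reference workloads. When the two input shapes match it subtracts element-wise; otherwise BroadcastLoop stretches each size-1 dimension of the smaller operand, and the Uint8 workload dequantizes to float, subtracts, then requantizes. The sketch below is a minimal, self-contained illustration of that broadcast-subtract rule in plain standard C++; the BroadcastSubtract helper and its modulo indexing are illustrative assumptions only and are not part of ArmNN or of this patch.

// broadcast_sub_sketch.cpp - illustrative only, not part of the patch above.
#include <algorithm>
#include <array>
#include <cstddef>
#include <iostream>
#include <vector>

// Subtract b from a with simple 4-D broadcasting: a dimension of size 1 in
// either operand is stretched to the corresponding output dimension.
std::vector<float> BroadcastSubtract(const std::array<std::size_t, 4>& shapeA, const std::vector<float>& a,
                                     const std::array<std::size_t, 4>& shapeB, const std::vector<float>& b,
                                     std::array<std::size_t, 4>& outShape)
{
    for (std::size_t d = 0; d < 4; ++d)
    {
        outShape[d] = std::max(shapeA[d], shapeB[d]);
    }

    // Flat index into a tensor of shape s; a coordinate modulo a size-1
    // dimension collapses to 0, which re-reads the single broadcast element.
    auto index = [](const std::array<std::size_t, 4>& s,
                    std::size_t n, std::size_t c, std::size_t h, std::size_t w)
    {
        return (((n % s[0]) * s[1] + (c % s[1])) * s[2] + (h % s[2])) * s[3] + (w % s[3]);
    };

    std::vector<float> out(outShape[0] * outShape[1] * outShape[2] * outShape[3]);
    std::size_t i = 0;
    for (std::size_t n = 0; n < outShape[0]; ++n)
        for (std::size_t c = 0; c < outShape[1]; ++c)
            for (std::size_t h = 0; h < outShape[2]; ++h)
                for (std::size_t w = 0; w < outShape[3]; ++w)
                    out[i++] = a[index(shapeA, n, c, h, w)] - b[index(shapeB, n, c, h, w)];
    return out;
}

int main()
{
    // Mirrors the SubBroadcast test data above: {1,2,3,4} minus broadcast {10,-5}.
    std::array<std::size_t, 4> shape0{1, 1, 2, 2}, shape1{1, 1, 1, 2}, outShape{};
    std::vector<float> in0{1, 2, 3, 4}, in1{10, -5};

    for (float v : BroadcastSubtract(shape0, in0, shape1, in1, outShape))
    {
        std::cout << v << ' ';    // prints: -9 7 -7 9
    }
    std::cout << '\n';
}

The modulo trick works because each dimension is expected to be either equal across the operands or 1, which is also the assumption behind the BroadcastLoop used by the patch; a coordinate taken modulo a size-1 dimension collapses to 0 and repeatedly reads the single broadcast element.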