From 4e3e831da1d6d85dffffacf57e9de8fc891b7e58 Mon Sep 17 00:00:00 2001
From: Teresa Charlin
Date: Thu, 5 Aug 2021 12:34:37 +0100
Subject: IVGCVSW-6262 Add support for Reduce Prod

* Tflite parser
* Tflite delegate
* Serializer
* Deserializer
* Ref, CpuAcc and GpuAcc workloads

Signed-off-by: Teresa Charlin
Change-Id: I601a9ee1680b372c7955d9a628857d08c3cfd377
---
 src/armnnDeserializer/Deserializer.cpp           |   2 +
 src/armnnSerializer/ArmnnSchema.fbs              |   3 +-
 src/armnnSerializer/ArmnnSchema_generated.h      |  11 +-
 src/armnnSerializer/SerializerUtils.cpp          |   2 +
 src/armnnTfLiteParser/TfLiteParser.cpp           |   6 +
 src/armnnTfLiteParser/TfLiteParser.hpp           |   1 +
 src/armnnTfLiteParser/test/Prod.cpp              | 106 +++++++
 src/backends/aclCommon/ArmComputeUtils.hpp       |   3 +-
 src/backends/backendsCommon/common.mk            |   1 +
 src/backends/backendsCommon/test/CMakeLists.txt  |   2 +
 src/backends/backendsCommon/test/LayerTests.hpp  |   1 +
 .../test/layerTests/ReduceProdTestImpl.cpp       | 345 +++++++++++++++++++++
 .../test/layerTests/ReduceProdTestImpl.hpp       |  43 +++
 src/backends/cl/test/ClLayerTests.cpp            |   9 +
 src/backends/neon/test/NeonLayerTests.cpp        |   4 +
 src/backends/reference/test/RefLayerTests.cpp    |   7 +
 src/backends/reference/workloads/Reduce.cpp      |  43 ++-
 17 files changed, 566 insertions(+), 23 deletions(-)
 create mode 100644 src/armnnTfLiteParser/test/Prod.cpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp

diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index 5c99496744..074429b73c 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -522,6 +522,8 @@ armnn::ReduceOperation ToReduceOperation(armnnSerializer::ReduceOperation operat
             return armnn::ReduceOperation::Mean;
         case armnnSerializer::ReduceOperation::ReduceOperation_Min:
             return armnn::ReduceOperation::Min;
+        case armnnSerializer::ReduceOperation::ReduceOperation_Prod:
+            return armnn::ReduceOperation::Prod;
         default:
             return armnn::ReduceOperation::Sum;
     }
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 85435a366f..a285a110cc 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -52,7 +52,8 @@ enum ReduceOperation: byte {
     Sum = 0,
     Max = 1,
     Mean = 2,
-    Min = 3
+    Min = 3,
+    Prod = 4
 }
 
 enum ResizeMethod: byte {
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index ca2bf0c003..cf28a7a313 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -559,26 +559,29 @@ enum ReduceOperation {
   ReduceOperation_Max = 1,
   ReduceOperation_Mean = 2,
   ReduceOperation_Min = 3,
+  ReduceOperation_Prod = 4,
   ReduceOperation_MIN = ReduceOperation_Sum,
-  ReduceOperation_MAX = ReduceOperation_Min
+  ReduceOperation_MAX = ReduceOperation_Prod
 };
 
-inline const ReduceOperation (&EnumValuesReduceOperation())[4] {
+inline const ReduceOperation (&EnumValuesReduceOperation())[5] {
   static const ReduceOperation values[] = {
     ReduceOperation_Sum,
     ReduceOperation_Max,
     ReduceOperation_Mean,
-    ReduceOperation_Min
+    ReduceOperation_Min,
+    ReduceOperation_Prod
   };
   return values;
 }
 
 inline const char * const *EnumNamesReduceOperation() {
-  static const char * const names[5] = {
+  static const char * const names[6] = {
     "Sum",
     "Max",
     "Mean",
     "Min",
+    "Prod",
     nullptr
   };
   return names;
 }
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 1df8d4e608..85ce01d132 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -219,6 +219,8 @@ armnnSerializer::ReduceOperation GetFlatBufferReduceOperation(armnn::ReduceOpera
             return armnnSerializer::ReduceOperation::ReduceOperation_Mean;
         case armnn::ReduceOperation::Min:
             return armnnSerializer::ReduceOperation::ReduceOperation_Min;
+        case armnn::ReduceOperation::Prod:
+            return armnnSerializer::ReduceOperation::ReduceOperation_Prod;
         default:
             return armnnSerializer::ReduceOperation::ReduceOperation_Sum;
     }
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 5c7cb9b0b8..305f76931d 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -677,6 +677,7 @@ TfLiteParserImpl::TfLiteParserImpl(const Optional
+        (0, {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f,
+                                5.0f, 6.0f, 7.0f, 8.0f,
+
+                                10.0f, 20.0f, 30.0f, 40.0f,
+                                50.0f, 60.0f, 70.0f, 80.0f,
+
+                                11.0f, 22.0f, 33.0f, 44.0f,
+                                55.0f, 66.0f, 77.0f, 88.0f } } },
+        {{ "outputTensor", { 1512500.f, 20908800.f, 112058100.f, 396492800.f } } });
+}
+
+}
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 624ce5df7a..059518acd6 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -272,7 +272,8 @@ inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const Redu
         case ReduceOperation::Mean: return arm_compute::ReductionOperation::MEAN_SUM;
         case ReduceOperation::Max:  return arm_compute::ReductionOperation::MAX;
         case ReduceOperation::Min:  return arm_compute::ReductionOperation::MIN;
-        default:                    throw InvalidArgumentException("Unsupported Reduction operation");
+        case ReduceOperation::Prod: return arm_compute::ReductionOperation::PROD;
+        default:                    throw InvalidArgumentException("Unsupported Reduction operation");
     }
 }
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index 5d339477d5..47ceffe37d 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -78,6 +78,7 @@ COMMON_TEST_SOURCES := \
     test/layerTests/Pooling2dTestImpl.cpp \
     test/layerTests/RankTestImpl.cpp \
     test/layerTests/ReductionTestImpl.cpp \
+    test/layerTests/ReduceProdTestImpl.cpp \
     test/layerTests/ReduceSumTestImpl.cpp \
     test/layerTests/ReshapeTestImpl.cpp \
     test/layerTests/ResizeTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 4561fd7739..c9bc5e74b8 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -139,6 +139,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     layerTests/RankTestImpl.hpp
     layerTests/ReductionTestImpl.cpp
     layerTests/ReductionTestImpl.hpp
+    layerTests/ReduceProdTestImpl.cpp
+    layerTests/ReduceProdTestImpl.hpp
     layerTests/ReduceSumTestImpl.cpp
     layerTests/ReduceSumTestImpl.hpp
     layerTests/ReshapeTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index fcb1f71436..0690637500 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -51,6 +51,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
new file mode 100644
index 0000000000..4fb0732141
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.cpp
@@ -0,0 +1,345 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ReduceProdTestImpl.hpp"
+
+#include
+#include
+#include
+
+#include
+
+namespace
+{
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<float, 4> ReduceTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
+    const armnn::TensorInfo inputTensorInfo,
+    const armnn::TensorInfo outputTensorInfo,
+    const std::vector<float>& inputData,
+    const std::vector<float>& outputData,
+    const std::vector<int32_t> vAxis,
+    const armnn::ReduceOperation reduceOperation,
+    bool keepDims = false)
+{
+    IgnoreUnused(memoryManager);
+    auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
+
+    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ReduceQueueDescriptor descriptor;
+    std::vector<uint32_t> updated_idx;
+    uint32_t resolvedAxis = 0;
+    for (uint32_t i = 0; i < vAxis.size(); ++i)
+    {
+        if (vAxis[i] < 0)
+        {
+            resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
+        } else
+        {
+            resolvedAxis = static_cast<uint32_t>(vAxis[i]);
+        }
+
+        updated_idx.push_back(resolvedAxis);
+    }
+
+    descriptor.m_Parameters.m_vAxis = updated_idx;
+    descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
+    descriptor.m_Parameters.m_KeepDims = keepDims;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReduce(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
+
+    return LayerTestResult<float, 4>(actualOutput,
+                                     outputData,
+                                     outputHandle->GetShape(),
+                                     outputTensorInfo.GetShape());
+}
+
+} // namespace
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<float, 4> ReduceProdSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<float> outputValues({ 7200.0f });
+
+    return ReduceTestCommon<ArmnnType>(workloadFactory,
+                                       memoryManager,
+                                       tensorHandleFactory,
+                                       inputTensorInfo,
+                                       outputTensorInfo,
+                                       inputValues,
+                                       outputValues,
+                                       { -1 },
+                                       armnn::ReduceOperation::Prod);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<float, 4> ReduceProdSingleAxisTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 1, 2, 4 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+                                     10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f,
+                                     100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f, 700.0f, 800.0f
+                                   });
+    std::vector<float> outputValues({ 1000.0f, 8000.0f, 27000.0f, 64000.0f, 125000.0f, 216000.0f, 343000.0f, 512000.0f
+                                    });
+
+    return ReduceTestCommon<ArmnnType>(workloadFactory,
+                                       memoryManager,
+                                       tensorHandleFactory,
+                                       inputTensorInfo,
+                                       outputTensorInfo,
+                                       inputValues,
+                                       outputValues,
+                                       { 1 },
+                                       armnn::ReduceOperation::Prod);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<float, 4> ReduceProdSingleAxisTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
+    const armnn::TensorShape outputShape{ 1, 1, 3, 4};
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues( {7, 8, 6, 1,
+                                     1, 1, 8, 7,
+                                     3, 7, 7, 7,
+
+                                     6, 8, 4, 7,
+                                     3, 8, 7, 3,
+                                     5, 8, 8, 8,
+
+
+                                     7, 8, 2, 7,
+                                     3, 8, 5, 6,
+                                     8, 4, 2, 7,
+
+                                     1, 6, 7, 2,
+                                     8, 3, 3, 1,
+                                     7, 6, 2, 6,
+
+
+                                     5, 3, 4, 8,
+                                     7, 8, 2, 4,
+                                     6, 6, 2, 8,
+
+                                     2, 2, 7, 2,
+                                     5, 3, 6, 3,
+                                     6, 1, 8, 8});
+    std::vector<float> outputValues({ 2940.f, 18432.f, 9408.f, 1568.f,
+                                      2520.f, 4608.f, 10080.f, 1512.f,
+                                      30240.f, 8064.f, 3584.f, 150528.f });
+
+    return ReduceTestCommon<ArmnnType>(workloadFactory,
+                                       memoryManager,
+                                       tensorHandleFactory,
+                                       inputTensorInfo,
+                                       outputTensorInfo,
+                                       inputValues,
+                                       outputValues,
+                                       { 1 },
+                                       armnn::ReduceOperation::Prod);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<float, 4> ReduceProdSingleAxisTest3(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const armnn::TensorShape inputShape{ 1, 6, 3, 4 };
+    const armnn::TensorShape outputShape{ 1, 6, 3, 1 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues({ 7, 8, 6, 1,
+                                     1, 1, 8, 7,
+                                     3, 7, 7, 7,
+
+                                     6, 8, 4, 7,
+                                     3, 8, 7, 3,
+                                     5, 8, 8, 8,
+
+
+                                     7, 8, 2, 7,
+                                     3, 8, 5, 6,
+                                     8, 4, 2, 7,
+
+                                     1, 6, 7, 2,
+                                     8, 3, 3, 1,
+                                     7, 6, 2, 6,
+
+
+                                     5, 3, 4, 8,
+                                     7, 8, 2, 4,
+                                     6, 6, 2, 8,
+
+                                     2, 2, 7, 2,
+                                     5, 3, 6, 3,
+                                     6, 1, 8, 8 });
+    std::vector<float> outputValues({ 336.f, 56.f, 1029.f,
+                                      1344.f, 504.f, 2560.f,
+
+                                      784.f, 720.f, 448.f,
+                                      84.f, 72.f, 504.f,
+
+                                      480.f, 448.f, 576.f,
+                                      56.f, 270.f, 384.f });
+
+    return ReduceTestCommon<ArmnnType>(workloadFactory,
+                                       memoryManager,
+                                       tensorHandleFactory,
+                                       inputTensorInfo,
+                                       outputTensorInfo,
+                                       inputValues,
+                                       outputValues,
+                                       { 3 },
+                                       armnn::ReduceOperation::Prod,
+                                       true);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<float, 4> ReduceProdMultipleAxisTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 1, 1, 4 };
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.0f);
+        inputTensorInfo.SetQuantizationOffset(0);
+    }
+
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+    std::vector<float> inputValues({ 1.0f, 2.0f, 3.0f, 4.0f,
+                                     5.0f, 6.0f, 7.0f, 8.0f,
+
+                                     10.0f, 20.0f, 30.0f, 40.0f,
+                                     50.0f, 60.0f, 70.0f, 80.0f,
+
+                                     11.0f, 22.0f, 33.0f, 44.0f,
+                                     55.0f, 66.0f, 77.0f, 88.0f });
+    std::vector<float> outputValues({ 1512500.f, 20908800.f, 112058100.f, 396492800.f });
+
+    return ReduceTestCommon<ArmnnType>(workloadFactory,
+                                       memoryManager,
+                                       tensorHandleFactory,
+                                       inputTensorInfo,
+                                       outputTensorInfo,
+                                       inputValues,
+                                       outputValues,
+                                       { 1, 2 },
+                                       armnn::ReduceOperation::Prod);
+}
+
+// Explicit template specializations
+
+template LayerTestResult<float, 4>
+ReduceProdSimpleTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<float, 4>
+ReduceProdSingleAxisTest1<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<float, 4>
+ReduceProdSingleAxisTest2<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<float, 4>
+ReduceProdSingleAxisTest3<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<float, 4>
+ReduceProdMultipleAxisTest<armnn::DataType::Float32>(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp
new file mode 100644
index 0000000000..97e94978f7
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/ReduceProdTestImpl.hpp
@@ -0,0 +1,43 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include
+
+#include
+#include
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<float, 4> ReduceProdSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<float, 4> ReduceProdSingleAxisTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<float, 4> ReduceProdSingleAxisTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<float, 4> ReduceProdSingleAxisTest3(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<float, 4> ReduceProdMultipleAxisTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index f5b26d37f3..2bb63b5347 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -1894,6 +1894,15 @@ ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
     ReduceSumSingleAxisFloat32_3, ClContextControlFixture, ReduceSumSingleAxisTest3<DataType::Float32>)
 
+// ReduceProd
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceProdFloat32, ClContextControlFixture, ReduceProdSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceProdSingleAxisFloat32_1, ClContextControlFixture, ReduceProdSingleAxisTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceProdSingleAxisFloat32_2, ClContextControlFixture, ReduceProdSingleAxisTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceProdSingleAxisFloat32_3, ClContextControlFixture, ReduceProdSingleAxisTest3<DataType::Float32>)
+
 // ReduceMax
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceMaxFloat32, ClContextControlFixture, ReduceMaxSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 6985776bf0..75f9648f2d 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1399,6 +1399,10 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_2, ReduceSumSingleAxisT
 // Moved to NeonLayerTests_NDK_Bug.cpp
 //ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_3, ReduceSumSingleAxisTest3<DataType::Float32>)
 
+// ReduceProd
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceProdSingleAxisFloat32_1, ReduceProdSingleAxisTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceProdSingleAxisFloat32_2, ReduceProdSingleAxisTest2<DataType::Float32>)
+
 // ReduceMax
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxFloat32, ReduceMaxSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxNegativeAxisFloat32, ReduceMaxNegativeAxisTest<DataType::Float32>)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index e906b2962c..18490e29c7 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -2321,6 +2321,13 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_2, ReduceSumSingleAxisT
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_3, ReduceSumSingleAxisTest3<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumMultipleAxisFloat32, ReduceSumMultipleAxisTest<DataType::Float32>)
 
+// ReduceProd
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceProdFloat32, ReduceProdSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceProdSingleAxisFloat32_1, ReduceProdSingleAxisTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceProdSingleAxisFloat32_2, ReduceProdSingleAxisTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceProdSingleAxisFloat32_3, ReduceProdSingleAxisTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceProdMultipleAxisFloat32, ReduceProdMultipleAxisTest<DataType::Float32>)
+
 // ReduceMax
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxFloat32, ReduceMaxSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxNegativeAxisFloat32, ReduceMaxNegativeAxisTest<DataType::Float32>)
diff --git a/src/backends/reference/workloads/Reduce.cpp b/src/backends/reference/workloads/Reduce.cpp
index 8bf422aea3..3f929c43bc 100644
--- a/src/backends/reference/workloads/Reduce.cpp
+++ b/src/backends/reference/workloads/Reduce.cpp
@@ -9,7 +9,6 @@
 
 #include
 
-#include
 #include
 #include
 #include
@@ -87,6 +86,9 @@ void Reduce(const TensorInfo& inputInfo,
         case ReduceOperation::Sum:
             std::fill(tempOut.begin(), tempOut.end(), 0.0f);
             break;
+        case ReduceOperation::Prod:
+            std::fill(tempOut.begin(), tempOut.end(), 1.0f);
+            break;
         case ReduceOperation::Max:
             std::fill(tempOut.begin(), tempOut.end(), -1 * std::numeric_limits<float>::max());
             break;
@@ -119,23 +121,30 @@ void Reduce(const TensorInfo& inputInfo,
                                                             numResolvedAxis, resolvedAxis);
             input[inputOffset];
             auto inputValue = input.Get();
-            if (reduceOperation == ReduceOperation::Max)
-            {
-                if (inputValue > tempOut[outputOffset])
-                {
-                    tempOut[outputOffset] = inputValue;
-                }
-            }
-            else if (reduceOperation == ReduceOperation::Min)
-            {
-                if (inputValue < tempOut[outputOffset])
-                {
-                    tempOut[outputOffset] = inputValue;
-                }
-            }
-            else
+            switch(reduceOperation)
             {
-                tempOut[outputOffset] += inputValue;
+                case ReduceOperation::Mean:
+                case ReduceOperation::Sum:
+                    tempOut[outputOffset] += inputValue;
+                    break;
+                case ReduceOperation::Prod:
+                    tempOut[outputOffset] *= inputValue;
+                    break;
+                case ReduceOperation::Max:
+                    if (inputValue > tempOut[outputOffset])
+                    {
+                        tempOut[outputOffset] = inputValue;
+                    }
+                    break;
+                case ReduceOperation::Min:
+                    if (inputValue < tempOut[outputOffset])
+                    {
+                        tempOut[outputOffset] = inputValue;
+                    }
+                    break;
+                default:
+                    throw armnn::InvalidArgumentException("Unknown reduce method: " +
+                        std::to_string(static_cast<int>(reduceOperation)));
             }
         }
--
cgit v1.2.1
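
Editor's note: for readers who want to exercise the new reduce operation outside the unit tests above, the sketch below builds a one-layer network through the public Arm NN graph API and optimizes it for the reference backend. It is not part of this change; it assumes the pre-existing INetwork::AddReduceLayer entry point and the ReduceDescriptor fields (m_vAxis, m_KeepDims, m_ReduceOperation) that ReduceProdTestImpl.cpp uses, and the layer names and shapes are illustrative only.

// reduce_prod_sketch.cpp — hypothetical usage sketch, not part of the patch.
#include <armnn/ArmNN.hpp>

int main()
{
    using namespace armnn;

    // Descriptor mirrors the fields set in ReduceProdTestImpl.cpp:
    // reduce over the last axis, keep the reduced dimension, use the new Prod value.
    ReduceDescriptor reduceDesc;
    reduceDesc.m_vAxis           = { 3 };
    reduceDesc.m_KeepDims        = true;
    reduceDesc.m_ReduceOperation = ReduceOperation::Prod;

    // Build input -> reduce -> output.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    IConnectableLayer* reduce = network->AddReduceLayer(reduceDesc, "reduceProd");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(reduce->GetInputSlot(0));
    reduce->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Shapes chosen to match ReduceProdSimpleTest: [1,1,1,5] -> [1,1,1,1].
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 1, 1, 5 }), DataType::Float32));
    reduce->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({ 1, 1, 1, 1 }), DataType::Float32));

    // Optimize for CpuRef; running the graph then follows the usual
    // IRuntime::LoadNetwork / EnqueueWorkload flow.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime = IRuntime::Create(options);
    IOptimizedNetworkPtr optNet = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());

    return optNet ? 0 : 1;
}

Swapping Compute::CpuRef for Compute::CpuAcc or Compute::GpuAcc would route the same descriptor through the NEON or CL reduction workloads, which this patch maps to arm_compute::ReductionOperation::PROD.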