From 98e383eadf4e670d057ad725c7fe7924fea8e36b Mon Sep 17 00:00:00 2001
From: Idriss Chaouch
Date: Mon, 28 Aug 2023 14:28:31 +0100
Subject: IVGCVSW-7525 Add broadcast_to operator

Signed-off-by: Idriss Chaouch
Signed-off-by: Narumol Prangnawarat
Change-Id: I94ec5f9120b2d736fdf98d00ec5137a4efd739b8
---
 src/backends/backendsCommon/WorkloadData.cpp       |  26 +
 src/backends/backendsCommon/WorkloadFactory.cpp    |  12 +
 src/backends/backendsCommon/common.mk              |   1 +
 .../test/BroadcastToEndToEndTestImpl.hpp           | 149 +++++
 src/backends/backendsCommon/test/CMakeLists.txt    |   3 +
 .../test/ElementwiseUnaryEndToEndTestImpl.hpp      |   2 +-
 .../test/IsLayerSupportedTestImpl.hpp              |   2 +
 src/backends/backendsCommon/test/LayerTests.hpp    |   2 +
 .../test/layerTests/BroadcastToTestImpl.cpp        | 636 +++++++++++++++++++++
 .../test/layerTests/BroadcastToTestImpl.hpp        |  46 ++
 src/backends/reference/RefLayerSupport.cpp         |  53 +-
 src/backends/reference/RefLayerSupport.hpp         |   5 +
 src/backends/reference/RefWorkloadFactory.cpp      |   5 +
 src/backends/reference/backend.mk                  |   1 +
 src/backends/reference/test/RefEndToEndTests.cpp   |  12 +
 src/backends/reference/test/RefLayerTests.cpp      |  56 ++
 src/backends/reference/workloads/CMakeLists.txt    |   2 +
 .../reference/workloads/RefBroadcastToWorkload.cpp |  48 ++
 .../reference/workloads/RefBroadcastToWorkload.hpp |  25 +
 src/backends/reference/workloads/RefWorkloads.hpp  |   1 +
 20 files changed, 1077 insertions(+), 10 deletions(-)
 create mode 100644 src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp
 create mode 100644 src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp
 create mode 100644 src/backends/reference/workloads/RefBroadcastToWorkload.cpp
 create mode 100644 src/backends/reference/workloads/RefBroadcastToWorkload.hpp

(limited to 'src/backends')

diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index d0f6eea3d4..021435ea40 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -4417,4 +4417,30 @@ void TileQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
 }
 
+void BroadcastToQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string& descriptorName{"BroadcastToQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo, descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::Float16,
+        DataType::QAsymmS8,
+        DataType::QAsymmU8,
+        DataType::QSymmS8,
+        DataType::QSymmS16,
+        DataType::Signed32,
+        DataType::Signed64
+    };
+
+    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+    ValidateDataTypes(outputTensorInfo, supportedTypes, descriptorName);
+}
+
 } // namespace armnn
\ No newline at end of file
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 6ff237bc12..2538211a41 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -183,6 +183,18 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
                                                      reason);
             break;
         }
+        case LayerType::BroadcastTo:
+        {
+            auto cLayer = PolymorphicDowncast<const BroadcastToLayer*>(&layer);
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo(); + const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo(); + + result = layerSupportObject.IsBroadcastToSupported(OverrideDataType(input, dataType), + OverrideDataType(output, dataType), + cLayer->GetParameters(), + reason); + break; + } case LayerType::Cast: { const TensorInfo& input = layer.GetInputSlot(0).GetTensorInfo(); diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk index 303f8aca9c..4876f02ce0 100644 --- a/src/backends/backendsCommon/common.mk +++ b/src/backends/backendsCommon/common.mk @@ -48,6 +48,7 @@ COMMON_TEST_SOURCES := \ test/layerTests/ArgMinMaxTestImpl.cpp \ test/layerTests/BatchMatMulTestImpl.cpp \ test/layerTests/BatchNormalizationTestImpl.cpp \ + test/layerTests/BroadcastToTestImpl.cpp \ test/layerTests/CastTestImpl.cpp \ test/layerTests/ChannelShuffleTestImpl.cpp \ test/layerTests/ComparisonTestImpl.cpp \ diff --git a/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp new file mode 100644 index 0000000000..3b2c47fb94 --- /dev/null +++ b/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp @@ -0,0 +1,149 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// +#pragma once +#include "armnn/INetwork.hpp" +#include "armnnUtils/QuantizeHelper.hpp" +#include "ElementwiseBinaryEndToEndTestImpl.hpp" +#include "Optimizer.hpp" +#include +#include +#include + +namespace +{ + using namespace armnn; + armnn::INetworkPtr CreateBroadcastToNetwork(BroadcastToDescriptor& descriptor, + const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& outputInfo) + { + INetworkPtr network(INetwork::Create()); + IConnectableLayer* inputLayer = network->AddInputLayer(0, "input"); + IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to"); + IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output"); + Connect(inputLayer, broadcastLayer, inputInfo, 0, 0); + Connect(broadcastLayer, outputLayer, outputInfo, 0, 0); + return network; + } + + armnn::INetworkPtr CreateBroadcastToNetworkWithElementWiseBinary(BroadcastToDescriptor& descriptor, + const ElementwiseBinaryDescriptor& + elementWiseDescriptor, + const armnn::TensorInfo& inputInfo, + const armnn::TensorInfo& inputInfoElementWise, + const armnn::TensorInfo& outputInfo) + { + INetworkPtr network(INetwork::Create()); + IConnectableLayer* inputLayer = network->AddInputLayer(0, "input"); + IConnectableLayer* inputLayerElementWise = network->AddInputLayer(1, "inputElementWiseBinary"); + IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to"); + IConnectableLayer* multiplicationLayer = + network->AddElementwiseBinaryLayer(elementWiseDescriptor, + "multiplication"); + IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output"); + Connect(inputLayer, broadcastLayer, inputInfo, 0, 0); + Connect(inputLayerElementWise, multiplicationLayer, + inputInfoElementWise, 0, 1); + Connect(broadcastLayer, multiplicationLayer, inputInfo, 0, 0); + Connect(multiplicationLayer, outputLayer, outputInfo, 0, 0); + return network; + } + + template > + void BroadcastToEndToEnd(const std::vector& backends) + { + float qScale = 1.0f; + int32_t qOffset = 0; + bool qConst = true; + + const TensorShape inputTensorShape = { {1, 4} }; + const TensorShape outputTensorShape = { {4, 4} }; + + TensorInfo 
inputInfo (inputTensorShape, ArmnnType, qScale, + qOffset, qConst); + TensorInfo outputInfo (outputTensorShape, ArmnnType,qScale, + qOffset); + + std::vector inputData = armnnUtils::QuantizedVector({ + 65, 144, 91, 161 + }, qScale, qOffset); + + std::vector expectedOutputData = armnnUtils::QuantizedVector({ + 65, 144, 91, 161, + 65, 144, 91, 161, + 65, 144, 91, 161, + 65, 144, 91, 161 + }, qScale, qOffset); + + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 })); + CHECK(descriptor.m_BroadcastToShape == outputTensorShape); + INetworkPtr network = CreateBroadcastToNetwork(descriptor, inputInfo, outputInfo); + + std::map> inputTensor = { { 0, inputData } }; + std::map> expectedOutputTensor = { { 0, expectedOutputData } }; + EndToEndLayerTestImpl(std::move(network),inputTensor, + expectedOutputTensor, backends); + } + + template > + void BroadcastToEndToEndElementWiseBinary(const std::vector& backends) + { + float qScale = 1.0f; + int32_t qOffset = 0; + bool qConst = true; + + const TensorShape inputTensorShape = { {1, 4} }; + const TensorShape outputTensorShape = { {4, 4} }; + + const TensorInfo inputInfo (inputTensorShape, ArmnnType, qScale, + qOffset, qConst); + const TensorInfo inputInfoElementWise (outputTensorShape, ArmnnType, qScale, + qOffset, qConst); + const TensorInfo outputInfo (outputTensorShape, ArmnnType,qScale, + qOffset); + + std::vector inputData = armnnUtils::QuantizedVector({ + 65, 144, 91, 161 + }, qScale, qOffset); + + std::vector inputDataElementWise = armnnUtils::QuantizedVector({ + 1, 1, 1, 1, + 1, 1, 1, 1, + 1, 1, 1, 1, + 1, 1, 1, 1 + }, qScale, qOffset); + + std::vector expectedOutputData = armnnUtils::QuantizedVector({ + 65, 144, 91, 161, + 65, 144, 91, 161, + 65, 144, 91, 161, + 65, 144, 91, 161 + }, qScale, qOffset); + + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 })); + CHECK(descriptor.m_BroadcastToShape == outputTensorShape); + INetworkPtr network = CreateBroadcastToNetworkWithElementWiseBinary(descriptor, + BinaryOperation::Mul, + inputInfo, + inputInfoElementWise, + outputInfo); + // Create ArmNN runtime + IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); + + // Optimise ArmNN network + IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuRef}, + run->GetDeviceSpec()); + + Graph& graph = GetGraphForTesting(optNet.get()); + + Optimizer::Pass(graph, + armnn::MakeOptimizations(armnn::optimizations::BroadcastToOptimizationLayer())); + + std::map> inputTensor = { { 0, inputData }, {1, inputDataElementWise} }; + std::map> expectedOutputTensor = { { 0, expectedOutputData } }; + EndToEndLayerTestImpl(std::move(network),inputTensor, + expectedOutputTensor, backends); + } + +} // anonymous namespace \ No newline at end of file diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt index 8f3a22d53b..ed95bcf399 100644 --- a/src/backends/backendsCommon/test/CMakeLists.txt +++ b/src/backends/backendsCommon/test/CMakeLists.txt @@ -10,6 +10,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources BackendIdTests.cpp BackendProfilingTests.cpp BackendRegistryTests.cpp + BroadcastToEndToEndTestImpl.hpp ChannelShuffleEndToEndTestImpl.hpp ComparisonEndToEndTestImpl.hpp CompatibilityTests.cpp @@ -79,6 +80,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources layerTests/BatchNormalizationTestImpl.cpp layerTests/BatchNormalizationTestImpl.hpp layerTests/BatchToSpaceNdTestImpl.hpp + layerTests/BroadcastToTestImpl.cpp + layerTests/BroadcastToTestImpl.hpp 
layerTests/CastTestImpl.cpp layerTests/CastTestImpl.hpp layerTests/ChannelShuffleTestImpl.cpp diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp index 9586417407..9d05a64ce8 100644 --- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp +++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp @@ -94,7 +94,7 @@ void ElementwiseUnarySimpleEndToEnd(const std::vector& backends, std::map> inputTensorData = {{ 0, qInputData }}; std::map> expectedOutputData = {{ 0, qExpectedOutput }}; - EndToEndLayerTestImpl(move(net), inputTensorData, expectedOutputData, backends); + EndToEndLayerTestImpl(std::move(net), inputTensorData, expectedOutputData, backends); } } // anonymous namespace diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp index e8a2ec6931..9f472e9f28 100644 --- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp +++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp @@ -634,6 +634,8 @@ DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization) DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd) +DECLARE_LAYER_POLICY_2_PARAM(BroadcastTo) + DECLARE_LAYER_POLICY_1_PARAM(Cast) DECLARE_LAYER_POLICY_2_PARAM(ChannelShuffle) diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp index 3f8d045c06..015d25ef3e 100644 --- a/src/backends/backendsCommon/test/LayerTests.hpp +++ b/src/backends/backendsCommon/test/LayerTests.hpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -79,3 +80,4 @@ #include #include #include + diff --git a/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp new file mode 100644 index 0000000000..b4e8a4c85d --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp @@ -0,0 +1,636 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#include "BroadcastToTestImpl.hpp" +#include + +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +namespace +{ +template +LayerTestResult BroadcastToTestImpl( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory, + armnn::BroadcastToDescriptor descriptor, + armnn::TensorInfo& inputInfo, + armnn::TensorInfo& outputInfo, + std::vector& inputData, + std::vector& expectedOutputData) +{ + + CHECK(descriptor.m_BroadcastToShape == outputInfo.GetShape()); + + LayerTestResult result(outputInfo); + std::vector outputActual(outputInfo.GetNumElements()); + + armnn::BroadcastToQueueDescriptor queueDescriptor; + queueDescriptor.m_Parameters = std::move(descriptor); + armnn::WorkloadInfo workloadInfo; + + std::unique_ptr inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo); + std::unique_ptr outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo); + + AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get()); + AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get()); + + const armnn::BackendId& backend = workloadFactory.GetBackendId(); + armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend); + + auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BroadcastTo, queueDescriptor, workloadInfo); + + inputHandle->Allocate(); + outputHandle->Allocate(); + + CopyDataToITensorHandle(inputHandle.get(), inputData.data()); + + workload->PostAllocationConfigure(); + ExecuteWorkload(*workload, memoryManager); + + CopyDataFromITensorHandle(outputActual.data(), outputHandle.get()); + return LayerTestResult(outputActual, + expectedOutputData, + outputHandle->GetShape(), + outputInfo.GetShape()); +} +} + +template +LayerTestResult BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape( {1, 4} )); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorShape inputShape = { 1, 1 }; + armnn::TensorShape outputShape = { 1, 4 }; + + armnn::TensorInfo inputInfo(inputShape, ArmnnType); + armnn::TensorInfo outputInfo(outputShape, ArmnnType); + + std::vector input = armnnUtils::QuantizedVector( + { + 1.f + }, qScale, qOffset); + + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + 1.f, 1.f, + 1.f, 1.f + }, qScale, qOffset); + + return BroadcastToTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + inputInfo, + outputInfo, + input, + expectedOutput); +} + +template +LayerTestResult BroadcastTo2dAxis0Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 3 })); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorShape inputShape = { 1, 3 }; + armnn::TensorShape outputShape = { 4, 3 }; + + armnn::TensorInfo inputInfo(inputShape, ArmnnType); + armnn::TensorInfo outputInfo(outputShape, ArmnnType); + + std::vector input = armnnUtils::QuantizedVector( + { + 0.f, 1.f, 2.f + }, qScale, qOffset); + + + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + 
0.f, 1.f, 2.f, + 0.f, 1.f, 2.f, + 0.f, 1.f, 2.f, + 0.f, 1.f, 2.f + }, qScale, qOffset); + + return BroadcastToTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + inputInfo, + outputInfo, + input, + expectedOutput); +} + +template +LayerTestResult BroadcastTo2dAxis1Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 3, 4 })); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorShape inputShape = { 3, 1 }; + armnn::TensorShape outputShape = { 3, 4 }; + + armnn::TensorInfo inputInfo(inputShape, ArmnnType); + armnn::TensorInfo outputInfo(outputShape, ArmnnType); + + std::vector input = armnnUtils::QuantizedVector( + { + 0.f, 1.f, 2.f + }, qScale, qOffset); + + + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + 0.f, 0.f, 0.f, 0.f, + 1.f, 1.f, 1.f, 1.f, + 2.f, 2.f, 2.f, 2.f + }, qScale, qOffset); + + return BroadcastToTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + inputInfo, + outputInfo, + input, + expectedOutput); +} + +template +LayerTestResult BroadcastTo3dAxis0Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 2, 1, 3 })); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorShape inputShape = { 1, 1, 3 }; + armnn::TensorShape outputShape = { 2, 1, 3 }; + + armnn::TensorInfo inputInfo(inputShape, ArmnnType); + armnn::TensorInfo outputInfo(outputShape, ArmnnType); + std::vector input = armnnUtils::QuantizedVector( + { + 1.1f, 2.12f, 3.3f + }, qScale, qOffset); + + + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + 1.1f, 2.12f, 3.3f, + 1.1f, 2.12f, 3.3f + }, qScale, qOffset); + + return BroadcastToTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + inputInfo, + outputInfo, + input, + expectedOutput); +} + +template +LayerTestResult BroadcastTo3dAxis1Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 1, 3, 3 })); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorShape inputShape = { 1, 1, 3 }; + armnn::TensorShape outputShape = { 1, 3, 3 }; + + armnn::TensorInfo inputInfo(inputShape, ArmnnType); + armnn::TensorInfo outputInfo(outputShape, ArmnnType); + std::vector input = armnnUtils::QuantizedVector( + { + 1.1f, 2.12f, 3.3f + }, qScale, qOffset); + + + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + 1.1f, 2.12f, 3.3f, + 1.1f, 2.12f, 3.3f, + 1.1f, 2.12f, 3.3f + }, qScale, qOffset); + + return BroadcastToTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + inputInfo, + outputInfo, + input, + expectedOutput); +} + +template +LayerTestResult BroadcastTo3dAxis2Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 1, 3, 3 })); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorShape inputShape = { 
1, 3, 1 }; + armnn::TensorShape outputShape = { 1, 3, 3 }; + + armnn::TensorInfo inputInfo(inputShape, ArmnnType); + armnn::TensorInfo outputInfo(outputShape, ArmnnType); + std::vector input = armnnUtils::QuantizedVector( + { + 1.1f, 2.12f, 3.3f + }, qScale, qOffset); + + + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + 1.1f, 1.1f, 1.1f, + 2.12f, 2.12f, 2.12f, + 3.3f, 3.3f, 3.3f + }, qScale, qOffset); + + return BroadcastToTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + inputInfo, + outputInfo, + input, + expectedOutput); +} + +template +LayerTestResult BroadcastTo4dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory) +{ + auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 3, 1, 2, 3 })); + + float qScale = 1.0f; + int32_t qOffset = 0; + + armnn::TensorShape inputShape = { 1, 1, 1, 3 }; + armnn::TensorShape outputShape = { 3, 1, 2, 3 }; + + armnn::TensorInfo inputInfo(inputShape, ArmnnType); + armnn::TensorInfo outputInfo(outputShape, ArmnnType); + + std::vector input = armnnUtils::QuantizedVector( + { + 0.f, 1.f, 2.f + }, qScale, qOffset); + + + std::vector expectedOutput = armnnUtils::QuantizedVector( + { + 0.f, 1.f, 2.f, + 0.f, 1.f, 2.f, + 0.f, 1.f, 2.f, + 0.f, 1.f, 2.f, + 0.f, 1.f, 2.f, + 0.f, 1.f, 2.f + }, qScale, qOffset); + + return BroadcastToTestImpl(workloadFactory, + memoryManager, + tensorHandleFactory, + descriptor, + inputInfo, + outputInfo, + input, + expectedOutput); +} + +template LayerTestResult, 1> +BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +BroadcastTo4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 1> +BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + 
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +BroadcastTo4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 1> +BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +BroadcastTo4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 1> +BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const 
armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +BroadcastTo4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 1> +BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +BroadcastTo4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 1> +BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 
3> +BroadcastTo3dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +BroadcastTo4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 1> +BroadcastTo1dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 2> +BroadcastTo2dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis0Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis1Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 3> +BroadcastTo3dAxis2Test( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template LayerTestResult, 4> +BroadcastTo4dTest( + armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); diff --git a/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp new file mode 100644 index 0000000000..d8d0df447b --- /dev/null +++ b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp @@ -0,0 +1,46 @@ +// +// Copyright © 2023 Arm Ltd and Contributors. All rights reserved. 
+// SPDX-License-Identifier: MIT +// + +#pragma once + +#include +#include +#include +#include "ResolveType.hpp" + +template> +LayerTestResult BroadcastTo4dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult BroadcastTo3dAxis0Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult BroadcastTo3dAxis1Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult BroadcastTo3dAxis2Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult BroadcastTo2dAxis0Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult BroadcastTo2dAxis1Test(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); + +template> +LayerTestResult BroadcastTo1dTest(armnn::IWorkloadFactory& workloadFactory, + const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, + const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp index 0b1b9c7824..defdf0d807 100644 --- a/src/backends/reference/RefLayerSupport.cpp +++ b/src/backends/reference/RefLayerSupport.cpp @@ -100,6 +100,11 @@ bool RefLayerSupport::IsLayerSupported(const LayerType& type, infos[1], *(PolymorphicDowncast(&descriptor)), reasonIfUnsupported); + case LayerType::BroadcastTo: + return IsBroadcastToSupported(infos[0], + infos[1], + *(PolymorphicDowncast(&descriptor)), + reasonIfUnsupported); case LayerType::Comparison: return IsComparisonSupported(infos[0], infos[1], @@ -807,20 +812,50 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input, return supported; } +bool RefLayerSupport::IsBroadcastToSupported(const TensorInfo& input, + const TensorInfo& output, + const BroadcastToDescriptor& descriptor, + Optional reasonIfUnsupported) const +{ + IgnoreUnused(descriptor); + + bool supported = true; + + std::array supportedTypes + { + DataType::Float32, + DataType::Float16, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS8, + DataType::QSymmS16, + DataType::Signed32, + DataType::Signed64 + }; + + supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported, + "BroadcastTo: input type not supported."); + + supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported, + "BroadcastTo: output type not supported"); + + return supported; +} + bool RefLayerSupport::IsCastSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported) const { std::array supportedInputTypes = - { - DataType::Float32, - DataType::Float16, - DataType::QSymmS8, - DataType::QAsymmS8, - DataType::QAsymmU8, - DataType::QSymmS16, - DataType::Signed32 - }; + { + DataType::Float32, + 
DataType::Float16, + DataType::QSymmS8, + DataType::QAsymmS8, + DataType::QAsymmU8, + DataType::QSymmS16, + DataType::Signed32 + }; bool supported = true; supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported, diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp index 377afac62f..9e7175389a 100644 --- a/src/backends/reference/RefLayerSupport.hpp +++ b/src/backends/reference/RefLayerSupport.hpp @@ -54,6 +54,11 @@ public: const BatchToSpaceNdDescriptor& descriptor, Optional reasonIfUnsupported = EmptyOptional()) const; + bool IsBroadcastToSupported(const TensorInfo& input, + const TensorInfo& output, + const BroadcastToDescriptor& descriptor, + Optional reasonIfUnsupported = EmptyOptional()) const; + bool IsCastSupported(const TensorInfo& input, const TensorInfo& output, Optional reasonIfUnsupported = EmptyOptional()) const; diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp index fa2082d4f2..c4d9583a66 100644 --- a/src/backends/reference/RefWorkloadFactory.cpp +++ b/src/backends/reference/RefWorkloadFactory.cpp @@ -179,6 +179,11 @@ std::unique_ptr RefWorkloadFactory::CreateWorkload(LayerType type, = PolymorphicDowncast(&descriptor); return std::make_unique(*batchToSpaceNdQueueDescriptor, info); } + case LayerType::BroadcastTo: + { + auto broadcastToQueueDescriptor = PolymorphicDowncast(&descriptor); + return std::make_unique(*broadcastToQueueDescriptor, info); + } case LayerType::Cast : { auto castQueueDescriptor = PolymorphicDowncast(&descriptor); diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk index 7f047af930..27ca8f607a 100644 --- a/src/backends/reference/backend.mk +++ b/src/backends/reference/backend.mk @@ -53,6 +53,7 @@ BACKEND_SOURCES := \ workloads/RefBatchMatMulWorkload.cpp \ workloads/RefBatchNormalizationWorkload.cpp \ workloads/RefBatchToSpaceNdWorkload.cpp \ + workloads/RefBroadcastToWorkload.cpp \ workloads/RefCastWorkload.cpp \ workloads/RefChannelShuffleWorkload.cpp \ workloads/RefComparisonWorkload.cpp \ diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp index 09d6ac5d20..e503d3fb7f 100644 --- a/src/backends/reference/test/RefEndToEndTests.cpp +++ b/src/backends/reference/test/RefEndToEndTests.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -1728,4 +1729,15 @@ TEST_CASE("RefReshapeRemovalNCHWSecondEndToEnd") { ReshapeRemovalNCHWEndToEnd(defaultBackends, true, false); } + +// BroadcastTo +TEST_CASE("RefBroadcastToEndToEndFloat32") +{ + BroadcastToEndToEnd(defaultBackends); +} + +TEST_CASE("RefBroadcastToEndToEndWithElementWiseBinaryFloat32") +{ + BroadcastToEndToEndElementWiseBinary(defaultBackends); +} } diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp index a079bb712a..af4ed966b2 100644 --- a/src/backends/reference/test/RefLayerTests.cpp +++ b/src/backends/reference/test/RefLayerTests.cpp @@ -2823,4 +2823,60 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmLayerInt8NoCifgWithPeeph ARMNN_AUTO_TEST_CASE_WITH_THF(UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjection, UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest) +// Broadcast to +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestFloat32, BroadcastTo1dTest) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis0TestFloat32, BroadcastTo2dAxis0Test) 
+ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis1TestFloat32, BroadcastTo2dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestFloat32, BroadcastTo3dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestFloat32, BroadcastTo3dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestFloat32, BroadcastTo3dAxis2Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestFloat32, BroadcastTo4dTest) + +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestFloat16, BroadcastTo1dTest) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis0TestFloat16, BroadcastTo2dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis1TestFloat16, BroadcastTo2dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestFloat16, BroadcastTo3dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestFloat16, BroadcastTo3dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestFloat16, BroadcastTo3dAxis2Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestFloat16, BroadcastTo4dTest) + +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestQAsymmS8, BroadcastTo1dTest) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis0TestQAsymmS8, BroadcastTo2dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis1TestQAsymmS8, BroadcastTo2dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestQAsymmS8, BroadcastTo3dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestQAsymmS8, BroadcastTo3dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestQAsymmS8, BroadcastTo3dAxis2Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestQAsymmS8, BroadcastTo4dTest) + +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestQAsymmU8, BroadcastTo1dTest) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis0TestQAsymmU8, BroadcastTo2dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis1TestQAsymmU8, BroadcastTo2dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestQAsymmU8, BroadcastTo3dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestQAsymmU8, BroadcastTo3dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestQAsymmU8, BroadcastTo3dAxis2Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestQAsymmU8, BroadcastTo4dTest) + +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestQSymmS8, BroadcastTo1dTest) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis0TestQSymmS8, BroadcastTo2dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis1TestQSymmS8, BroadcastTo2dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestQSymmS8, BroadcastTo3dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestQSymmS8, BroadcastTo3dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestQSymmS8, BroadcastTo3dAxis2Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestQSymmS8, BroadcastTo4dTest) + +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestQSymmS16, BroadcastTo1dTest) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis0TestQSymmS16, BroadcastTo2dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis1TestQSymmS16, BroadcastTo2dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestQSymmS16, BroadcastTo3dAxis0Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestQSymmS16, BroadcastTo3dAxis1Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestQSymmS16, BroadcastTo3dAxis2Test) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestQSymmS16, BroadcastTo4dTest) + +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestSigned32, BroadcastTo1dTest) +ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis0TestSigned32, BroadcastTo2dAxis0Test) 
+ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo2dAxis1TestSigned32, BroadcastTo2dAxis1Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis0TestSigned32, BroadcastTo3dAxis0Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis1TestSigned32, BroadcastTo3dAxis1Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo3dAxis2TestSigned32, BroadcastTo3dAxis2Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestSigned32, BroadcastTo4dTest<DataType::Signed32>)
 }
\ No newline at end of file
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 9372568133..42f92aec1d 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -79,6 +79,8 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefBatchNormalizationWorkload.hpp
     RefBatchToSpaceNdWorkload.cpp
     RefBatchToSpaceNdWorkload.hpp
+    RefBroadcastToWorkload.cpp
+    RefBroadcastToWorkload.hpp
     RefCastWorkload.cpp
     RefCastWorkload.hpp
     RefChannelShuffleWorkload.cpp
diff --git a/src/backends/reference/workloads/RefBroadcastToWorkload.cpp b/src/backends/reference/workloads/RefBroadcastToWorkload.cpp
new file mode 100644
index 0000000000..3a6184d22e
--- /dev/null
+++ b/src/backends/reference/workloads/RefBroadcastToWorkload.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefBroadcastToWorkload.hpp"
+#include "RefWorkloadUtils.hpp"
+#include "Profiling.hpp"
+#include "Broadcast.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace armnn
+{
+
+RefBroadcastToWorkload::RefBroadcastToWorkload(const BroadcastToQueueDescriptor& descriptor, const WorkloadInfo& info)
+    : RefBaseWorkload(descriptor, info)
+{}
+
+void RefBroadcastToWorkload::Execute() const
+{
+    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
+}
+
+void RefBroadcastToWorkload::ExecuteAsync(ExecutionData& executionData)
+{
+    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
+    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
+}
+
+void RefBroadcastToWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefBroadcastToWorkload_Execute");
+    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
+
+    std::unique_ptr<Decoder<float>> input = MakeDecoder<float>(inputInfo, inputs[0]->Map());
+    std::unique_ptr<Encoder<float>> output = MakeEncoder<float>(outputInfo, outputs[0]->Map());
+
+    auto broadcastTo = [](float x)
+    {
+        return x;
+    };
+    BroadcastLoop(inputInfo.GetShape(), outputInfo.GetShape()).Unroll(broadcastTo,
+                                                                      0, *input, *output);
+}
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefBroadcastToWorkload.hpp b/src/backends/reference/workloads/RefBroadcastToWorkload.hpp
new file mode 100644
index 0000000000..ac947ae787
--- /dev/null
+++ b/src/backends/reference/workloads/RefBroadcastToWorkload.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "RefBaseWorkload.hpp"
+
+namespace armnn
+{
+class RefBroadcastToWorkload : public RefBaseWorkload<BroadcastToQueueDescriptor>
+{
+
+public:
+    explicit RefBroadcastToWorkload(const BroadcastToQueueDescriptor& descriptor,
+                                    const WorkloadInfo& info);
+
+    void Execute() const override;
+    void ExecuteAsync(ExecutionData& executionData) override;
+
+private:
+    void Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const;
+};
+} // namespace armnn
\ No newline at end of file
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index a36eae501c..98aa27b8a9 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -10,6 +10,7 @@
 #include "RefBatchMatMulWorkload.hpp"
 #include "RefBatchNormalizationWorkload.hpp"
 #include "RefBatchToSpaceNdWorkload.hpp"
+#include "RefBroadcastToWorkload.hpp"
 #include "RefCastWorkload.hpp"
 #include "RefChannelShuffleWorkload.hpp"
 #include "RefComparisonWorkload.hpp"
--
cgit v1.2.1
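
Usage sketch (not part of the patch): the BroadcastToEndToEnd test above already exercises the new layer; the stand-alone program below follows the same pattern through the public INetwork API, broadcasting a 1x4 Float32 tensor to 4x4 on the CpuRef backend. It assumes the public header armnn/ArmNN.hpp exposes BroadcastToDescriptor and INetwork::AddBroadcastToLayer as added by this change; tensor values and layer names are illustrative only.

// Minimal sketch: broadcast a 1x4 Float32 tensor to 4x4 on the reference backend.
// Mirrors CreateBroadcastToNetwork() from BroadcastToEndToEndTestImpl.hpp above.
#include <armnn/ArmNN.hpp>

#include <iostream>
#include <vector>

int main()
{
    using namespace armnn;

    // The descriptor holds only the target (output) shape of the broadcast.
    BroadcastToDescriptor descriptor(TensorShape({4, 4}));

    // Build the graph: input -> BroadcastTo -> output.
    INetworkPtr network = INetwork::Create();
    IConnectableLayer* input     = network->AddInputLayer(0, "input");
    IConnectableLayer* broadcast = network->AddBroadcastToLayer(descriptor, "broadcast_to");
    IConnectableLayer* output    = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(broadcast->GetInputSlot(0));
    broadcast->GetOutputSlot(0).Connect(output->GetInputSlot(0));
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 4}), DataType::Float32));
    broadcast->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({4, 4}), DataType::Float32));

    // Optimise for CpuRef and load the network into the runtime.
    IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
    std::vector<BackendId> backends = { Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());

    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Run once: the single input row should be repeated four times in the output.
    std::vector<float> inputData  = { 65.f, 144.f, 91.f, 161.f };
    std::vector<float> outputData(16);

    TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(netId, 0);
    inputTensorInfo.SetConstant(true);
    InputTensors  inputTensors  { { 0, ConstTensor(inputTensorInfo, inputData.data()) } };
    OutputTensors outputTensors { { 0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) } };
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    for (float v : outputData) { std::cout << v << " "; }
    std::cout << std::endl;
    return 0;
}

Per IsBroadcastToSupported and the queue-descriptor validation above, the same pattern applies to Float16, the quantized types (QAsymmS8, QAsymmU8, QSymmS8, QSymmS16) and Signed32/Signed64 on the reference backend.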