author     Idriss Chaouch <idriss.chaouch@arm.com>  2023-08-28 14:28:31 +0100
committer  Idriss Chaouch <idriss.chaouch@arm.com>  2023-08-31 11:26:28 +0100
commit     98e383eadf4e670d057ad725c7fe7924fea8e36b (patch)
tree       35acac15aa69ab405887289cb9674d388f06f96b /src/backends/backendsCommon/test
parent     2be039bce38a4fa436e8310dfe14ebfff20d57bd (diff)
download   armnn-98e383eadf4e670d057ad725c7fe7924fea8e36b.tar.gz

IVGCVSW-7525 Add broadcast_to operator

Signed-off-by: Idriss Chaouch <idriss.chaouch@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I94ec5f9120b2d736fdf98d00ec5137a4efd739b8
Diffstat (limited to 'src/backends/backendsCommon/test')
-rw-r--r--  src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp      | 149
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt                        |   3
-rw-r--r--  src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp  |   2
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp          |   2
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp                        |   2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp    | 636
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp    |  46
7 files changed, 839 insertions(+), 1 deletion(-)
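
BroadcastTo expands dimensions of size 1 in the input tensor so that the output matches the target shape carried in the BroadcastToDescriptor. A minimal standalone sketch of the {1, 4} -> {4, 4} case exercised by the tests below (plain C++, not the Arm NN API; the function name is illustrative):

// Standalone illustration (not Arm NN API) of broadcast_to semantics for the
// {1, 4} -> {4, 4} case used by the new tests: the single input row is simply
// repeated along the leading dimension. Function name is illustrative.
#include <array>
#include <cstddef>

std::array<std::array<float, 4>, 4> BroadcastRowTo4x4(const std::array<float, 4>& row)
{
    std::array<std::array<float, 4>, 4> output{};
    for (std::size_t i = 0; i < 4; ++i)
    {
        output[i] = row; // every output row is a copy of the broadcast input row
    }
    return output;
}
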
diff --git a/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..3b2c47fb94
--- /dev/null
+++ b/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp
@@ -0,0 +1,149 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+#include "armnn/INetwork.hpp"
+#include "armnnUtils/QuantizeHelper.hpp"
+#include "ElementwiseBinaryEndToEndTestImpl.hpp"
+#include "Optimizer.hpp"
+#include <CommonTestUtils.hpp>
+#include <ResolveType.hpp>
+#include <doctest/doctest.h>
+
+namespace
+{
+ using namespace armnn;
+ armnn::INetworkPtr CreateBroadcastToNetwork(BroadcastToDescriptor& descriptor,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo)
+ {
+ INetworkPtr network(INetwork::Create());
+ IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
+ IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to");
+ IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
+ Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
+ Connect(broadcastLayer, outputLayer, outputInfo, 0, 0);
+ return network;
+ }
+
+ armnn::INetworkPtr CreateBroadcastToNetworkWithElementWiseBinary(BroadcastToDescriptor& descriptor,
+ const ElementwiseBinaryDescriptor&
+ elementWiseDescriptor,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& inputInfoElementWise,
+ const armnn::TensorInfo& outputInfo)
+ {
+ INetworkPtr network(INetwork::Create());
+ IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
+ IConnectableLayer* inputLayerElementWise = network->AddInputLayer(1, "inputElementWiseBinary");
+ IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to");
+ IConnectableLayer* multiplicationLayer =
+ network->AddElementwiseBinaryLayer(elementWiseDescriptor,
+ "multiplication");
+ IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
+ Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
+ Connect(inputLayerElementWise, multiplicationLayer,
+ inputInfoElementWise, 0, 1);
+ Connect(broadcastLayer, multiplicationLayer, inputInfo, 0, 0);
+ Connect(multiplicationLayer, outputLayer, outputInfo, 0, 0);
+ return network;
+ }
+
+ template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+ void BroadcastToEndToEnd(const std::vector<BackendId>& backends)
+ {
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+ bool qConst = true;
+
+ const TensorShape inputTensorShape = { {1, 4} };
+ const TensorShape outputTensorShape = { {4, 4} };
+
+ TensorInfo inputInfo (inputTensorShape, ArmnnType, qScale,
+ qOffset, qConst);
+ TensorInfo outputInfo (outputTensorShape, ArmnnType,qScale,
+ qOffset);
+
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
+ CHECK(descriptor.m_BroadcastToShape == outputTensorShape);
+ INetworkPtr network = CreateBroadcastToNetwork(descriptor, inputInfo, outputInfo);
+
+ std::map<int, std::vector<T>> inputTensor = { { 0, inputData } };
+ std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),inputTensor,
+ expectedOutputTensor, backends);
+ }
+
+ template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+ void BroadcastToEndToEndElementWiseBinary(const std::vector<BackendId>& backends)
+ {
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+ bool qConst = true;
+
+ const TensorShape inputTensorShape = { {1, 4} };
+ const TensorShape outputTensorShape = { {4, 4} };
+
+ const TensorInfo inputInfo (inputTensorShape, ArmnnType, qScale,
+ qOffset, qConst);
+ const TensorInfo inputInfoElementWise (outputTensorShape, ArmnnType, qScale,
+ qOffset, qConst);
+ const TensorInfo outputInfo (outputTensorShape, ArmnnType,qScale,
+ qOffset);
+
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ std::vector<T> inputDataElementWise = armnnUtils::QuantizedVector<T>({
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1
+ }, qScale, qOffset);
+
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
+ CHECK(descriptor.m_BroadcastToShape == outputTensorShape);
+ INetworkPtr network = CreateBroadcastToNetworkWithElementWiseBinary(descriptor,
+ BinaryOperation::Mul,
+ inputInfo,
+ inputInfoElementWise,
+ outputInfo);
+ // Create ArmNN runtime
+ IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());
+
+ // Optimise ArmNN network
+ IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuRef},
+ run->GetDeviceSpec());
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
+ Optimizer::Pass(graph,
+ armnn::MakeOptimizations(armnn::optimizations::BroadcastToOptimizationLayer()));
+
+ std::map<int, std::vector<T>> inputTensor = { { 0, inputData }, {1, inputDataElementWise} };
+ std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network),inputTensor,
+ expectedOutputTensor, backends);
+ }
+
+} // anonymous namespace
\ No newline at end of file
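
The helpers above are meant to be driven from a backend's end-to-end test suite, which sits outside this diffstat. A hedged sketch of how a CpuRef test file might invoke them (the suite and case names are illustrative assumptions; only the helper functions come from this patch):

// Hypothetical usage in a backend end-to-end test file (e.g. for CpuRef).
// Suite and case names are assumptions for illustration; the helper functions
// are the ones defined in BroadcastToEndToEndTestImpl.hpp above.
#include <backendsCommon/test/BroadcastToEndToEndTestImpl.hpp>
#include <doctest/doctest.h>
#include <vector>

TEST_SUITE("RefEndToEnd")
{
TEST_CASE("RefBroadcastToEndToEndFloat32Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    BroadcastToEndToEnd<armnn::DataType::Float32>(backends);
}

TEST_CASE("RefBroadcastToEndToEndWithElementWiseBinaryFloat32Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    BroadcastToEndToEndElementWiseBinary<armnn::DataType::Float32>(backends);
}
}
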
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8f3a22d53b..ed95bcf399 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -10,6 +10,7 @@ list(APPEND armnnBackendsCommonUnitTests_sources
BackendIdTests.cpp
BackendProfilingTests.cpp
BackendRegistryTests.cpp
+ BroadcastToEndToEndTestImpl.hpp
ChannelShuffleEndToEndTestImpl.hpp
ComparisonEndToEndTestImpl.hpp
CompatibilityTests.cpp
@@ -79,6 +80,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/BatchNormalizationTestImpl.cpp
layerTests/BatchNormalizationTestImpl.hpp
layerTests/BatchToSpaceNdTestImpl.hpp
+ layerTests/BroadcastToTestImpl.cpp
+ layerTests/BroadcastToTestImpl.hpp
layerTests/CastTestImpl.cpp
layerTests/CastTestImpl.hpp
layerTests/ChannelShuffleTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index 9586417407..9d05a64ce8 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -94,7 +94,7 @@ void ElementwiseUnarySimpleEndToEnd(const std::vector<BackendId>& backends,
std::map<int, std::vector<TInput>> inputTensorData = {{ 0, qInputData }};
std::map<int, std::vector<TInput>> expectedOutputData = {{ 0, qExpectedOutput }};
- EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(move(net), inputTensorData, expectedOutputData, backends);
+ EndToEndLayerTestImpl<ArmnnInType, ArmnnInType>(std::move(net), inputTensorData, expectedOutputData, backends);
}
} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index e8a2ec6931..9f472e9f28 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -634,6 +634,8 @@ DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)
+DECLARE_LAYER_POLICY_2_PARAM(BroadcastTo)
+
DECLARE_LAYER_POLICY_1_PARAM(Cast)
DECLARE_LAYER_POLICY_2_PARAM(ChannelShuffle)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 3f8d045c06..015d25ef3e 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -13,6 +13,7 @@
#include <backendsCommon/test/layerTests/BatchMatMulTestImpl.hpp>
#include <backendsCommon/test/layerTests/BatchNormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp>
+#include <backendsCommon/test/layerTests/BroadcastToTestImpl.hpp>
#include <backendsCommon/test/layerTests/CastTestImpl.hpp>
#include <backendsCommon/test/layerTests/ChannelShuffleTestImpl.hpp>
#include <backendsCommon/test/layerTests/ComparisonTestImpl.hpp>
@@ -79,3 +80,4 @@
#include <backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp>
#include <backendsCommon/test/layerTests/TransposeTestImpl.hpp>
#include <backendsCommon/test/layerTests/UnidirectionalSequenceLstmTestImpl.hpp>
+
diff --git a/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp
new file mode 100644
index 0000000000..b4e8a4c85d
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.cpp
@@ -0,0 +1,636 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BroadcastToTestImpl.hpp"
+#include <vector>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/Workload.hpp>
+#include <armnn/backends/WorkloadData.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+
+#include <armnnTestUtils/WorkloadTestUtils.hpp>
+#include <armnnTestUtils/TensorCopyUtils.hpp>
+
+#include <armnn/BackendHelper.hpp>
+
+#include <armnnUtils/QuantizeHelper.hpp>
+#include <doctest/doctest.h>
+
+namespace
+{
+template<typename T, std::size_t NumDims>
+LayerTestResult<T, NumDims> BroadcastToTestImpl(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory,
+ armnn::BroadcastToDescriptor descriptor,
+ armnn::TensorInfo& inputInfo,
+ armnn::TensorInfo& outputInfo,
+ std::vector<T>& inputData,
+ std::vector<T>& expectedOutputData)
+{
+
+ CHECK(descriptor.m_BroadcastToShape == outputInfo.GetShape());
+
+ LayerTestResult<T, NumDims> result(outputInfo);
+ std::vector<T> outputActual(outputInfo.GetNumElements());
+
+ armnn::BroadcastToQueueDescriptor queueDescriptor;
+ queueDescriptor.m_Parameters = std::move(descriptor);
+ armnn::WorkloadInfo workloadInfo;
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
+
+ AddInputToWorkload(queueDescriptor, workloadInfo, inputInfo, inputHandle.get());
+ AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
+
+ const armnn::BackendId& backend = workloadFactory.GetBackendId();
+ armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
+
+ auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BroadcastTo, queueDescriptor, workloadInfo);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(outputActual.data(), outputHandle.get());
+ return LayerTestResult<T, NumDims>(outputActual,
+ expectedOutputData,
+ outputHandle->GetShape(),
+ outputInfo.GetShape());
+}
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 1> BroadcastTo1dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape( {1, 4} ));
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 1, 1 };
+ armnn::TensorShape outputShape = { 1, 4 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 1.f
+ }, qScale, qOffset);
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 1.f, 1.f,
+ 1.f, 1.f
+ }, qScale, qOffset);
+
+ return BroadcastToTestImpl<T, 1>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> BroadcastTo2dAxis0Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 3 }));
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 1, 3 };
+ armnn::TensorShape outputShape = { 4, 3 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f
+ }, qScale, qOffset);
+
+ return BroadcastToTestImpl<T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 2> BroadcastTo2dAxis1Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 3, 4 }));
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 3, 1 };
+ armnn::TensorShape outputShape = { 3, 4 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 0.f, 0.f, 0.f,
+ 1.f, 1.f, 1.f, 1.f,
+ 2.f, 2.f, 2.f, 2.f
+ }, qScale, qOffset);
+
+ return BroadcastToTestImpl<T, 2>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> BroadcastTo3dAxis0Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 2, 1, 3 }));
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 1, 1, 3 };
+ armnn::TensorShape outputShape = { 2, 1, 3 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 1.1f, 2.12f, 3.3f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 1.1f, 2.12f, 3.3f,
+ 1.1f, 2.12f, 3.3f
+ }, qScale, qOffset);
+
+ return BroadcastToTestImpl<T, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> BroadcastTo3dAxis1Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 1, 3, 3 }));
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 1, 1, 3 };
+ armnn::TensorShape outputShape = { 1, 3, 3 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 1.1f, 2.12f, 3.3f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 1.1f, 2.12f, 3.3f,
+ 1.1f, 2.12f, 3.3f,
+ 1.1f, 2.12f, 3.3f
+ }, qScale, qOffset);
+
+ return BroadcastToTestImpl<T, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 3> BroadcastTo3dAxis2Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 1, 3, 3 }));
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 1, 3, 1 };
+ armnn::TensorShape outputShape = { 1, 3, 3 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 1.1f, 2.12f, 3.3f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 1.1f, 1.1f, 1.1f,
+ 2.12f, 2.12f, 2.12f,
+ 3.3f, 3.3f, 3.3f
+ }, qScale, qOffset);
+
+ return BroadcastToTestImpl<T, 3>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> BroadcastTo4dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 3, 1, 2, 3 }));
+
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+
+ armnn::TensorShape inputShape = { 1, 1, 1, 3 };
+ armnn::TensorShape outputShape = { 3, 1, 2, 3 };
+
+ armnn::TensorInfo inputInfo(inputShape, ArmnnType);
+ armnn::TensorInfo outputInfo(outputShape, ArmnnType);
+
+ std::vector<T> input = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f
+ }, qScale, qOffset);
+
+
+ std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(
+ {
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f,
+ 0.f, 1.f, 2.f
+ }, qScale, qOffset);
+
+ return BroadcastToTestImpl<T, 4>(workloadFactory,
+ memoryManager,
+ tensorHandleFactory,
+ descriptor,
+ inputInfo,
+ outputInfo,
+ input,
+ expectedOutput);
+}
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 1>
+BroadcastTo1dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+BroadcastTo2dAxis0Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 2>
+BroadcastTo2dAxis1Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+BroadcastTo3dAxis0Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+BroadcastTo3dAxis1Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 3>
+BroadcastTo3dAxis2Test<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
+BroadcastTo4dTest<armnn::DataType::Float32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 1>
+BroadcastTo1dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
+BroadcastTo2dAxis0Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 2>
+BroadcastTo2dAxis1Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+BroadcastTo3dAxis0Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+BroadcastTo3dAxis1Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 3>
+BroadcastTo3dAxis2Test<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
+BroadcastTo4dTest<armnn::DataType::Float16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 1>
+BroadcastTo1dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+BroadcastTo2dAxis0Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 2>
+BroadcastTo2dAxis1Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+BroadcastTo3dAxis0Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+BroadcastTo3dAxis1Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 3>
+BroadcastTo3dAxis2Test<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmS8>, 4>
+BroadcastTo4dTest<armnn::DataType::QAsymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 1>
+BroadcastTo1dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+BroadcastTo2dAxis0Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 2>
+BroadcastTo2dAxis1Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+BroadcastTo3dAxis0Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+BroadcastTo3dAxis1Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 3>
+BroadcastTo3dAxis2Test<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QAsymmU8>, 4>
+BroadcastTo4dTest<armnn::DataType::QAsymmU8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 1>
+BroadcastTo1dTest<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 2>
+BroadcastTo2dAxis0Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 2>
+BroadcastTo2dAxis1Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 3>
+BroadcastTo3dAxis0Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 3>
+BroadcastTo3dAxis1Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 3>
+BroadcastTo3dAxis2Test<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS8>, 4>
+BroadcastTo4dTest<armnn::DataType::QSymmS8>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 1>
+BroadcastTo1dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+BroadcastTo2dAxis0Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 2>
+BroadcastTo2dAxis1Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+BroadcastTo3dAxis0Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+BroadcastTo3dAxis1Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 3>
+BroadcastTo3dAxis2Test<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::QSymmS16>, 4>
+BroadcastTo4dTest<armnn::DataType::QSymmS16>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 1>
+BroadcastTo1dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 2>
+BroadcastTo2dAxis0Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 2>
+BroadcastTo2dAxis1Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
+BroadcastTo3dAxis0Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
+BroadcastTo3dAxis1Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 3>
+BroadcastTo3dAxis2Test<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template LayerTestResult<armnn::ResolveType<armnn::DataType::Signed32>, 4>
+BroadcastTo4dTest<armnn::DataType::Signed32>(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp
new file mode 100644
index 0000000000..d8d0df447b
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/BroadcastToTestImpl.hpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnnTestUtils/LayerTestResult.hpp>
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/WorkloadFactory.hpp>
+#include "ResolveType.hpp"
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> BroadcastTo4dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> BroadcastTo3dAxis0Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> BroadcastTo3dAxis1Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 3> BroadcastTo3dAxis2Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> BroadcastTo2dAxis0Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 2> BroadcastTo2dAxis1Test(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 1> BroadcastTo1dTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                        const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
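
These declarations are instantiated in BroadcastToTestImpl.cpp for Float32, Float16, QAsymmS8, QAsymmU8, QSymmS8, QSymmS16 and Signed32. A hedged sketch of how a backend would typically hook a couple of them into its layer-test suite, assuming the ARMNN_AUTO_TEST_CASE_WITH_THF registration convention used by the existing Arm NN backend unit tests (the test names are illustrative):

// Hypothetical registration in a backend layer-test file (e.g. the CpuRef tests),
// assuming the ARMNN_AUTO_TEST_CASE_WITH_THF macro used for other layer tests.
// Test names are illustrative; the test functions are the ones declared above.
ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo1dTestFloat32, BroadcastTo1dTest<armnn::DataType::Float32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(BroadcastTo4dTestFloat32, BroadcastTo4dTest<armnn::DataType::Float32>)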