author    Idriss Chaouch <idriss.chaouch@arm.com>  2023-08-28 14:28:31 +0100
committer Idriss Chaouch <idriss.chaouch@arm.com>  2023-08-31 11:26:28 +0100
commit    98e383eadf4e670d057ad725c7fe7924fea8e36b (patch)
tree      35acac15aa69ab405887289cb9674d388f06f96b /src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp
parent    2be039bce38a4fa436e8310dfe14ebfff20d57bd (diff)
IVGCVSW-7525 Add broadcast_to operator
Signed-off-by: Idriss Chaouch <idriss.chaouch@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I94ec5f9120b2d736fdf98d00ec5137a4efd739b8
Diffstat (limited to 'src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp')
-rw-r--r--  src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp  149
1 file changed, 149 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp
new file mode 100644
index 0000000000..3b2c47fb94
--- /dev/null
+++ b/src/backends/backendsCommon/test/BroadcastToEndToEndTestImpl.hpp
@@ -0,0 +1,149 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+#include "armnn/INetwork.hpp"
+#include "armnnUtils/QuantizeHelper.hpp"
+#include "ElementwiseBinaryEndToEndTestImpl.hpp"
+#include "Optimizer.hpp"
+#include <CommonTestUtils.hpp>
+#include <ResolveType.hpp>
+#include <doctest/doctest.h>
+
+namespace
+{
+ using namespace armnn;
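+ // Builds a minimal Input -> BroadcastTo -> Output network for the end-to-end tests below.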
+ armnn::INetworkPtr CreateBroadcastToNetwork(BroadcastToDescriptor& descriptor,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& outputInfo)
+ {
+ INetworkPtr network(INetwork::Create());
+ IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
+ IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to");
+ IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
+ Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
+ Connect(broadcastLayer, outputLayer, outputInfo, 0, 0);
+ return network;
+ }
+
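+ // Builds Input -> BroadcastTo feeding input 0 of an ElementwiseBinary layer,
+ // with a second Input layer feeding input 1.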
+ armnn::INetworkPtr CreateBroadcastToNetworkWithElementWiseBinary(BroadcastToDescriptor& descriptor,
+ const ElementwiseBinaryDescriptor&
+ elementWiseDescriptor,
+ const armnn::TensorInfo& inputInfo,
+ const armnn::TensorInfo& inputInfoElementWise,
+ const armnn::TensorInfo& outputInfo)
+ {
+ INetworkPtr network(INetwork::Create());
+ IConnectableLayer* inputLayer = network->AddInputLayer(0, "input");
+ IConnectableLayer* inputLayerElementWise = network->AddInputLayer(1, "inputElementWiseBinary");
+ IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to");
+ IConnectableLayer* multiplicationLayer =
+ network->AddElementwiseBinaryLayer(elementWiseDescriptor,
+ "multiplication");
+ IConnectableLayer* outputLayer = network->AddOutputLayer(0, "output");
+ Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
+ Connect(inputLayerElementWise, multiplicationLayer,
+ inputInfoElementWise, 0, 1);
+ Connect(broadcastLayer, multiplicationLayer, inputInfo, 0, 0);
+ Connect(multiplicationLayer, outputLayer, outputInfo, 0, 0);
+ return network;
+ }
+
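+ // End-to-end test: broadcasts a 1x4 input to a 4x4 output and compares it
+ // against the expected repeated rows on the given backends.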
+ template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+ void BroadcastToEndToEnd(const std::vector<BackendId>& backends)
+ {
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+ bool qConst = true;
+
+ const TensorShape inputTensorShape = { {1, 4} };
+ const TensorShape outputTensorShape = { {4, 4} };
+
+ TensorInfo inputInfo (inputTensorShape, ArmnnType, qScale,
+ qOffset, qConst);
+ TensorInfo outputInfo (outputTensorShape, ArmnnType, qScale,
+ qOffset);
+
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
+ CHECK(descriptor.m_BroadcastToShape == outputTensorShape);
+ INetworkPtr network = CreateBroadcastToNetwork(descriptor, inputInfo, outputInfo);
+
+ std::map<int, std::vector<T>> inputTensor = { { 0, inputData } };
+ std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor,
+ expectedOutputTensor, backends);
+ }
+
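+ // End-to-end test: BroadcastTo followed by an elementwise multiplication by a 4x4 tensor
+ // of ones; also runs the BroadcastToOptimizationLayer pass on an optimized copy of the network.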
+ template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+ void BroadcastToEndToEndElementWiseBinary(const std::vector<BackendId>& backends)
+ {
+ float qScale = 1.0f;
+ int32_t qOffset = 0;
+ bool qConst = true;
+
+ const TensorShape inputTensorShape = { {1, 4} };
+ const TensorShape outputTensorShape = { {4, 4} };
+
+ const TensorInfo inputInfo (inputTensorShape, ArmnnType, qScale,
+ qOffset, qConst);
+ const TensorInfo inputInfoElementWise (outputTensorShape, ArmnnType, qScale,
+ qOffset, qConst);
+ const TensorInfo outputInfo (outputTensorShape, ArmnnType, qScale,
+ qOffset);
+
+ std::vector<T> inputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ std::vector<T> inputDataElementWise = armnnUtils::QuantizedVector<T>({
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1
+ }, qScale, qOffset);
+
+ std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>({
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161,
+ 65, 144, 91, 161
+ }, qScale, qOffset);
+
+ auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
+ CHECK(descriptor.m_BroadcastToShape == outputTensorShape);
+ INetworkPtr network = CreateBroadcastToNetworkWithElementWiseBinary(descriptor,
+ BinaryOperation::Mul,
+ inputInfo,
+ inputInfoElementWise,
+ outputInfo);
+ // Create ArmNN runtime
+ IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());
+
+ // Optimise ArmNN network
+ IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuRef},
+ run->GetDeviceSpec());
+
+ Graph& graph = GetGraphForTesting(optNet.get());
+
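+ // Run the BroadcastTo optimization pass on the optimized graph.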
+ Optimizer::Pass(graph,
+ armnn::MakeOptimizations(armnn::optimizations::BroadcastToOptimizationLayer()));
+
+ std::map<int, std::vector<T>> inputTensor = { { 0, inputData }, {1, inputDataElementWise} };
+ std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };
+ EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor,
+ expectedOutputTensor, backends);
+ }
+
+} // anonymous namespace
\ No newline at end of file
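
For reference, end-to-end helpers such as these are typically invoked from a backend's test file. The sketch below shows how a caller might register them with doctest; the suite and test case names, the include path, and the CpuRef backend choice are illustrative assumptions and are not part of this patch:

    #include <backendsCommon/test/BroadcastToEndToEndTestImpl.hpp>
    #include <doctest/doctest.h>

    TEST_SUITE("RefEndToEnd")   // hypothetical suite name
    {
    TEST_CASE("RefBroadcastToEndToEndFloat32")
    {
        // Run the plain BroadcastTo test on the reference backend.
        std::vector<armnn::BackendId> defaultBackends = { armnn::Compute::CpuRef };
        BroadcastToEndToEnd<armnn::DataType::Float32>(defaultBackends);
    }

    TEST_CASE("RefBroadcastToEndToEndWithMultiplicationFloat32")
    {
        // Run the BroadcastTo + elementwise multiplication variant.
        std::vector<armnn::BackendId> defaultBackends = { armnn::Compute::CpuRef };
        BroadcastToEndToEndElementWiseBinary<armnn::DataType::Float32>(defaultBackends);
    }
    }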