//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "armnn/INetwork.hpp"

#include "armnnUtils/QuantizeHelper.hpp"
#include "ElementwiseBinaryEndToEndTestImpl.hpp"
#include "Optimizer.hpp"

#include <CommonTestUtils.hpp>
#include <ResolveType.hpp>

#include <doctest/doctest.h>

namespace
{
using namespace armnn;

// Builds a network of the form: Input -> BroadcastTo -> Output.
armnn::INetworkPtr CreateBroadcastToNetwork(BroadcastToDescriptor& descriptor,
                                            const armnn::TensorInfo& inputInfo,
                                            const armnn::TensorInfo& outputInfo)
{
    INetworkPtr network(INetwork::Create());

    IConnectableLayer* inputLayer     = network->AddInputLayer(0, "input");
    IConnectableLayer* broadcastLayer = network->AddBroadcastToLayer(descriptor, "broadcast_to");
    IConnectableLayer* outputLayer    = network->AddOutputLayer(0, "output");

    Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
    Connect(broadcastLayer, outputLayer, outputInfo, 0, 0);

    return network;
}

// Builds a network of the form:
//   Input(0) -> BroadcastTo -> ElementwiseBinary -> Output
//   Input(1) -----------------^
armnn::INetworkPtr CreateBroadcastToNetworkWithElementWiseBinary(BroadcastToDescriptor& descriptor,
                                                                 const ElementwiseBinaryDescriptor& elementWiseDescriptor,
                                                                 const armnn::TensorInfo& inputInfo,
                                                                 const armnn::TensorInfo& inputInfoElementWise,
                                                                 const armnn::TensorInfo& outputInfo)
{
    INetworkPtr network(INetwork::Create());

    IConnectableLayer* inputLayer            = network->AddInputLayer(0, "input");
    IConnectableLayer* inputLayerElementWise = network->AddInputLayer(1, "inputElementWiseBinary");
    IConnectableLayer* broadcastLayer        = network->AddBroadcastToLayer(descriptor, "broadcast_to");
    IConnectableLayer* multiplicationLayer   =
        network->AddElementwiseBinaryLayer(elementWiseDescriptor, "multiplication");
    IConnectableLayer* outputLayer           = network->AddOutputLayer(0, "output");

    Connect(inputLayer, broadcastLayer, inputInfo, 0, 0);
    Connect(inputLayerElementWise, multiplicationLayer, inputInfoElementWise, 0, 1);
    Connect(broadcastLayer, multiplicationLayer, inputInfo, 0, 0);
    Connect(multiplicationLayer, outputLayer, outputInfo, 0, 0);

    return network;
}

template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void BroadcastToEndToEnd(const std::vector<BackendId>& backends)
{
    float   qScale  = 1.0f;
    int32_t qOffset = 0;
    bool    qConst  = true;

    const TensorShape inputTensorShape  = { {1, 4} };
    const TensorShape outputTensorShape = { {4, 4} };

    TensorInfo inputInfo (inputTensorShape, ArmnnType, qScale, qOffset, qConst);
    TensorInfo outputInfo(outputTensorShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputData = armnnUtils::QuantizedVector<T>({ 65, 144, 91, 161 }, qScale, qOffset);

    // The 1x4 input is expected to be repeated along the first dimension to give a 4x4 output.
    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>({ 65, 144, 91, 161,
                                                                         65, 144, 91, 161,
                                                                         65, 144, 91, 161,
                                                                         65, 144, 91, 161 }, qScale, qOffset);

    auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
    CHECK(descriptor.m_BroadcastToShape == outputTensorShape);

    INetworkPtr network = CreateBroadcastToNetwork(descriptor, inputInfo, outputInfo);

    std::map<int, std::vector<T>> inputTensor          = { { 0, inputData } };
    std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutputTensor, backends);
}

template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void BroadcastToEndToEndElementWiseBinary(const std::vector<BackendId>& backends,
                                          const ElementwiseBinaryDescriptor& elementWiseDescriptor)
{
    float   qScale  = 1.0f;
    int32_t qOffset = 0;
    bool    qConst  = true;

    const TensorShape inputTensorShape  = { {1, 4} };
    const TensorShape outputTensorShape = { {4, 4} };

    const TensorInfo inputInfo            (inputTensorShape, ArmnnType, qScale, qOffset, qConst);
    const TensorInfo inputInfoElementWise (outputTensorShape, ArmnnType, qScale, qOffset, qConst);
    const TensorInfo outputInfo           (outputTensorShape, ArmnnType, qScale, qOffset);

    std::vector<T> inputData = armnnUtils::QuantizedVector<T>({ 65, 144, 91, 161 }, qScale, qOffset);

    std::vector<T> inputDataElementWise = armnnUtils::QuantizedVector<T>({ 1, 1, 1, 1,
                                                                           1, 1, 1, 1,
                                                                           1, 1, 1, 1,
                                                                           1, 1, 1, 1 }, qScale, qOffset);

    // The second operand is all ones, so the expected output is the broadcast input
    // unchanged for Mul/Div, incremented by one for Add and decremented by one for Sub.
    std::vector<T> expectedOutputData;
    if (elementWiseDescriptor.m_Operation == BinaryOperation::Mul ||
        elementWiseDescriptor.m_Operation == BinaryOperation::Div)
    {
        expectedOutputData = armnnUtils::QuantizedVector<T>({ 65, 144, 91, 161,
                                                              65, 144, 91, 161,
                                                              65, 144, 91, 161,
                                                              65, 144, 91, 161 }, qScale, qOffset);
    }
    else if (elementWiseDescriptor.m_Operation == BinaryOperation::Add)
    {
        expectedOutputData = armnnUtils::QuantizedVector<T>({ 66, 145, 92, 162,
                                                              66, 145, 92, 162,
                                                              66, 145, 92, 162,
                                                              66, 145, 92, 162 }, qScale, qOffset);
    }
    else if (elementWiseDescriptor.m_Operation == BinaryOperation::Sub)
    {
        expectedOutputData = armnnUtils::QuantizedVector<T>({ 64, 143, 90, 160,
                                                              64, 143, 90, 160,
                                                              64, 143, 90, 160,
                                                              64, 143, 90, 160 }, qScale, qOffset);
    }

    auto descriptor = armnn::BroadcastToDescriptor(armnn::TensorShape({ 4, 4 }));
    CHECK(descriptor.m_BroadcastToShape == outputTensorShape);

    INetworkPtr network = CreateBroadcastToNetworkWithElementWiseBinary(descriptor, elementWiseDescriptor,
                                                                        inputInfo, inputInfoElementWise,
                                                                        outputInfo);

    // Create ArmNN runtime
    IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions());

    // Optimise ArmNN network
    IOptimizedNetworkPtr optNet = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());

    Graph& graph = GetGraphForTesting(optNet.get());

    // Run the BroadcastTo optimisation pass over the optimised graph.
    Optimizer::Pass(graph, armnn::MakeOptimizations(armnn::optimizations::BroadcastToOptimizationLayer()));

    std::map<int, std::vector<T>> inputTensor          = { { 0, inputData }, { 1, inputDataElementWise } };
    std::map<int, std::vector<T>> expectedOutputTensor = { { 0, expectedOutputData } };

    EndToEndLayerTestImpl<ArmnnType, ArmnnType>(std::move(network), inputTensor, expectedOutputTensor, backends);
}

} // anonymous namespace
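
// Illustrative usage (an assumption about the calling convention, not part of this file's
// contract): the per-backend end-to-end test suites are expected to instantiate these helpers
// with a concrete data type and backend list. The suite and test names below are hypothetical.
//
//     TEST_SUITE("RefEndToEnd")
//     {
//         TEST_CASE("RefBroadcastToEndToEndFloat32Test")
//         {
//             BroadcastToEndToEnd<armnn::DataType::Float32>({ armnn::Compute::CpuRef });
//         }
//
//         TEST_CASE("RefBroadcastToWithMulEndToEndFloat32Test")
//         {
//             BroadcastToEndToEndElementWiseBinary<armnn::DataType::Float32>(
//                 { armnn::Compute::CpuRef },
//                 armnn::ElementwiseBinaryDescriptor(armnn::BinaryOperation::Mul));
//         }
//     }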