author     Sadik Armagan <sadik.armagan@arm.com>    2021-11-24 15:47:28 +0000
committer  Sadik Armagan <sadik.armagan@arm.com>    2021-12-14 11:02:41 +0000
commit     a097d2a0ed8e30d5aaf6d29ec18d0c39201b7b67 (patch)
tree       947e587bc42d07f52c55b155308b5ea5bd3ebacd /src/armnn
parent     bc14881a76699dd942e94265116da68a6466455e (diff)
download   armnn-a097d2a0ed8e30d5aaf6d29ec18d0c39201b7b67.tar.gz
IVGCVSW-6453 'Move the ArmNN Test Utils code to a physically separate directory'
* Created include/armnnTestUtils directory
* Moved Arm NN test utils files into armnnTestUtils directory

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I03ac54c645c41c52650c4c03b6a58fb1481fef5d
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/test/CreateWorkload.hpp  2315
-rw-r--r--  src/armnn/test/GraphTests.cpp  2
-rw-r--r--  src/armnn/test/GraphUtils.cpp  78
-rw-r--r--  src/armnn/test/GraphUtils.hpp  24
-rw-r--r--  src/armnn/test/InferOutputTests.cpp  2
-rw-r--r--  src/armnn/test/InferOutputTests.hpp  2
-rw-r--r--  src/armnn/test/NetworkTests.cpp  2
-rw-r--r--  src/armnn/test/OptimizerTests.cpp  2
-rw-r--r--  src/armnn/test/PredicateResult.hpp  45
-rw-r--r--  src/armnn/test/RuntimeTests.cpp  2
-rw-r--r--  src/armnn/test/TensorHelpers.hpp  236
-rw-r--r--  src/armnn/test/TestUtils.cpp  62
-rw-r--r--  src/armnn/test/TestUtils.hpp  57
-rw-r--r--  src/armnn/test/UnitTests.cpp  67
-rw-r--r--  src/armnn/test/UnitTests.hpp  185
-rw-r--r--  src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp  4
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/FoldPadTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/FuseActivationTests.cpp  4
-rw-r--r--  src/armnn/test/optimizations/FuseBatchNormTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/InsertDebugLayerTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/MovePermuteUpTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/MoveTransposeUpTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/PermuteAsReshapeTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp  4
-rw-r--r--  src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp  2
-rw-r--r--  src/armnn/test/optimizations/TransposeAsReshapeTests.cpp  2
36 files changed, 53 insertions(+), 3076 deletions(-)
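
The many two-line counts in the diffstat above are most likely single include swaps: each test file stops including the shared utility headers by relative path from src/armnn/test and picks them up from the new armnnTestUtils location instead. A minimal sketch of the pattern, assuming a consumer that previously included TestUtils.hpp directly (the exact header and include form vary per file and are not shown in this excerpt):

-#include "TestUtils.hpp"
+#include <armnnTestUtils/TestUtils.hpp>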
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index ea8a436177..ae07253841 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -2,2315 +2,8 @@
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-#include "TestUtils.hpp"
-
-#include <Graph.hpp>
-#include <Network.hpp>
-#include <ResolveType.hpp>
-
-#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-#include <backendsCommon/TensorHandle.hpp>
-#include <backendsCommon/WorkloadData.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <doctest/doctest.h>
-
-#include <utility>
-
-using namespace armnn;
-
-namespace
-{
-
-using namespace std;
-
-// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type.
-template<typename Workload>
-std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer,
- const IWorkloadFactory& factory,
- const ModelOptions& modelOptions = {})
-{
- std::unique_ptr<IWorkload> workload = layer.CreateWorkload(factory);
- CHECK_MESSAGE(workload.get() == PolymorphicDowncast<Workload*>(workload.get()),
- "Cannot convert to derived class");
- std::string reasonIfUnsupported;
- layer.SetBackendId(factory.GetBackendId());
- CHECK(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions));
- return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
-}
-
-// Helper function to create tensor handles for workloads, assuming they all use the same factory.
-void CreateTensorHandles(armnn::Graph& graph,
- armnn::IWorkloadFactory& factory)
-{
- TensorHandleFactoryRegistry tmpRegistry;
- for (auto&& layer : graph.TopologicalSort())
- {
- layer->CreateTensorHandles(tmpRegistry, factory);
- }
-}
-
-/////////////////////////////////////////////////////////////////////////////////////////////
-// The following functions are called by backendsCommon/test/CreateWorkload*.cpp
-// They build very simple graphs, and then create a workload.
-// Some checks are performed on the workload to ensure parameters have been passed correctly.
-// They return the created workloads so that backend-specific checks can be performed.
-/////////////////////////////////////////////////////////////////////////////////////////////
-
-template <typename ActivationWorkload, armnn::DataType DataType>
-std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- ActivationDescriptor layerDesc;
- layerDesc.m_Function = ActivationFunction::Abs;
- layerDesc.m_A = 3.5f;
- layerDesc.m_B = -10.0f;
-
- ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({1, 1}, DataType);
-
- Connect(input, layer, tensorInfo);
- Connect(layer, output, tensorInfo);
-
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, factory);
-
- ActivationQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
- CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
- CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename WorkloadType,
- typename DescriptorType,
- typename LayerType,
- armnn::DataType DataType>
-std::unique_ptr<WorkloadType> CreateElementwiseWorkloadTest(armnn::IWorkloadFactory & factory,
- armnn::Graph & graph)
-{
- // Creates the layer we're testing.
- Layer* const layer = graph.AddLayer<LayerType>("layer");
-
- // Creates extra layers.
- Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
- Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({2, 3}, DataType);
- Connect(input1, layer, tensorInfo, 0, 0);
- Connect(input2, layer, tensorInfo, 0, 1);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
-
- DescriptorType queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 2);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template<typename WorkloadType,
- typename DescriptorType,
- armnn::DataType DataType>
-std::unique_ptr<WorkloadType> CreateSubtractionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- SubtractionLayer* const layer = graph.AddLayer<SubtractionLayer>("layer");
-
- auto activationDesc = std::make_shared<ActivationDescriptor>();
- activationDesc->m_A = 10.0f;
- activationDesc->m_B = 5.0f;
- activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
-
- layer->SetAdditionalInfoForObject(activationDesc);
-
- // Creates extra layers.
- Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
- Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({2, 3}, DataType);
- Connect(input1, layer, tensorInfo, 0, 0);
- Connect(input2, layer, tensorInfo, 0, 1);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Check that the additional information can be queried from the layer
- std::shared_ptr<ActivationDescriptor>
- activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
-
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
-
- DescriptorType queueDescriptor = workload->GetData();
-
- const ActivationDescriptor* queueDescBlobPtr =
- queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
- IgnoreUnused(queueDescBlobPtr);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- CHECK(queueDescriptor.m_Inputs.size() == 2);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- return workload;
-}
-
-template<typename WorkloadType,
- typename DescriptorType,
- armnn::DataType DataType>
-std::unique_ptr<WorkloadType> CreateMultiplicationWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- MultiplicationLayer* const layer = graph.AddLayer<MultiplicationLayer>("layer");
-
- auto activationDesc = std::make_shared<ActivationDescriptor>();
- activationDesc->m_A = 10.0f;
- activationDesc->m_B = 5.0f;
- activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
-
- layer->SetAdditionalInfoForObject(activationDesc);
-
- // Creates extra layers.
- Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
- Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({2, 3}, DataType);
- Connect(input1, layer, tensorInfo, 0, 0);
- Connect(input2, layer, tensorInfo, 0, 1);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Check that the additional information can be queried from the layer
- std::shared_ptr<ActivationDescriptor>
- activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
-
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
-
- DescriptorType queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 2);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- const ActivationDescriptor* queueDescBlobPtr =
- queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
- IgnoreUnused(queueDescBlobPtr);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- return workload; // Returns so we can do extra, backend-specific tests.
-}
-
-template<typename WorkloadType,
- typename DescriptorType,
- armnn::DataType DataType>
-std::unique_ptr<WorkloadType> CreateAdditionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- AdditionLayer* const layer = graph.AddLayer<AdditionLayer>("layer");
-
- auto activationDesc = std::make_shared<ActivationDescriptor>();
- activationDesc->m_A = 10.0f;
- activationDesc->m_B = 5.0f;
- activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
-
- layer->SetAdditionalInfoForObject(activationDesc);
-
- // Creates extra layers.
- Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
- Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({2, 3}, DataType);
- Connect(input1, layer, tensorInfo, 0, 0);
- Connect(input2, layer, tensorInfo, 0, 1);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Check that the additional information can be queried from the layer
- std::shared_ptr<ActivationDescriptor>
- activationDescPtr = layer->template GetAdditionalInformation<ActivationDescriptor>();
-
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
-
- DescriptorType queueDescriptor = workload->GetData();
- const ActivationDescriptor* queueDescBlobPtr =
- queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
- IgnoreUnused(queueDescBlobPtr);
- CHECK(queueDescriptor.m_Inputs.size() == 2);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- return workload;
-}
-
-template <typename WorkloadType,
- typename DescriptorType,
- armnn::DataType DataType>
-std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory,
- armnn::Graph & graph,
- armnn::UnaryOperation op)
-{
- ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op);
- Layer* const layer = graph.AddLayer<armnn::ElementwiseUnaryLayer>(desc, "layer");
-
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- armnn::TensorInfo tensorInfo({ 2, 3 }, DataType);
- Connect(input, layer, tensorInfo, 0, 0);
- Connect(layer, output, tensorInfo, 0, 0);
- CreateTensorHandles(graph, factory);
-
- auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
- DescriptorType queueDescriptor = workload->GetData();
-
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- return workload;
-}
-
-template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
-std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkloadTest(
- armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
-{
- TensorShape tensorShape;
- switch (dataLayout)
- {
- case DataLayout::NHWC:
- tensorShape = { 2, 4, 4, 3 };
- break;
- case DataLayout::NCHW:
- default:
- tensorShape = { 2, 3, 4, 4 };
- }
-
- // Creates the layer we're testing.
- BatchNormalizationDescriptor layerDesc;
- layerDesc.m_Eps = 0.05f;
- layerDesc.m_DataLayout = dataLayout;
-
- BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
-
- armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Mean->Allocate();
- layer->m_Variance->Allocate();
- layer->m_Beta->Allocate();
- layer->m_Gamma->Allocate();
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo(tensorShape, DataType);
- Connect(input, layer, tensorInfo);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
- BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
-std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlobWorkloadTest(
- armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
-{
- TensorShape tensorShape;
- switch (dataLayout)
- {
- case DataLayout::NHWC:
- tensorShape = { 2, 4, 4, 3 };
- break;
- case DataLayout::NCHW:
- default:
- tensorShape = { 2, 3, 4, 4 };
- }
-
- // Creates the layer we're testing.
- BatchNormalizationDescriptor layerDesc;
- layerDesc.m_Eps = 0.05f;
- layerDesc.m_DataLayout = dataLayout;
-
- BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
-
- armnn::TensorInfo weightInfo({3}, DataType);
- layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
- layer->m_Mean->Allocate();
- layer->m_Variance->Allocate();
- layer->m_Beta->Allocate();
- layer->m_Gamma->Allocate();
-
- auto activationDesc = std::make_shared<ActivationDescriptor>();
- activationDesc->m_A = 10.0f;
- activationDesc->m_B = 5.0f;
- activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
-
- layer->SetAdditionalInfoForObject(activationDesc);
-
- // Check that the additional information can be queried from the layer
- std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo(tensorShape, DataType);
- Connect(input, layer, tensorInfo);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
- BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
- const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
- IgnoreUnused(queueDescBlobPtr);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename Convolution2dWorkload, armnn::DataType DataType>
-std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- DataLayout dataLayout = DataLayout::NCHW,
- const ModelOptions& modelOptions = {})
-{
- // Creates the layer we're testing.
- Convolution2dDescriptor layerDesc;
- layerDesc.m_PadLeft = 3;
- layerDesc.m_PadRight = 3;
- layerDesc.m_PadTop = 1;
- layerDesc.m_PadBottom = 1;
- layerDesc.m_StrideX = 2;
- layerDesc.m_StrideY = 4;
- layerDesc.m_BiasEnabled = true;
- layerDesc.m_DataLayout = dataLayout;
-
- Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
-
- TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
- TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
- TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
-
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- Connect(input, layer, TensorInfo(inputShape, DataType));
- Connect(layer, output, TensorInfo(outputShape, DataType));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
-
- Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
- CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
- CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
- CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
- CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
- CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
- CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
- TensorInfo({2}, GetBiasDataType(DataType))));
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template<typename Convolution2dWorkload, armnn::DataType DataType>
-std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlobWorkloadTest(
- armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- DataLayout dataLayout = DataLayout::NCHW,
- const ModelOptions& modelOptions = {})
-{
- // Creates the layer we're testing.
- Convolution2dDescriptor layerDesc;
- layerDesc.m_PadLeft = 3;
- layerDesc.m_PadRight = 3;
- layerDesc.m_PadTop = 1;
- layerDesc.m_PadBottom = 1;
- layerDesc.m_StrideX = 2;
- layerDesc.m_StrideY = 4;
- layerDesc.m_BiasEnabled = true;
- layerDesc.m_DataLayout = dataLayout;
-
-
- Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
-
- TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
- TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
- TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
-
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
- auto activationDesc = std::make_shared<ActivationDescriptor>();
- activationDesc->m_A = 10.0f;
- activationDesc->m_B = 5.0f;
- activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
-
- layer->SetAdditionalInfoForObject(activationDesc);
-
- // Check that the additional information can be queried from the layer
- std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
-
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- Connect(input, layer, TensorInfo(inputShape, DataType));
- Connect(layer, output, TensorInfo(outputShape, DataType));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
-
- Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
- IgnoreUnused(queueDescBlobPtr);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
- CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
- CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
- CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
- CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
- CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
- CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
- TensorInfo({2}, GetBiasDataType(DataType))));
- CHECK(queueDescriptor.m_Inputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename Convolution2dWorkload, armnn::DataType DataType>
-std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- DataLayout dataLayout = DataLayout::NCHW,
- const ModelOptions& modelOptions = {})
-{
- // Creates the layer we're testing.
- Convolution2dDescriptor layerDesc;
- layerDesc.m_PadLeft = 0;
- layerDesc.m_PadRight = 0;
- layerDesc.m_PadTop = 0;
- layerDesc.m_PadBottom = 0;
- layerDesc.m_StrideX = 1;
- layerDesc.m_StrideY = 1;
- layerDesc.m_BiasEnabled = false;
- layerDesc.m_DataLayout = dataLayout;
-
- Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
-
- TensorShape weightShape = TensorShape{32, 32, 3, 3};
- TensorShape inputShape = TensorShape{1, 32, 149, 149};
- TensorShape outputShape = TensorShape{1, 32, 147, 147};
-
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
-
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- Connect(input, layer, TensorInfo(inputShape, DataType));
- Connect(layer, output, TensorInfo(outputShape, DataType));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
-
- Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
- CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadLeft == 0);
- CHECK(queueDescriptor.m_Parameters.m_PadRight == 0);
- CHECK(queueDescriptor.m_Parameters.m_PadTop == 0);
- CHECK(queueDescriptor.m_Parameters.m_PadBottom == 0);
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename LstmWorkload>
-std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
-{
- // This parameter setting is for withCifgWithPeepholeNoProjection
- LstmDescriptor layerDesc;
- layerDesc.m_ActivationFunc = 4;
- layerDesc.m_ClippingThresCell = 0.0f;
- layerDesc.m_ClippingThresProj = 0.0f;
- layerDesc.m_CifgEnabled = true;
- layerDesc.m_PeepholeEnabled = true;
- layerDesc.m_ProjectionEnabled = false;
-
- LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
- unsigned int batchSize = 2;
- unsigned int inputSize = 2;
- unsigned int numUnits = 4;
- unsigned int outputSize = 4;
-
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits, inputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits, outputSize }, DataType::Float32));
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits }, DataType::Float32));
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits }, DataType::Float32));
-
- layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
- layer->m_BasicParameters.m_InputToCellWeights->Allocate();
- layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
- layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
- layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
- layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
- layer->m_BasicParameters.m_ForgetGateBias->Allocate();
- layer->m_BasicParameters.m_CellBias->Allocate();
- layer->m_BasicParameters.m_OutputGateBias->Allocate();
-
-
- if (layerDesc.m_PeepholeEnabled)
- {
- layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
- (TensorInfo({ numUnits }, DataType::Float32));
- layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
- layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
- }
-
- // create input and output layers
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
- Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
- Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
- Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
- Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
- Layer* const output = graph.AddLayer<OutputLayer>(3, "output");
-
- // connect up
- armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
- armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
- armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
- armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
- DataType::Float32);
- Connect(input, layer, lstmTensorInfo1, 0, 0);
- Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
- Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
- Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
- Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
- Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
- Connect(layer, output, lstmTensorInfo3, 3, 0);
-
- CreateTensorHandles(graph, factory);
-
- // make the workload and check it
- auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, factory);
- LstmQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
- CHECK(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
- CHECK(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
- CHECK(queueDescriptor.m_Inputs.size() == 3);
- CHECK(queueDescriptor.m_Outputs.size() == 4);
-
- CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
- DataType::Float32)));
- CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
- DataType::Float32)));
- CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
- return workload;
-}
-
-template <typename QuantizedLstmWorkload>
-std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- auto layer = graph.AddLayer<QuantizedLstmLayer>("quantizedLstmlayer");
- unsigned int numBatches = 2;
- unsigned int inputSize = 2;
- unsigned int outputSize = 4;
-
- // Scale/Offset for input/output, cellState In/Out, weights, bias
- float inputOutputScale = 0.0078125f;
- int32_t inputOutputOffset = 128;
-
- float cellStateScale = 0.00048828125f;
- int32_t cellStateOffset = 0;
-
- float weightsScale = 0.00408021f;
- int32_t weightsOffset = 100;
-
- float biasScale = 3.1876640625e-05f;
- int32_t biasOffset = 0;
-
- // Weights and bias tensor and quantization info
- armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QAsymmU8,
- weightsScale,
- weightsOffset);
-
- armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QAsymmU8,
- weightsScale,
- weightsOffset);
-
- armnn::TensorInfo biasInfo({outputSize},
- armnn::DataType::Signed32,
- biasScale,
- biasOffset);
-
- // Weights and bias
- layer->m_QuantizedLstmParameters.m_InputToInputWeights =
- std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
- layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
- std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
- layer->m_QuantizedLstmParameters.m_InputToCellWeights =
- std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
- layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
- std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
-
- layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
- std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
-
- layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
- layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
-
- // Allocate weights and bias
- layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate();
- layer->m_QuantizedLstmParameters.m_InputToForgetWeights->Allocate();
- layer->m_QuantizedLstmParameters.m_InputToCellWeights->Allocate();
- layer->m_QuantizedLstmParameters.m_InputToOutputWeights->Allocate();
-
- layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->Allocate();
- layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Allocate();
- layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->Allocate();
- layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Allocate();
-
- layer->m_QuantizedLstmParameters.m_InputGateBias->Allocate();
- layer->m_QuantizedLstmParameters.m_ForgetGateBias->Allocate();
- layer->m_QuantizedLstmParameters.m_CellBias->Allocate();
- layer->m_QuantizedLstmParameters.m_OutputGateBias->Allocate();
-
- // Create input and output layers
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const cellStateIn = graph.AddLayer<InputLayer>(1, "cellStateIn");
- Layer* const outputStateIn = graph.AddLayer<InputLayer>(2, "outputStateIn");
-
- Layer* const cellStateOut = graph.AddLayer<OutputLayer>(0, "cellStateOut");
- Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
-
- // Input/output tensor info and quantization info
- armnn::TensorInfo inputInfo({numBatches , inputSize},
- armnn::DataType::QAsymmU8,
- inputOutputScale,
- inputOutputOffset);
-
- armnn::TensorInfo cellStateInfo({numBatches , outputSize},
- armnn::DataType::QSymmS16,
- cellStateScale,
- cellStateOffset);
-
- armnn::TensorInfo outputStateInfo({numBatches , outputSize},
- armnn::DataType::QAsymmU8,
- inputOutputScale,
- inputOutputOffset);
-
- // Connect input/output slots
- Connect(input, layer, inputInfo, 0, 0);
- Connect(cellStateIn, layer, cellStateInfo, 0, 1);
- Connect(outputStateIn, layer, outputStateInfo, 0, 2);
-
- Connect(layer, cellStateOut, cellStateInfo, 0, 0);
- Connect(layer, outputStateOut, outputStateInfo, 1, 0);
-
- CreateTensorHandles(graph, factory);
-
- // Create workload and check layer support
- auto workload = MakeAndCheckWorkload<QuantizedLstmWorkload>(*layer, factory);
- QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
-
- // Validate input/output sizes
- CHECK(queueDescriptor.m_Inputs.size() == 3);
- CHECK(queueDescriptor.m_Outputs.size() == 2);
-
- // Validate weight tensor info
- CHECK((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo));
- CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
- CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
- CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
-
- CHECK((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo));
- CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
- CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
- CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
-
- CHECK((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo));
- CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
- CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
- CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
-
- return workload;
-}
-
-template <typename QLstmWorkload>
-std::unique_ptr<QLstmWorkload> CreateQLstmWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- QLstmDescriptor layerDesc;
- layerDesc.m_CifgEnabled = true;
- layerDesc.m_PeepholeEnabled = false;
- layerDesc.m_ProjectionEnabled = false;
- layerDesc.m_LayerNormEnabled = true;
-
- layerDesc.m_CellClip = 0.0f;
- layerDesc.m_ProjectionClip = 0.0f;
-
- layerDesc.m_HiddenStateZeroPoint = 0;
- layerDesc.m_HiddenStateScale = 0.007f;
-
- layerDesc.m_InputIntermediateScale = 0.007059f;
- layerDesc.m_ForgetIntermediateScale = 0.007812f;
- layerDesc.m_CellIntermediateScale = 0.007059f;
- layerDesc.m_OutputIntermediateScale = 0.007812f;
-
- QLstmLayer* const layer = graph.AddLayer<QLstmLayer>(layerDesc, "qLstm");
-
- unsigned int numBatches = 2;
- unsigned int inputSize = 4;
- unsigned int numUnits = 4;
- unsigned int outputSize = 4;
-
- // Scale/Offset quantization info
- float inputScale = 0.0078125f;
- int32_t inputOffset = 0;
-
- // if (!projectionEnabled) outputScale == hiddenStateScale
- float outputScale = layerDesc.m_HiddenStateScale;
- int32_t outputOffset = layerDesc.m_HiddenStateZeroPoint;
-
- float cellStateScale = 3.05176e-05f;
- int32_t cellStateOffset = 0;
-
- float weightsScale = 0.00784314f;
- int32_t weightsOffset = 0;
-
- float layerNormScale = 3.05182e-05f;
- int32_t layerNormOffset = 0;
-
- float biasScale = layerNormScale / 1024;
- int32_t biasOffset = 0;
-
- // Weights and bias tensor and quantization info
- armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
- armnn::DataType::QSymmS8,
- weightsScale,
- weightsOffset);
-
- armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
- armnn::DataType::QSymmS8,
- weightsScale,
- weightsOffset);
-
- armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
-
- armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
-
- // Create and allocate tensors
- layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
- layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
-
- layer->m_BasicParameters.m_RecurrentToForgetWeights =
- std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_BasicParameters.m_RecurrentToCellWeights =
- std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
- layer->m_BasicParameters.m_RecurrentToOutputWeights =
- std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
-
- layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
- layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
-
- layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
- std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
- layer->m_LayerNormParameters.m_CellLayerNormWeights =
- std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
- layer->m_LayerNormParameters.m_OutputLayerNormWeights =
- std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
-
- layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
- layer->m_BasicParameters.m_InputToCellWeights->Allocate();
- layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
-
- layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
- layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
- layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
-
- layer->m_BasicParameters.m_ForgetGateBias->Allocate();
- layer->m_BasicParameters.m_CellBias->Allocate();
- layer->m_BasicParameters.m_OutputGateBias->Allocate();
-
- layer->m_LayerNormParameters.m_ForgetLayerNormWeights->Allocate();
- layer->m_LayerNormParameters.m_CellLayerNormWeights->Allocate();
- layer->m_LayerNormParameters.m_OutputLayerNormWeights->Allocate();
-
- // Input and output layers
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
- Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
-
- Layer* const outputStateOut = graph.AddLayer<OutputLayer>(0, "outputStateOut");
- Layer* const cellStateOut = graph.AddLayer<OutputLayer>(1, "cellStateOut");
- Layer* const output = graph.AddLayer<OutputLayer>(2, "output");
-
- // Input/Output tensor info
- armnn::TensorInfo inputInfo({numBatches , inputSize},
- armnn::DataType::QAsymmS8,
- inputScale,
- inputOffset);
-
- armnn::TensorInfo cellStateInfo({numBatches , numUnits},
- armnn::DataType::QSymmS16,
- cellStateScale,
- cellStateOffset);
-
- armnn::TensorInfo outputStateInfo({numBatches , outputSize},
- armnn::DataType::QAsymmS8,
- outputScale,
- outputOffset);
-
- // Connect layers to slots
- Connect(input, layer, inputInfo, 0, 0);
- Connect(outputStateIn, layer, outputStateInfo, 0, 1);
- Connect(cellStateIn, layer, cellStateInfo, 0, 2);
-
- Connect(layer, outputStateOut, outputStateInfo, 0, 0);
- Connect(layer, cellStateOut, cellStateInfo, 1, 0);
- Connect(layer, output, outputStateInfo, 2, 0);
-
- CreateTensorHandles(graph, factory);
-
- // Create and check workload
- auto workload = MakeAndCheckWorkload<QLstmWorkload>(*layer, factory);
- QLstmQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_CellClip == 0.0f);
- CHECK(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f);
- CHECK(queueDescriptor.m_Inputs.size() == 3);
- CHECK(queueDescriptor.m_Outputs.size() == 3);
-
- CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
- CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
- CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
-
- CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
- CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
- CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
-
- CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
- CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
- CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
-
- return workload;
-}
-
-template <typename Convolution2dWorkload, armnn::DataType DataType>
-std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- Convolution2dDescriptor layerDesc;
- layerDesc.m_PadLeft = 1;
- layerDesc.m_PadRight = 1;
- layerDesc.m_PadTop = 1;
- layerDesc.m_PadBottom = 1;
- layerDesc.m_StrideX = 1;
- layerDesc.m_StrideY = 1;
- layerDesc.m_BiasEnabled = true;
-
- Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
-
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
-
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>
- (TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale));
- Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory);
-
- Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
- CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadRight == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
- CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
-
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
- DataType, inputsQScale)));
- CHECK((queueDescriptor.m_Bias->GetTensorInfo()
- == TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)));
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename DepthwiseConvolution2dFloat32Workload, armnn::DataType DataType>
-std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest(
- armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
-{
- // Creates the layer we're testing.
- DepthwiseConvolution2dDescriptor layerDesc;
- layerDesc.m_PadLeft = 1;
- layerDesc.m_PadRight = 2;
- layerDesc.m_PadTop = 1;
- layerDesc.m_PadBottom = 2;
- layerDesc.m_StrideX = 1;
- layerDesc.m_StrideY = 1;
- layerDesc.m_BiasEnabled = false;
- layerDesc.m_DataLayout = dataLayout;
-
- DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
-
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({1, 4, 4, 2}, DataType)); // [ 1, H, W, I*M ]
- layer->m_Weight->Allocate();
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
- TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };
- TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
- TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };
-
- // Connects up.
- Connect(input, layer, TensorInfo(inputShape, DataType));
- Connect(layer, output, TensorInfo(outputShape, DataType));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, factory);
-
- DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
- CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
- CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadBottom == 2);
- CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == false);
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 4, 4, 2}, DataType)));
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename FullyConnectedWorkload, armnn::DataType DataType>
-std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- FullyConnectedDescriptor layerDesc;
- layerDesc.m_BiasEnabled = false;
- layerDesc.m_TransposeWeightMatrix = true;
-
- FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
-
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
-
- // As optimization isn't run, member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Weight->Allocate();
-
- armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
- weightsTensorInfo.SetConstant();
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- auto const weights = graph.AddLayer<ConstantLayer>("weights");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
- weights->m_LayerOutput->Allocate();
-
- // Connects up.
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
- Connect(weights, layer, weightsTensorInfo, 0, 1);
- Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
-
- FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
-
- CHECK(queueDescriptor.m_Inputs.size() == 2);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename FullyConnectedWorkload, armnn::DataType DataType>
-std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
- (armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- FullyConnectedDescriptor layerDesc;
- layerDesc.m_BiasEnabled = true;
- layerDesc.m_TransposeWeightMatrix = true;
-
- FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
-
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
-
- // As optimization isn't run, member variables need to be updated.
- layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
- layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
- layer->m_Weight->Allocate();
- layer->m_Bias->Allocate();
-
- armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
- armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale);
- weightsTensorInfo.SetConstant();
- biasesTensorInfo.SetConstant();
-
- auto activationDesc = std::make_shared<ActivationDescriptor>();
- activationDesc->m_A = 10.0f;
- activationDesc->m_B = 5.0f;
- activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
-
- layer->SetAdditionalInfoForObject(activationDesc);
-
- // Check that the additional information can be queried from the layer
- std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
- ARMNN_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
- armnn::ActivationFunction::BoundedReLu);
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- auto const weights = graph.AddLayer<ConstantLayer>("weights");
- auto const biases = graph.AddLayer<ConstantLayer>("biases");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
- weights->m_LayerOutput->Allocate();
- biases->m_LayerOutput = std::make_unique<ScopedTensorHandle>(biasesTensorInfo);
- biases->m_LayerOutput->Allocate();
-
- // Connects up.
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
- Connect(weights, layer, weightsTensorInfo, 0, 1);
- Connect(biases, layer, biasesTensorInfo, 0, 2);
- Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
-
- FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
-
- const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
- IgnoreUnused(queueDescBlobPtr);
-
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
- ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
- ARMNN_ASSERT(
- static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
- );
-
- CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
- CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
- CHECK(queueDescriptor.m_Inputs.size() == 3);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename FullyConnectedWorkload, armnn::DataType DataType>
-std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest
- (armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- FullyConnectedDescriptor layerDesc;
- layerDesc.m_BiasEnabled = true;
- layerDesc.m_TransposeWeightMatrix = true;
- layerDesc.m_ConstantWeights = false;
-
- FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
-
- float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
- float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;
-
- // Creates extra layers with weights and biases as input layers.
- Layer* const input = graph.AddLayer<InputLayer>(1, "input");
- Layer* const weights = graph.AddLayer<InputLayer>(2, "weights");
- Layer* const biases = graph.AddLayer<InputLayer>(3, "biases");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
- Connect(weights, layer, TensorInfo({7, 20}, DataType, inputsQScale), 0, 1);
- Connect(biases, layer, TensorInfo({7}, GetBiasDataType(DataType), inputsQScale), 0, 2);
- Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
-
- FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
-
- CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
- CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
- CHECK(queueDescriptor.m_Parameters.m_ConstantWeights == false);
- CHECK(queueDescriptor.m_Inputs.size() == 3);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-
-template <typename NormalizationWorkload, armnn::DataType DataType>
-std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- DataLayout dataLayout = DataLayout::NCHW)
-{
- // Creates the layer we're testing.
- NormalizationDescriptor layerDesc;
- layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across;
- layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
- layerDesc.m_NormSize = 3;
- layerDesc.m_Alpha = 0.5f;
- layerDesc.m_Beta = -1.0f;
- layerDesc.m_K = 0.2f;
- layerDesc.m_DataLayout = dataLayout;
-
- NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
- TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 };
- TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
- TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 };
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo(inputShape, DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, DataType);
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, factory);
-
- NormalizationQueueDescriptor queueDescriptor = workload->GetData();
- CHECK((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
- CHECK((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
- CHECK(queueDescriptor.m_Parameters.m_NormSize == 3);
- CHECK(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
- CHECK(queueDescriptor.m_Parameters.m_Beta == -1.0f);
- CHECK(queueDescriptor.m_Parameters.m_K == 0.2f);
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename Pooling2dWorkload, armnn::DataType DataType>
-std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- DataLayout dataLayout = DataLayout::NCHW)
-{
- // Creates the layer we're testing.
- Pooling2dDescriptor layerDesc;
- layerDesc.m_PoolType = PoolingAlgorithm::Average;
- layerDesc.m_PoolWidth = 3;
- layerDesc.m_PoolHeight = 3;
- layerDesc.m_PadLeft = 2;
- layerDesc.m_PadRight = 2;
- layerDesc.m_PadTop = 1;
- layerDesc.m_PadBottom = 1;
- layerDesc.m_StrideX = 2;
- layerDesc.m_StrideY = 3;
- layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor;
- layerDesc.m_DataLayout = dataLayout;
-
- Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer");
-
- // Create extra layers
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
- TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};
-
- // Connect up
- Connect(input, layer, TensorInfo(inputShape, DataType));
- Connect(layer, output, TensorInfo(outputShape, DataType));
- CreateTensorHandles(graph, factory);
-
- // Make the workload and checks it
- auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, factory);
-
- Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
- CHECK((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
- CHECK((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
- CHECK(queueDescriptor.m_Parameters.m_PoolWidth == 3);
- CHECK(queueDescriptor.m_Parameters.m_PoolHeight == 3);
- CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
- CHECK(queueDescriptor.m_Parameters.m_StrideY == 3);
- CHECK(queueDescriptor.m_Parameters.m_PadLeft == 2);
- CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
- CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
- CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Return so we can do extra, backend-specific tests
- return workload;
-}
-
-template <typename SoftmaxWorkload, armnn::DataType DataType>
-std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Create the layer we're testing.
- SoftmaxDescriptor softmaxDescriptor;
- // Set Axis to -1 when targeting CL or Neon, until further axes are supported.
- if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
- {
- softmaxDescriptor.m_Axis = -1;
- }
-
- Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
- // Create extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up
- armnn::TensorInfo tensorInfo({4, 1}, DataType);
- if (DataType == armnn::DataType::QAsymmU8)
- {
- tensorInfo.SetQuantizationOffset(0);
- tensorInfo.SetQuantizationScale(1.f / 256);
- }
- else if (DataType == armnn::DataType::QAsymmS8)
- {
- tensorInfo.SetQuantizationOffset(-128);
- tensorInfo.SetQuantizationScale(1.f / 256);
- }
-
- Connect(input, layer, tensorInfo);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Make the workload and checks it.
- auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, factory);
-
- SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Return so we can do extra, backend-specific tests.
- return workload;
-}
-
-template<typename SplitterWorkload, armnn::DataType DataType>
-std::unique_ptr<SplitterWorkload>
- CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
-{
- // Create the layer we're testing.
- // NOTE: we need three dimensions (channels, height/y, width/x) because the Compute
- //       Library restricts subtensors to have the same x and y dimensions as their
- //       parent tensors; the origin on the x and y dimensions therefore has to be
- //       zero for any view, so the split has to happen along the third dimension.
- // NOTE: arguments are: number of views, number of dimensions.
- ViewsDescriptor layerDesc(3, 3);
- // NOTE: arguments are: view, dimension, value.
- layerDesc.SetViewOriginCoord(0, 0, 0);
- layerDesc.SetViewOriginCoord(1, 0, 1);
- layerDesc.SetViewOriginCoord(2, 0, 3);
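- // For example, with the {5, 7, 7} input connected below, origins 0, 1 and 3 on
- // dimension 0 yield the three views {1, 7, 7}, {2, 7, 7} and {2, 7, 7}.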
-
- Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer");
-
- // Adds extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0");
- Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
- Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({5, 7, 7}, DataType);
- Connect(input, layer, tensorInfo);
-
- armnn::TensorInfo output0Info({1, 7, 7}, DataType);
- armnn::TensorInfo output1Info({2, 7, 7}, DataType);
- armnn::TensorInfo output2Info({2, 7, 7}, DataType);
-
- Connect(layer, output0, output0Info, 0, 0);
- Connect(layer, output1, output1Info, 1, 0);
- Connect(layer, output2, output2Info, 2, 0);
-
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, factory);
-
- SplitterQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 3);
- CHECK(queueDescriptor.m_ViewOrigins.size() == 3);
-
- CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
- CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
- CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
- CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
- CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
- CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
- CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
- CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
- CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-/// This function constructs a graph with both a splitter and a concat, and returns a pair of the workloads.
-template<typename SplitterWorkload, typename ConcatWorkload, armnn::DataType DataType>
-std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<ConcatWorkload>>
- CreateSplitterConcatWorkloadTest(armnn::IWorkloadFactory &factory, armnn::Graph &graph)
-{
- armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType);
-
- armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType);
- armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType);
-
- //Constructs the graph.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
-
- armnn::ViewsDescriptor splitterViews(2);
- splitterViews.SetViewOriginCoord(0, 0, 0);
- splitterViews.SetViewOriginCoord(0, 1, 0);
- splitterViews.SetViewOriginCoord(0, 2, 0);
- splitterViews.SetViewOriginCoord(0, 3, 0);
-
- splitterViews.SetViewOriginCoord(1, 0, 0);
- splitterViews.SetViewOriginCoord(1, 1, 1);
- splitterViews.SetViewOriginCoord(1, 2, 0);
- splitterViews.SetViewOriginCoord(1, 3, 0);
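- // These two views select the two single-channel halves of the {1, 2, 100, 10}
- // input: view 0 starts at channel 0 and view 1 starts at channel 1.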
-
- // create splitter layer
- Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
- CHECK(splitter);
-
- armnn::OriginsDescriptor concatViews(2);
- concatViews.SetViewOriginCoord(0, 0, 0);
- concatViews.SetViewOriginCoord(0, 1, 1);
- concatViews.SetViewOriginCoord(0, 2, 0);
- concatViews.SetViewOriginCoord(0, 3, 0);
-
- concatViews.SetViewOriginCoord(1, 0, 0);
- concatViews.SetViewOriginCoord(1, 1, 0);
- concatViews.SetViewOriginCoord(1, 2, 0);
- concatViews.SetViewOriginCoord(1, 3, 0);
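- // The concat origins are deliberately swapped relative to the splitter (view 0
- // lands at channel 1 and view 1 at channel 0), flipping the two halves.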
-
- // create concat layer
- Layer* const concat = graph.AddLayer<ConcatLayer>(concatViews, "concat");
- CHECK(concat);
-
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Adds connections.
- // connect input to splitter
- Connect(input, splitter, inputTensorInfo, 0, 0);
- // connect splitter[0] to concat[1]
- Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up.
- // connect splitter[1] to concat[0]
- Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
- // connect concat to output
- Connect(concat, output, inputTensorInfo, 0, 0);
-
- // created tensor handles
- CreateTensorHandles(graph, factory);
-
- // created splitter workload
- auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
- CHECK(workloadSplitter);
- // created concat workload
- auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
- CHECK(workloadConcat);
-
- return {std::move(workloadSplitter), std::move(workloadConcat)};
-}
-
-
-/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then
-/// connected to two different activation layers.
-template<typename SplitterWorkload, typename ActivationWorkload, armnn::DataType DataType>
-void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph,
- std::unique_ptr<SplitterWorkload>& wlSplitter,
- std::unique_ptr<ActivationWorkload>& wlActiv0_0,
- std::unique_ptr<ActivationWorkload>& wlActiv0_1,
- std::unique_ptr<ActivationWorkload>& wlActiv1_0,
- std::unique_ptr<ActivationWorkload>& wlActiv1_1)
-{
- armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, DataType);
- armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType);
- armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType);
-
- //Constructs the graph.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
-
- armnn::ViewsDescriptor splitterViews(2);
-
- splitterViews.SetViewOriginCoord(0, 0, 0);
- splitterViews.SetViewOriginCoord(0, 1, 0);
- splitterViews.SetViewOriginCoord(0, 2, 0);
- splitterViews.SetViewOriginCoord(0, 3, 0);
-
- splitterViews.SetViewOriginCoord(1, 0, 0);
- splitterViews.SetViewOriginCoord(1, 1, 1);
- splitterViews.SetViewOriginCoord(1, 2, 0);
- splitterViews.SetViewOriginCoord(1, 3, 0);
-
- Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
-
- armnn::ActivationDescriptor activationDesc;
-
- Layer* const activ0_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_0");
- Layer* const activ0_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_1");
- Layer* const activ1_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_0");
- Layer* const activ1_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_1");
-
- Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
- Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
- Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3");
- Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4");
-
- // Adds connections.
- Connect(input, splitter, inputTensorInfo, 0, 0);
- Connect(splitter, activ0_0, splitTensorInfo1, 0, 0);
- Connect(splitter, activ0_1, splitTensorInfo1, 0, 0);
-
- Connect(splitter, activ1_0, splitTensorInfo2, 1, 0);
- Connect(splitter, activ1_1, splitTensorInfo2, 1, 0);
-
- Connect(activ0_0, output1, splitTensorInfo1, 0, 0);
- Connect(activ0_1, output2, splitTensorInfo1, 0, 0);
- Connect(activ1_0, output3, splitTensorInfo2, 0, 0);
- Connect(activ1_1, output4, splitTensorInfo2, 0, 0);
-
- CreateTensorHandles(graph, factory);
-
- auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
- auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, factory);
- auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, factory);
- auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, factory);
- auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, factory);
-
- wlSplitter = std::move(workloadSplitter);
- wlActiv0_0 = std::move(workloadActiv0_0);
- wlActiv0_1 = std::move(workloadActiv0_1);
- wlActiv1_0 = std::move(workloadActiv1_0);
- wlActiv1_1 = std::move(workloadActiv1_1);
-}
-
-template <typename ResizeWorkload, armnn::DataType DataType>
-std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- DataLayout dataLayout = DataLayout::NCHW)
-{
- TensorShape inputShape;
- TensorShape outputShape;
-
- switch (dataLayout)
- {
- case DataLayout::NHWC:
- inputShape = { 2, 4, 4, 3 };
- outputShape = { 2, 2, 2, 3 };
- break;
- case DataLayout::NCHW:
- default:
- inputShape = { 2, 3, 4, 4 };
- outputShape = { 2, 3, 2, 2 };
- break;
- }
-
- // Creates the layer we're testing.
- ResizeDescriptor resizeDesc;
- armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
- resizeDesc.m_Method = ResizeMethod::Bilinear;
- resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
- resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()];
- resizeDesc.m_DataLayout = dataLayout;
- Layer* const layer = graph.AddLayer<ResizeLayer>(resizeDesc, "resize");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo(inputShape, DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, DataType);
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, factory);
-
- auto queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
- CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename BatchToSpaceNdWorkload, armnn::DataType DataType>
-std::unique_ptr<BatchToSpaceNdWorkload> CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- BatchToSpaceNdDescriptor desc;
- Layer* const layer = graph.AddLayer<BatchToSpaceNdLayer>(desc, "batchToSpace");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({1, 1, 1, 1}, DataType);
-
- Connect(input, layer, tensorInfo);
- Connect(layer, output, tensorInfo);
-
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<BatchToSpaceNdWorkload>(*layer, factory);
-
- BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- return workload;
-}
-
-template <typename LogSoftmaxWorkload, armnn::DataType DataType>
-std::unique_ptr<LogSoftmaxWorkload> CreateLogSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Create the layer we're testing.
- LogSoftmaxDescriptor logSoftmaxDescriptor;
- // Set Axis to -1 when targeting CL or Neon, until further axes are supported.
- if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
- {
- logSoftmaxDescriptor.m_Axis = -1;
- }
-
- Layer* const layer = graph.AddLayer<LogSoftmaxLayer>(logSoftmaxDescriptor, "layer");
- // Create extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connect up
- armnn::TensorInfo tensorInfo({4, 1}, DataType);
-
- Connect(input, layer, tensorInfo);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Make the workload and checks it.
- auto workload = MakeAndCheckWorkload<LogSoftmaxWorkload>(*layer, factory);
-
- LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Return so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename L2NormalizationWorkload, armnn::DataType DataType>
-std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
-{
- // Creates the layer we're testing.
- L2NormalizationDescriptor layerDesc;
- layerDesc.m_DataLayout = dataLayout;
-
- Layer* const layer = graph.AddLayer<L2NormalizationLayer>(layerDesc, "l2norm");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
- TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
- TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
- TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo(inputShape, DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, DataType);
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, factory);
-
- L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
- CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename ReshapeWorkload, armnn::DataType DataType>
-std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- TensorShape outputShape({ 1, 4 });
- ReshapeDescriptor reshapeDesc;
- reshapeDesc.m_TargetShape = outputShape;
- Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, DataType);
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, factory);
-
- ReshapeQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename ConvertFp16ToFp32Float32Workload>
-std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32WorkloadTest(
- armnn::IWorkloadFactory& factory, armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- ConvertFp16ToFp32Layer* const layer = graph.AddLayer<ConvertFp16ToFp32Layer>("Fp16ToFp32Converter");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
- armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, factory);
-
- ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename ConvertFp32ToFp16Float16Workload>
-std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16WorkloadTest(
- armnn::IWorkloadFactory& factory, armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- ConvertFp32ToFp16Layer* const layer = graph.AddLayer<ConvertFp32ToFp16Layer>("Fp32ToFp16Converter");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
- armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, factory);
-
- ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename MeanWorkload, armnn::DataType DataType>
-std::unique_ptr<MeanWorkload> CreateMeanWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
-{
- // Reduce along the first and second dimensions, and do not keep the reduced dimensions.
- MeanDescriptor descriptor({ 1, 2 }, false);
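- // For example, reducing a { 1, 3, 7, 4 } input over dimensions 1 and 2 without
- // keeping them produces the { 1, 4 } output connected below.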
-
- // Creates the layer we're testing.
- Layer* const layer = graph.AddLayer<MeanLayer>(descriptor, "mean");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo({ 1, 3, 7, 4 }, DataType);
- armnn::TensorInfo outputTensorInfo({ 1, 4 }, DataType);
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, factory);
-
- MeanQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
- CHECK(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template<typename ConcatWorkload, armnn::DataType DataType>
-std::unique_ptr<ConcatWorkload> CreateConcatWorkloadTest(armnn::IWorkloadFactory &factory,
- armnn::Graph &graph,
- const armnn::TensorShape &outputShape,
- unsigned int concatAxis)
-{
- armnn::TensorInfo inputTensorInfo({ 2, 3, 2, 5 }, DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, DataType);
-
- // Constructs the graph.
- Layer* const input0 = graph.AddLayer<InputLayer>(0, "input0");
- Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
- armnn::OriginsDescriptor descriptor;
-
- std::vector<armnn::TensorShape> inputShapes{{ 2, 3, 2, 5 }, { 2, 3, 2, 5 }};
-
- descriptor = CreateDescriptorForConcatenation(inputShapes.begin(),
- inputShapes.end(),
- concatAxis);
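- // For example, with concatAxis == 1 the two { 2, 3, 2, 5 } inputs are expected
- // to concatenate into a { 2, 6, 2, 5 } output.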
-
- // create concat layer
- Layer* const concat = graph.AddLayer<ConcatLayer>(descriptor, "concat");
- CHECK(concat);
-
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Adds connections.
- // connect input0 to concat
- Connect(input0, concat, inputTensorInfo, 0, 0);
- // connect input1 to concat
- Connect(input1, concat, inputTensorInfo, 0, 1);
- // connect concat to output
- Connect(concat, output, outputTensorInfo, 0, 0);
-
- // create tensor handles
- CreateTensorHandles(graph, factory);
-
- // create concat workload
- auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
- CHECK(workloadConcat);
-
- return workloadConcat;
-}
-
-template <typename PreCompiledWorkload, armnn::DataType dataType>
-std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> CreatePreCompiledWorkloadTest(
- armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- bool biasEnabled = false)
-{
- IgnoreUnused(graph);
-
- // build up the structure of the network
- armnn::INetworkPtr net(armnn::INetwork::Create());
-
- // Add an input layer
- armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer");
- CHECK(inputLayer);
-
- // ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
- // ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC
- // this test is using NHWC, so the weights shape is OHWI
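- // i.e. {16, 1, 1, 16} here describes sixteen 1x1 kernels over 16 input channels (a pointwise convolution)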
- TensorInfo weightsTensorInfo(TensorShape({16, 1, 1, 16}), dataType, 0.9f, 0, true);
- unsigned int weightsLength = weightsTensorInfo.GetNumElements();
-
- using WeightType = armnn::ResolveType<dataType>;
- std::vector<WeightType> convWeightsData(weightsLength);
- for (unsigned int i = 0; i < weightsLength; ++i)
- {
- convWeightsData[i] = static_cast<WeightType>(i);
- }
-
- armnn::ConstTensor weights(weightsTensorInfo, convWeightsData);
-
- // Add a layer that can be used in the PreCompiled layer
- armnn::Convolution2dDescriptor convDesc2d;
- convDesc2d.m_StrideX = 1;
- convDesc2d.m_StrideY = 1;
- convDesc2d.m_BiasEnabled = biasEnabled;
- convDesc2d.m_DataLayout = armnn::DataLayout::NHWC;
-
- armnn::IConnectableLayer* convLayer = nullptr;
- const std::string convLayerName("conv layer");
-
- if (biasEnabled)
- {
- constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
- armnn::DataType::Signed32 : armnn::DataType::Float32;
-
- TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0, true);
- unsigned int biasLength = biasTensorInfo.GetNumElements();
-
- using BiasType = armnn::ResolveType<biasDataType>;
- std::vector<BiasType> biasData(biasLength);
- std::fill(biasData.begin(), biasData.end(), static_cast<BiasType>(0));
-
- armnn::ConstTensor biases(biasTensorInfo, biasData);
-
- // Create convolution layer with biases
- convLayer = net->AddConvolution2dLayer(convDesc2d,
- weights,
- Optional<ConstTensor>(biases),
- convLayerName.c_str());
- }
- else
- {
- // Create convolution layer without biases
- convLayer = net->AddConvolution2dLayer(convDesc2d,
- weights,
- EmptyOptional(),
- convLayerName.c_str());
- }
-
- CHECK(convLayer);
-
- // Add an output layer
- armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer");
- CHECK(outputLayer);
-
- // set the tensors in the network (NHWC format)
- TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
- if (dataType == armnn::DataType::QAsymmU8)
- {
- inputTensorInfo.SetQuantizationOffset(0);
- inputTensorInfo.SetQuantizationScale(0.9f);
- }
-
- TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType);
- if (dataType == armnn::DataType::QAsymmU8)
- {
- outputTensorInfo.SetQuantizationOffset(0);
- outputTensorInfo.SetQuantizationScale(0.9f);
- }
-
- // Connect the layers
- inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
- inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
-
- convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
- convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
- // Optimize the network for the backend supported by the factory
- std::vector<armnn::BackendId> backends = {factory.GetBackendId()};
- armnn::IRuntime::CreationOptions options;
- armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
- armnn::OptimizerOptions optimizerOptions;
- armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
- optimizerOptions);
- CHECK(optimizedNet != nullptr);
-
- // Find the PreCompiled layer in the optimised graph
- armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get());
- Layer* preCompiledLayer = nullptr;
- for (auto& layer : optimisedGraph)
- {
- if (layer->GetType() == LayerType::PreCompiled)
- {
- preCompiledLayer = layer;
- }
- }
- CHECK(preCompiledLayer != nullptr);
-
- // Create the TensorHandles.
- CreateTensorHandles(optimisedGraph, factory);
-
- // Make the workload and check it.
- auto workload = MakeAndCheckWorkload<PreCompiledWorkload>(*preCompiledLayer, factory);
-
- PreCompiledQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns the workload so we can do extra, backend-specific tests.
- // NOTE: We need to return the optimised network as well, otherwise it goes
- // out of scope and the tensor handles are destructed.
- return std::make_pair(std::move(optimizedNet), std::move(workload));
-}
-
-template<typename ConstantWorkload, armnn::DataType DataType>
-std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- const armnn::TensorShape& outputShape)
-{
- armnn::TensorInfo outputTensorInfo(outputShape, DataType);
-
- // create constant layer
- auto constant = graph.AddLayer<ConstantLayer>("constant");
- CHECK(constant);
- constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);
-
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Adds connections.
- // connect constant to output
- Connect(constant, output, outputTensorInfo, 0, 0);
-
- // create tensor handles
- CreateTensorHandles(graph, factory);
-
- // create Constant workload"
- auto workloadConstant = MakeAndCheckWorkload<ConstantWorkload>(*constant, factory);
- CHECK(workloadConstant);
-
- return workloadConstant;
-}
-
-template <typename PreluWorkload>
-std::unique_ptr<PreluWorkload> CreatePreluWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- const armnn::TensorShape& inputShape,
- const armnn::TensorShape& alphaShape,
- const armnn::TensorShape& outputShape,
- armnn::DataType dataType)
-{
- // Creates the PReLU layer
- Layer* const layer = graph.AddLayer<PreluLayer>("prelu");
- CHECK(layer != nullptr);
-
- // Creates extra layers
- Layer* const input = graph.AddLayer<InputLayer> (0, "input");
- Layer* const alpha = graph.AddLayer<InputLayer> (1, "alpha");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- CHECK(input != nullptr);
- CHECK(alpha != nullptr);
- CHECK(output != nullptr);
-
- // Connects up
- armnn::TensorInfo inputTensorInfo (inputShape, dataType);
- armnn::TensorInfo alphaTensorInfo (alphaShape, dataType);
- armnn::TensorInfo outputTensorInfo(outputShape, dataType);
- Connect(input, layer, inputTensorInfo, 0, 0);
- Connect(alpha, layer, alphaTensorInfo, 0, 1);
- Connect(layer, output, outputTensorInfo, 0, 0);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it
- auto workload = MakeAndCheckWorkload<PreluWorkload>(*layer, factory);
-
- PreluQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 2);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
-template <typename SpaceToDepthWorkload, armnn::DataType DataType>
-std::unique_ptr<SpaceToDepthWorkload> CreateSpaceToDepthWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- SpaceToDepthDescriptor desc;
- desc.m_BlockSize = 2;
- Layer* const layer = graph.AddLayer<SpaceToDepthLayer>(desc, "spaceToDepth");
-
- // Creates extra layers.
- Layer* const input = graph.AddLayer<InputLayer>(0, "input");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo inputTensorInfo({ 1, 2, 2, 1 }, DataType);
- armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 4 }, DataType);
-
- Connect(input, layer, inputTensorInfo);
- Connect(layer, output, outputTensorInfo);
-
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, factory);
-
- SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == 1);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- return workload;
-}
-
-template <typename StackWorkload, armnn::DataType DataType>
-std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph,
- const armnn::TensorShape& inputShape,
- const armnn::TensorShape& outputShape,
- unsigned int axis,
- unsigned int numInputs)
-{
- armnn::TensorInfo inputTensorInfo(inputShape, DataType);
- armnn::TensorInfo outputTensorInfo(outputShape, DataType);
-
- // Constructs the Stack layer.
- armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
- Layer* const stackLayer = graph.AddLayer<StackLayer>(descriptor, "stack");
- CHECK(stackLayer != nullptr);
-
- // Constructs layer inputs and output.
- std::vector<Layer*> inputs;
- for (unsigned int i=0; i<numInputs; ++i)
- {
- inputs.push_back(graph.AddLayer<InputLayer>(
- static_cast<int>(i),
- ("input" + std::to_string(i)).c_str()
- ));
- CHECK(inputs[i] != nullptr);
- }
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
- CHECK(output != nullptr);
-
- // Adds connections.
- for (unsigned int i=0; i<numInputs; ++i)
- {
- Connect(inputs[i], stackLayer, inputTensorInfo, 0, i);
- }
- Connect(stackLayer, output, outputTensorInfo, 0, 0);
-
- CreateTensorHandles(graph, factory);
-
- auto stackWorkload = MakeAndCheckWorkload<StackWorkload>(*stackLayer, factory);
- StackQueueDescriptor queueDescriptor = stackWorkload->GetData();
- CHECK(queueDescriptor.m_Inputs.size() == numInputs);
- CHECK(queueDescriptor.m_Outputs.size() == 1);
-
- return stackWorkload;
-}
-
-} // Anonymous namespace
+// This file is deprecated and will be removed soon.
+// It now simply includes the replacement header from armnnTestUtils;
+// please include the armnnTestUtils header directly instead.
+#include "../../armnnTestUtils/CreateWorkload.hpp" \ No newline at end of file
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index f3753398b4..d246a082ec 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -2,7 +2,7 @@
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include "GraphUtils.hpp"
+#include <GraphUtils.hpp>
#include <Graph.hpp>
#include <Layer.hpp>
diff --git a/src/armnn/test/GraphUtils.cpp b/src/armnn/test/GraphUtils.cpp
deleted file mode 100644
index bc6b562c9d..0000000000
--- a/src/armnn/test/GraphUtils.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "GraphUtils.hpp"
-
-#include <armnn/utility/PolymorphicDowncast.hpp>
-
-bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name)
-{
- for (auto&& layer : graph)
- {
- if (layer->GetName() == name)
- {
- return true;
- }
- }
- return false;
-}
-
-armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name)
-{
- for (auto&& layer : graph)
- {
- if (layer->GetNameStr() == name)
- {
- return layer;
- }
- }
- return nullptr;
-}
-
-bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num)
-{
- return layer->GetNumInputSlots() == num;
-}
-
-bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num)
-{
- return layer->GetNumOutputSlots() == num;
-}
-
-bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer,
- unsigned int srcSlot, unsigned int destSlot,
- const armnn::TensorInfo& expectedTensorInfo)
-{
- const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(srcSlot);
- const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
- if (expectedTensorInfo != tensorInfo)
- {
- return false;
- }
- const unsigned int numConnections = outputSlot.GetNumConnections();
- for (unsigned int c = 0; c < numConnections; ++c)
- {
- auto inputSlot = armnn::PolymorphicDowncast<const armnn::InputSlot*>(outputSlot.GetConnection(c));
- if (inputSlot->GetOwningLayer().GetNameStr() == destLayer->GetNameStr() &&
- inputSlot->GetSlotIndex() == destSlot)
- {
- return true;
- }
- }
- return false;
-}
-
-/// Checks that first comes before second in the order.
-bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second)
-{
- graph.Print();
-
- const auto& order = graph.TopologicalSort();
-
- auto firstPos = std::find(order.begin(), order.end(), first);
- auto secondPos = std::find(firstPos, order.end(), second);
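- // Because the search for second starts at firstPos, this only succeeds when
- // first appears before second in the topologically sorted order.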
-
- return (secondPos != order.end());
-}
diff --git a/src/armnn/test/GraphUtils.hpp b/src/armnn/test/GraphUtils.hpp
index 60d03dca23..02954e3d1f 100644
--- a/src/armnn/test/GraphUtils.hpp
+++ b/src/armnn/test/GraphUtils.hpp
@@ -1,25 +1,9 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-#include <Graph.hpp>
-
-#include <string>
-
-
-bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name);
-
-armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name);
-
-bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num);
-
-bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num);
-
-bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer,
- unsigned int srcSlot, unsigned int destSlot,
- const armnn::TensorInfo& expectedTensorInfo);
-
-bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second);
+#include "../../armnnTestUtils/GraphUtils.hpp"
+#pragma message("src/armnn/test/GraphUtils.hpp has been deprecated, it is due for removal in 22.08 release." \
+ " Please use from armnnTestUtils library, /src/armnnTestUtils/GraphUtils.hpp)
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index f8d8e89555..c7c0c6d2a7 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -5,7 +5,7 @@
#include "InferOutputTests.hpp"
-#include <test/UnitTests.hpp>
+#include <UnitTests.hpp>
TEST_SUITE("LayerValidateOutput")
{
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 6435d87be3..799739b9ef 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -5,7 +5,7 @@
#pragma once
-#include "TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Graph.hpp>
#include <layers/ArgMinMaxLayer.hpp>
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index c1927e3601..d4edf5da97 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "GraphUtils.hpp"
+#include <GraphUtils.hpp>
#include <armnn/LayerVisitorBase.hpp>
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 750e6967ad..a5db0ac0b0 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "TestUtils.hpp"
+#include <TestUtils.hpp>
#include <BackendSettings.hpp>
#include <Graph.hpp>
diff --git a/src/armnn/test/PredicateResult.hpp b/src/armnn/test/PredicateResult.hpp
index a344c8e3ad..8edf8b1180 100644
--- a/src/armnn/test/PredicateResult.hpp
+++ b/src/armnn/test/PredicateResult.hpp
@@ -2,47 +2,8 @@
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-#include <sstream>
+#include <armnnTestUtils/PredicateResult.hpp>
-namespace armnn
-{
-
-class PredicateResult
-{
-public:
- explicit PredicateResult(bool result)
- : m_Result(result)
- {}
-
- PredicateResult(const PredicateResult& predicateResult)
- : m_Result(predicateResult.m_Result)
- , m_Message(predicateResult.m_Message.str())
- {}
-
- void SetResult(bool newResult)
- {
- m_Result = newResult;
- }
-
- std::stringstream& Message()
- {
- return m_Message;
- }
-
- bool operator!() const
- {
- return !m_Result;
- }
-
- void operator=(PredicateResult otherPredicateResult)
- {
- m_Result = otherPredicateResult.m_Result;
- }
-
- bool m_Result;
- std::stringstream m_Message;
-};
-
-} // namespace armnn \ No newline at end of file
+#pragma message("src/armnn/test/PredicateResult.hpp has been deprecated, it is due for removal in 22.08 release." \
+ " Please use public interface include/armnnTestUtils/PredicateResult.hpp") \ No newline at end of file
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index f055f2368b..045007b5c9 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -22,7 +22,7 @@
#include <doctest/doctest.h>
#include "RuntimeTests.hpp"
-#include "TestUtils.hpp"
+#include <TestUtils.hpp>
namespace armnn
{
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 95cea58b30..626cda3d1c 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -1,235 +1,9 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-#include "PredicateResult.hpp"
-
-#include <armnn/Tensor.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnnUtils/FloatingPointComparison.hpp>
-
-#include <QuantizeHelper.hpp>
-
-#include <doctest/doctest.h>
-
-#include <array>
-#include <cmath>
-#include <random>
-#include <vector>
-
-constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;
-
-template<typename T, bool isQuantized = true>
-struct SelectiveComparer
-{
- static bool Compare(T a, T b)
- {
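- // Quantized values are considered equal if they differ by at most one quantization level.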
- return (std::max(a, b) - std::min(a, b)) <= 1;
- }
-
-};
-
-template<typename T>
-struct SelectiveComparer<T, false>
-{
- static bool Compare(T a, T b)
- {
- // If a or b is zero, percent_tolerance does an exact match, so compare to a small, constant tolerance instead.
- if (a == 0.0f || b == 0.0f)
- {
- return std::abs(a - b) <= g_FloatCloseToZeroTolerance;
- }
-
- if (std::isinf(a) && a == b)
- {
- return true;
- }
-
- if (std::isnan(a) && std::isnan(b))
- {
- return true;
- }
-
- // For unquantized floats we use a tolerance of 1%.
- return armnnUtils::within_percentage_tolerance(a, b);
- }
-};
-
-template<typename T>
-bool SelectiveCompare(T a, T b)
-{
- return SelectiveComparer<T, armnn::IsQuantizedType<T>()>::Compare(a, b);
-};
-
-template<typename T>
-bool SelectiveCompareBoolean(T a, T b)
-{
- return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));
-};
-
-template <typename T>
-armnn::PredicateResult CompareTensors(const std::vector<T>& actualData,
- const std::vector<T>& expectedData,
- const armnn::TensorShape& actualShape,
- const armnn::TensorShape& expectedShape,
- bool compareBoolean = false,
- bool isDynamic = false)
-{
- if (actualData.size() != expectedData.size())
- {
- armnn::PredicateResult res(false);
- res.Message() << "Different data size ["
- << actualData.size()
- << "!="
- << expectedData.size()
- << "]";
- return res;
- }
-
- if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions())
- {
- armnn::PredicateResult res(false);
- res.Message() << "Different number of dimensions ["
- << actualShape.GetNumDimensions()
- << "!="
- << expectedShape.GetNumDimensions()
- << "]";
- return res;
- }
-
- if (actualShape.GetNumElements() != expectedShape.GetNumElements())
- {
- armnn::PredicateResult res(false);
- res.Message() << "Different number of elements ["
- << actualShape.GetNumElements()
- << "!="
- << expectedShape.GetNumElements()
- << "]";
- return res;
- }
-
- unsigned int numberOfDimensions = actualShape.GetNumDimensions();
-
- if (!isDynamic)
- {
- // Checks they are same shape.
- for (unsigned int i = 0; i < numberOfDimensions; ++i)
- {
- if (actualShape[i] != expectedShape[i])
- {
- armnn::PredicateResult res(false);
- res.Message() << "Different shapes ["
- << actualShape[i]
- << "!="
- << expectedShape[i]
- << "]";
- return res;
- }
- }
- }
-
- // Iterate element by element over the n-dimensional index space.
- std::vector<unsigned int> indices;
- for (unsigned int i = 0; i < numberOfDimensions; i++)
- {
- indices.emplace_back(0);
- }
-
- std::stringstream errorString;
- int numFailedElements = 0;
- constexpr int maxReportedDifferences = 3;
- unsigned int index = 0;
-
- // Compare data element by element.
- while (true)
- {
- bool comparison;
- // Because any non-zero uint8_t value (1-255) counts as true, Booleans need a dedicated comparison.
- if(compareBoolean)
- {
- comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]);
- }
- else
- {
- comparison = SelectiveCompare(actualData[index], expectedData[index]);
- }
-
- if (!comparison)
- {
- ++numFailedElements;
-
- if (numFailedElements <= maxReportedDifferences)
- {
- if (numFailedElements >= 2)
- {
- errorString << ", ";
- }
- errorString << "[";
- for (unsigned int i = 0; i < numberOfDimensions; ++i)
- {
- errorString << indices[i];
- if (i != numberOfDimensions - 1)
- {
- errorString << ",";
- }
- }
- errorString << "]";
-
- errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")";
- }
- }
-
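- // Advance the n-dimensional index odometer-style: bump the innermost dimension
- // and carry any overflow towards dimension 0.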
- ++indices[numberOfDimensions - 1];
- for (unsigned int i=numberOfDimensions-1; i>0; i--)
- {
- if (indices[i] == actualShape[i])
- {
- indices[i] = 0;
- ++indices[i - 1];
- }
- }
- if (indices[0] == actualShape[0])
- {
- break;
- }
-
- index++;
- }
-
- armnn::PredicateResult comparisonResult(true);
- if (numFailedElements > 0)
- {
- comparisonResult.SetResult(false);
- comparisonResult.Message() << numFailedElements << " different values at: ";
- if (numFailedElements > maxReportedDifferences)
- {
- errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)";
- }
- comparisonResult.Message() << errorString.str();
- }
-
- return comparisonResult;
-}
-
-template <typename T>
-std::vector<T> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
- unsigned int seed,
- float min = -10.0f,
- float max = 10.0f)
-{
- std::mt19937 gen(seed);
- std::uniform_real_distribution<float> dist(min, max);
-
- std::vector<float> init(tensorInfo.GetNumElements());
- for (unsigned int i = 0; i < init.size(); i++)
- {
- init[i] = dist(gen);
- }
-
- const float qScale = tensorInfo.GetQuantizationScale();
- const int32_t qOffset = tensorInfo.GetQuantizationOffset();
-
- return armnnUtils::QuantizedVector<T>(init, qScale, qOffset);
-}
+// This file is deprecated and will be removed soon.
+// It now simply includes the replacement header from armnnTestUtils;
+// please include the armnnTestUtils header directly instead.
+#include "../../armnnTestUtils/TensorHelpers.hpp" \ No newline at end of file
diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp
deleted file mode 100644
index 97cc80c8a2..0000000000
--- a/src/armnn/test/TestUtils.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "TestUtils.hpp"
-
-#include <armnn/utility/Assert.hpp>
-
-using namespace armnn;
-
-void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
- unsigned int fromIndex, unsigned int toIndex)
-{
- ARMNN_ASSERT(from);
- ARMNN_ASSERT(to);
-
- try
- {
- from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
- }
- catch (const std::out_of_range& exc)
- {
- std::ostringstream message;
-
- if (to->GetType() == armnn::LayerType::FullyConnected && toIndex == 2)
- {
- message << "Tried to connect bias to FullyConnected layer when bias is not enabled: ";
- }
-
- message << "Failed to connect to input slot "
- << toIndex
- << " on "
- << GetLayerTypeAsCString(to->GetType())
- << " layer "
- << std::quoted(to->GetName())
- << " as the slot does not exist or is unavailable";
- throw LayerValidationException(message.str());
- }
-
- from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
-}
-
-namespace armnn
-{
-
-Graph& GetGraphForTesting(IOptimizedNetwork* optNet)
-{
- return optNet->pOptimizedNetworkImpl->GetGraph();
-}
-
-ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNet)
-{
- return optNet->pOptimizedNetworkImpl->GetModelOptions();
-}
-
-profiling::ProfilingService& GetProfilingService(armnn::RuntimeImpl* runtime)
-{
- return runtime->m_ProfilingService;
-}
-
-} \ No newline at end of file
diff --git a/src/armnn/test/TestUtils.hpp b/src/armnn/test/TestUtils.hpp
index fa9156bc09..fe5331ec3d 100644
--- a/src/armnn/test/TestUtils.hpp
+++ b/src/armnn/test/TestUtils.hpp
@@ -1,58 +1,9 @@
//
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
+#include "../../armnnTestUtils/TestUtils.hpp"
-#include <armnn/INetwork.hpp>
-#include <Graph.hpp>
-#include <Runtime.hpp>
-
-void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
- unsigned int fromIndex = 0, unsigned int toIndex = 0);
-
-template <typename LayerT>
-bool IsLayerOfType(const armnn::Layer* const layer)
-{
- return (layer->GetType() == armnn::LayerEnumOf<LayerT>());
-}
-
-inline bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
-{
- return (first == last);
-}
-
-/// Checks that each unary function in Us evaluates to true for the corresponding layer in the sequence [first, last).
-template <typename U, typename... Us>
-bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last, U&& u, Us&&... us)
-{
- return u(*first) && CheckSequence(std::next(first), last, us...);
-}
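-// Example (hypothetical) usage, checking that a graph is exactly Input -> Output:
-//   CheckSequence(graph.cbegin(), graph.cend(),
-//                 &IsLayerOfType<armnn::InputLayer>,
-//                 &IsLayerOfType<armnn::OutputLayer>);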
-
-template <typename LayerT>
-bool CheckRelatedLayers(armnn::Graph& graph, const std::list<std::string>& testRelatedLayers)
-{
- for (auto& layer : graph)
- {
- if (layer->GetType() == armnn::LayerEnumOf<LayerT>())
- {
- auto& relatedLayers = layer->GetRelatedLayerNames();
- if (!std::equal(relatedLayers.begin(), relatedLayers.end(), testRelatedLayers.begin(),
- testRelatedLayers.end()))
- {
- return false;
- }
- }
- }
-
- return true;
-}
-
-namespace armnn
-{
-Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
-ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr);
-profiling::ProfilingService& GetProfilingService(RuntimeImpl* runtime);
-
-} // namespace armnn \ No newline at end of file
+#pragma message("src/armnn/test/TestUtils.hpp has been deprecated, it is due for removal in 22.08 release." \
+ " Please use from armnnTestUtils library, /src/armnnTestUtils/TestUtils.hpp) \ No newline at end of file
diff --git a/src/armnn/test/UnitTests.cpp b/src/armnn/test/UnitTests.cpp
deleted file mode 100644
index cf532a76fd..0000000000
--- a/src/armnn/test/UnitTests.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
-#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
-#endif
-#include <doctest/doctest.h>
-
-#include "UnitTests.hpp"
-
-struct ConfigureLoggingFixture
-{
- ConfigureLoggingFixture()
- {
- ConfigureLoggingTest();
- }
-};
-
-
-
-TEST_SUITE("LoggerSuite")
-{
-TEST_CASE_FIXTURE(ConfigureLoggingFixture, "LoggerTest")
-{
- std::stringstream ss;
- {
- struct StreamRedirector
- {
- public:
- StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
- : m_Stream(stream)
- , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer))
- {}
- ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }
-
- private:
- std::ostream& m_Stream;
- std::streambuf* m_BackupBuffer;
- };
-
- StreamRedirector redirect(std::cout, ss.rdbuf());
-
- using namespace armnn;
- SetLogFilter(LogSeverity::Trace);
- SetAllLoggingSinks(true, false, false);
-
- ARMNN_LOG(trace) << "My trace message; " << -2;
- ARMNN_LOG(debug) << "My debug message; " << -1;
- ARMNN_LOG(info) << "My info message; " << 0;
- ARMNN_LOG(warning) << "My warning message; " << 1;
- ARMNN_LOG(error) << "My error message; " << 2;
- ARMNN_LOG(fatal) << "My fatal message; " << 3;
-
- SetLogFilter(LogSeverity::Fatal);
- }
-
- CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos);
- CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos);
- CHECK(ss.str().find("Info: My info message; 0") != std::string::npos);
- CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos);
- CHECK(ss.str().find("Error: My error message; 2") != std::string::npos);
- CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos);
-}
-
-}
\ No newline at end of file
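The deleted LoggerTest works by redirecting std::cout and searching the capture for formatted lines such as "Warning: ..."; a standalone sketch of the same idea using only the public logging API (message text illustrative):

    #include <armnn/Logging.hpp>
    #include <armnn/Utils.hpp>
    #include <iostream>
    #include <sstream>

    std::stringstream captured;
    std::streambuf* backup = std::cout.rdbuf(captured.rdbuf());  // redirect cout

    armnn::ConfigureLogging(true, false, armnn::LogSeverity::Trace);
    ARMNN_LOG(warning) << "My warning message; " << 1;

    std::cout.rdbuf(backup);                                     // restore cout
    bool found = captured.str().find("Warning: My warning message; 1") != std::string::npos;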
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index e4a8b96b52..129a766729 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -2,187 +2,8 @@
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#pragma once
-#include <armnn/Logging.hpp>
-#include <armnn/Utils.hpp>
-#include <reference/RefWorkloadFactory.hpp>
-#include <reference/test/RefWorkloadFactoryHelper.hpp>
+#include "../../armnnTestUtils/UnitTests.hpp"
-#include <backendsCommon/test/LayerTests.hpp>
-#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
-
-#include "TensorHelpers.hpp"
-
-#include <doctest/doctest.h>
-
-inline void ConfigureLoggingTest()
-{
- // Configures logging for both the ARMNN library and this test program.
- armnn::ConfigureLogging(true, true, armnn::LogSeverity::Fatal);
-}
-
-// The following macros require the caller to have defined FactoryType, with one of the following using statements:
-//
-// using FactoryType = armnn::RefWorkloadFactory;
-// using FactoryType = armnn::ClWorkloadFactory;
-// using FactoryType = armnn::NeonWorkloadFactory;
-
-/// Executes CHECK_MESSAGE on CompareTensors() return value so that the predicate_result message is reported.
-/// If the test reports itself as not supported then the tensors are not compared.
-/// Additionally this checks that the supportedness reported by the test matches the name of the test.
-/// Unsupported tests must be 'tagged' by including "UNSUPPORTED" in their name.
-/// This is useful because it clarifies that the feature being tested is not actually supported
-/// (a passed test with the name of a feature would imply that feature was supported).
-/// If support is added for a feature, the test case will fail because the name incorrectly contains UNSUPPORTED.
-/// If support is removed for a feature, the test case will fail because the name doesn't contain UNSUPPORTED.
-template <typename T, std::size_t n>
-void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult<T, n>& testResult)
-{
- bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
- CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
- "The test name does not match the supportedness it is reporting");
- if (testResult.m_Supported)
- {
- auto result = CompareTensors(testResult.m_ActualData,
- testResult.m_ExpectedData,
- testResult.m_ActualShape,
- testResult.m_ExpectedShape,
- testResult.m_CompareBoolean);
- CHECK_MESSAGE(result.m_Result, result.m_Message.str());
- }
-}
-
-template <typename T, std::size_t n>
-void CompareTestResultIfSupported(const std::string& testName, const std::vector<LayerTestResult<T, n>>& testResult)
-{
- bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
- for (unsigned int i = 0; i < testResult.size(); ++i)
- {
- CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
- "The test name does not match the supportedness it is reporting");
- if (testResult[i].m_Supported)
- {
- auto result = CompareTensors(testResult[i].m_ActualData,
- testResult[i].m_ExpectedData,
- testResult[i].m_ActualShape,
- testResult[i].m_ExpectedShape);
- CHECK_MESSAGE(result.m_Result, result.m_Message.str());
- }
- }
-}
-
-template<typename FactoryType, typename TFuncPtr, typename... Args>
-void RunTestFunction(const char* testName, TFuncPtr testFunction, Args... args)
-{
- std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
- armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
-
- auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
- FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
-
- auto testResult = (*testFunction)(workloadFactory, memoryManager, args...);
- CompareTestResultIfSupported(testName, testResult);
-
- armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr);
-}
-
-
-template<typename FactoryType, typename TFuncPtr, typename... Args>
-void RunTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... args)
-{
- std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
- armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
-
- auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
- FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
-
- auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
-
- auto testResult = (*testFunction)(workloadFactory, memoryManager, tensorHandleFactory, args...);
- CompareTestResultIfSupported(testName, testResult);
-
- armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr);
-}
-
-#define ARMNN_SIMPLE_TEST_CASE(TestName, TestFunction) \
- TEST_CASE(#TestName) \
- { \
- TestFunction(); \
- }
-
-#define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \
- TEST_CASE(#TestName) \
- { \
- RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
-
-#define ARMNN_AUTO_TEST_FIXTURE(TestName, Fixture, TestFunction, ...) \
- TEST_CASE_FIXTURE(Fixture, #TestName) \
- { \
- RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
-
-#define ARMNN_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
- TEST_CASE(#TestName) \
- { \
- RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
-
-#define ARMNN_AUTO_TEST_FIXTURE_WITH_THF(TestName, Fixture, TestFunction, ...) \
- TEST_CASE_FIXTURE(Fixture, #TestName) \
- { \
- RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
-
-template<typename FactoryType, typename TFuncPtr, typename... Args>
-void CompareRefTestFunction(const char* testName, TFuncPtr testFunction, Args... args)
-{
- auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
- FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
-
- armnn::RefWorkloadFactory refWorkloadFactory;
-
- auto testResult = (*testFunction)(workloadFactory, memoryManager, refWorkloadFactory, args...);
- CompareTestResultIfSupported(testName, testResult);
-}
-
-template<typename FactoryType, typename TFuncPtr, typename... Args>
-void CompareRefTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... args)
-{
- auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
- FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
-
- armnn::RefWorkloadFactory refWorkloadFactory;
- auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
- auto refTensorHandleFactory =
- RefWorkloadFactoryHelper::GetTensorHandleFactory(memoryManager);
-
- auto testResult = (*testFunction)(
- workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, args...);
- CompareTestResultIfSupported(testName, testResult);
-}
-
-#define ARMNN_COMPARE_REF_AUTO_TEST_CASE(TestName, TestFunction, ...) \
- TEST_CASE(#TestName) \
- { \
- CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
-
-#define ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
- TEST_CASE(#TestName) \
- { \
- CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
-
-#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(TestName, Fixture, TestFunction, ...) \
- TEST_CASE_FIXTURE(Fixture, #TestName) \
- { \
- CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
-
-#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE_WITH_THF(TestName, Fixture, TestFunction, ...) \
- TEST_CASE_FIXTURE(Fixture, #TestName) \
- { \
- CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
- }
+#pragma message("src/armnn/test/UnitTests.hpp has been deprecated; it is due for removal in the 22.08 release." \
+ " Please use UnitTests.hpp from the armnnTestUtils library, /src/armnnTestUtils/UnitTests.hpp")
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index 7573005518..0636a00234 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -3,8 +3,8 @@
// SPDX-License-Identifier: MIT
//
-#include "../GraphUtils.hpp"
-#include "../TestUtils.hpp"
+#include <GraphUtils.hpp>
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index 7b326fa8bc..4aacf7f4fe 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <BFloat16.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index f74ab0f308..531a0dd92a 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
#include <Half.hpp>
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index c4551525c1..4c453cc799 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index a598983706..a64660f987 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -5,7 +5,7 @@
#include "LayersFwd.hpp"
#include <Network.hpp>
-#include <test/TestUtils.hpp>
+#include <TestUtils.hpp>
#include <doctest/doctest.h>
#include <backendsCommon/TensorHandle.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index 63cd170f02..37d770190a 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
index e2ac1bd69e..bc8839948b 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 54a9d9a189..99b2b80556 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -8,8 +8,8 @@
#include <Network.hpp>
#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
-#include "test/GraphUtils.hpp"
-#include <test/TestUtils.hpp>
+#include <GraphUtils.hpp>
+#include <TestUtils.hpp>
#include <doctest/doctest.h>
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index 0e969c1a5c..70cffea2b2 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -8,7 +8,7 @@
#include <Network.hpp>
#include <ResolveType.hpp>
#include <armnn/INetwork.hpp>
-#include <test/TestUtils.hpp>
+#include <TestUtils.hpp>
#include <doctest/doctest.h>
diff --git a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
index 03d0d22f95..523ffcf44f 100644
--- a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
+++ b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
index 38a65a6173..152e79925b 100644
--- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp
+++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index 68d277a4bd..09bf9ae7d9 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
index 694b103091..599b44aa3e 100644
--- a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
index 4b6dfe582b..1e03140b38 100644
--- a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
index 98c84d4fc2..cfd1a23411 100644
--- a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index f862315220..d87d3f08b5 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Network.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
index fdd0a6ddd3..b143078e67 100644
--- a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp b/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
index 46b06a55c7..b3f9ed8780 100644
--- a/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
+++ b/src/armnn/test/optimizations/RedirectMembersToConstantInputsTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
index 692f371356..cf1dfa0d10 100644
--- a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
+++ b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
@@ -3,8 +3,8 @@
// SPDX-License-Identifier: MIT
//
-#include "../GraphUtils.hpp"
-#include "../TestUtils.hpp"
+#include <GraphUtils.hpp>
+#include <TestUtils.hpp>
#include <armnn/INetwork.hpp>
diff --git a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
index 069d28457e..e66bb75b36 100644
--- a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
+++ b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
diff --git a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
index 5d1d950573..371f3acadd 100644
--- a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
@@ -3,7 +3,7 @@
// SPDX-License-Identifier: MIT
//
-#include "../TestUtils.hpp"
+#include <TestUtils.hpp>
#include <Optimizer.hpp>
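Taken together, the per-file edits above reduce to one mechanical include change for any test still pointing into src/armnn/test (assuming the build places the armnnTestUtils headers on the include path):

    // before: relative include into src/armnn/test
    #include "../TestUtils.hpp"

    // after: header resolved from the armnnTestUtils library
    #include <TestUtils.hpp>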