path: root/src/armnnTestUtils
author     Sadik Armagan <sadik.armagan@arm.com>    2021-11-24 15:47:28 +0000
committer  Sadik Armagan <sadik.armagan@arm.com>    2021-12-14 11:02:41 +0000
commit     a097d2a0ed8e30d5aaf6d29ec18d0c39201b7b67 (patch)
tree       947e587bc42d07f52c55b155308b5ea5bd3ebacd /src/armnnTestUtils
parent     bc14881a76699dd942e94265116da68a6466455e (diff)
download   armnn-a097d2a0ed8e30d5aaf6d29ec18d0c39201b7b67.tar.gz
IVGCVSW-6453 'Move the ArmNN Test Utils code to a physically separate directory'
* Created include/armnnTestUtils directory
* Moved Arm NN test utils files into armnnTestUtils directory

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I03ac54c645c41c52650c4c03b6a58fb1481fef5d
Diffstat (limited to 'src/armnnTestUtils')
-rwxr-xr-x  src/armnnTestUtils/CMakeLists.txt          50
-rw-r--r--  src/armnnTestUtils/CommonTestUtils.cpp     70
-rw-r--r--  src/armnnTestUtils/CommonTestUtils.hpp    119
-rw-r--r--  src/armnnTestUtils/CreateWorkload.hpp    2316
-rw-r--r--  src/armnnTestUtils/DataTypeUtils.hpp       45
-rw-r--r--  src/armnnTestUtils/GraphUtils.cpp          78
-rw-r--r--  src/armnnTestUtils/GraphUtils.hpp          25
-rw-r--r--  src/armnnTestUtils/TensorCopyUtils.cpp     23
-rw-r--r--  src/armnnTestUtils/TensorHelpers.hpp      235
-rw-r--r--  src/armnnTestUtils/TestUtils.cpp           62
-rw-r--r--  src/armnnTestUtils/TestUtils.hpp           58
-rw-r--r--  src/armnnTestUtils/UnitTests.cpp           67
-rw-r--r--  src/armnnTestUtils/UnitTests.hpp          191
-rw-r--r--  src/armnnTestUtils/WorkloadTestUtils.hpp  113
14 files changed, 3452 insertions, 0 deletions
diff --git a/src/armnnTestUtils/CMakeLists.txt b/src/armnnTestUtils/CMakeLists.txt
new file mode 100755
index 0000000000..3738fad033
--- /dev/null
+++ b/src/armnnTestUtils/CMakeLists.txt
@@ -0,0 +1,50 @@
+#
+# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+# armnnTestUtils library provides useful test functions for backend developers.
+set(armnnTestUtils_sources)
+list(APPEND armnnTestUtils_sources
+ ../../include/armnnTestUtils/DataLayoutUtils.hpp
+ ../../include/armnnTestUtils/LayerTestResult.hpp
+ ../../include/armnnTestUtils/PredicateResult.hpp
+ ../../include/armnnTestUtils/TensorCopyUtils.hpp
+ TensorHelpers.hpp
+ CreateWorkload.hpp
+ CommonTestUtils.cpp
+ CommonTestUtils.hpp
+ DataTypeUtils.hpp
+ GraphUtils.cpp
+ GraphUtils.hpp
+ TensorCopyUtils.cpp
+ TestUtils.cpp
+ TestUtils.hpp
+ UnitTests.cpp
+ UnitTests.hpp
+ WorkloadTestUtils.hpp
+ )
+
+add_library_ex(armnnTestUtils SHARED ${armnnTestUtils_sources})
+
+set_target_properties(armnnTestUtils PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
+
+target_include_directories(armnnTestUtils
+ PUBLIC
+ $<INSTALL_INTERFACE:include>
+ $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+ PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}/src)
+
+target_include_directories(armnnTestUtils PRIVATE ../armnn)
+target_include_directories(armnnTestUtils PRIVATE ../armnnUtils)
+target_include_directories(armnnTestUtils PRIVATE ../backends)
+target_include_directories(armnnTestUtils PRIVATE ../profiling)
+
+install(TARGETS armnnTestUtils
+ EXPORT armnn-targets
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+add_library(Armnn::armnnTestUtils ALIAS armnnTestUtils)
\ No newline at end of file
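
(For orientation: the EXPORT and ALIAS lines above are what let a backend's own test build consume this library. A minimal consumer sketch follows, with hypothetical target and file names that are not part of this change:)

    # Hypothetical consumer CMakeLists.txt snippet for a backend's unit tests (illustrative names only).
    add_executable(myBackendUnitTests MyBackendCreateWorkloadTests.cpp)
    # Link against the alias/exported target created by src/armnnTestUtils/CMakeLists.txt above.
    target_link_libraries(myBackendUnitTests PRIVATE Armnn::armnnTestUtils)
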
diff --git a/src/armnnTestUtils/CommonTestUtils.cpp b/src/armnnTestUtils/CommonTestUtils.cpp
new file mode 100644
index 0000000000..c85330577d
--- /dev/null
+++ b/src/armnnTestUtils/CommonTestUtils.cpp
@@ -0,0 +1,70 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "CommonTestUtils.hpp"
+
+#include <armnn/backends/IBackendInternal.hpp>
+
+using namespace armnn;
+
+SubgraphView::InputSlots CreateInputsFrom(const std::vector<Layer*>& layers)
+{
+ SubgraphView::InputSlots result;
+ for (auto&& layer : layers)
+ {
+ for (auto&& it = layer->BeginInputSlots(); it != layer->EndInputSlots(); ++it)
+ {
+ result.push_back(&(*it));
+ }
+ }
+ return result;
+}
+
+SubgraphView::OutputSlots CreateOutputsFrom(const std::vector<Layer*>& layers)
+{
+ SubgraphView::OutputSlots result;
+ for (auto && layer : layers)
+ {
+ for (auto&& it = layer->BeginOutputSlots(); it != layer->EndOutputSlots(); ++it)
+ {
+ result.push_back(&(*it));
+ }
+ }
+ return result;
+}
+
+SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::InputSlots&& inputs,
+ SubgraphView::OutputSlots&& outputs,
+ SubgraphView::Layers&& layers)
+{
+ return std::make_unique<SubgraphView>(std::move(inputs), std::move(outputs), std::move(layers));
+}
+
+armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId)
+{
+ auto& backendRegistry = BackendRegistryInstance();
+ auto backendFactory = backendRegistry.GetFactory(backendId);
+ auto backendObjPtr = backendFactory();
+
+ return backendObjPtr;
+}
+
+armnn::TensorShape MakeTensorShape(unsigned int batches,
+ unsigned int channels,
+ unsigned int height,
+ unsigned int width,
+ armnn::DataLayout layout)
+{
+ using namespace armnn;
+ switch (layout)
+ {
+ case DataLayout::NCHW:
+ return TensorShape{ batches, channels, height, width };
+ case DataLayout::NHWC:
+ return TensorShape{ batches, height, width, channels };
+ default:
+ throw InvalidArgumentException(std::string("Unsupported data layout: ") + GetDataLayoutName(layout));
+ }
+}
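
(A brief usage sketch of the helpers implemented in CommonTestUtils.cpp above; "CpuRef" is an assumed backend id here, and any id registered with the BackendRegistry would do:)

    // Minimal sketch, not part of the change: builds a layout-aware shape and a backend object
    // using the helpers above. Assumes a backend registered under the id "CpuRef".
    #include "CommonTestUtils.hpp"

    armnn::IBackendInternalUniquePtr MakeBackendAndShape(armnn::DataLayout layout,
                                                         armnn::TensorShape& outShape)
    {
        // MakeTensorShape orders { batches, channels, height, width } according to the layout.
        outShape = MakeTensorShape(1, 3, 8, 8, layout);   // NCHW -> {1,3,8,8}, NHWC -> {1,8,8,3}

        // CreateBackendObject fetches the factory from the BackendRegistry and constructs the backend.
        return CreateBackendObject(armnn::BackendId("CpuRef"));
    }
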
diff --git a/src/armnnTestUtils/CommonTestUtils.hpp b/src/armnnTestUtils/CommonTestUtils.hpp
new file mode 100644
index 0000000000..a4babc5568
--- /dev/null
+++ b/src/armnnTestUtils/CommonTestUtils.hpp
@@ -0,0 +1,119 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <Graph.hpp>
+#include <SubgraphView.hpp>
+#include <SubgraphViewSelector.hpp>
+#include <ResolveType.hpp>
+
+#include <armnn/BackendRegistry.hpp>
+
+#include <armnn/Types.hpp>
+#include <backendsCommon/TensorHandle.hpp>
+
+#include <algorithm>
+#include <random>
+#include <vector>
+
+// Checks that two collections have the exact same contents (in any order)
+// The given collections must not contain duplicates for the comparison to be exact
+// Cannot use std::sort here because std lists have their own std::list::sort method
+template <typename CollectionType>
+bool AreEqual(const CollectionType& lhs, const CollectionType& rhs)
+{
+ if (lhs.size() != rhs.size())
+ {
+ return false;
+ }
+
+ auto lhs_it = std::find_if(lhs.begin(), lhs.end(), [&rhs](auto& item)
+ {
+ return std::find(rhs.begin(), rhs.end(), item) == rhs.end();
+ });
+
+ return lhs_it == lhs.end();
+}
+
+// Checks that the given collection contains the specified item
+template <typename CollectionType>
+bool Contains(const CollectionType& collection, const typename CollectionType::value_type& item)
+{
+ return std::find(collection.begin(), collection.end(), item) != collection.end();
+}
+
+// Checks that the given map contains the specified key
+template <typename MapType>
+bool Contains(const MapType& map, const typename MapType::key_type& key)
+{
+ return map.find(key) != map.end();
+}
+
+// Utility template for comparing tensor elements
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+inline bool Compare(T a, T b, float tolerance = 0.000001f)
+{
+ if (ArmnnType == armnn::DataType::Boolean)
+ {
+ // NOTE: Boolean is represented as uint8_t (with zero equals
+ // false and everything else equals true), therefore values
+ // need to be cast to bool before comparing them
+ return static_cast<bool>(a) == static_cast<bool>(b);
+ }
+
+ // NOTE: All other types can be cast to float and compared with
+ // a certain level of tolerance
+ return std::fabs(static_cast<float>(a) - static_cast<float>(b)) <= tolerance;
+}
+
+template <typename ConvolutionLayer>
+void SetWeightAndBias(ConvolutionLayer* layer, const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& biasInfo)
+{
+ layer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weightInfo);
+ layer->m_Bias = std::make_unique<armnn::ScopedTensorHandle>(biasInfo);
+
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+}
+
+armnn::SubgraphView::InputSlots CreateInputsFrom(const std::vector<armnn::Layer*>& layers);
+
+armnn::SubgraphView::OutputSlots CreateOutputsFrom(const std::vector<armnn::Layer*>& layers);
+
+armnn::SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(armnn::SubgraphView::InputSlots&& inputs,
+ armnn::SubgraphView::OutputSlots&& outputs,
+ armnn::SubgraphView::Layers&& layers);
+
+armnn::IBackendInternalUniquePtr CreateBackendObject(const armnn::BackendId& backendId);
+
+armnn::TensorShape MakeTensorShape(unsigned int batches,
+ unsigned int channels,
+ unsigned int height,
+ unsigned int width,
+ armnn::DataLayout layout);
+
+template<typename DataType>
+static std::vector<DataType> GenerateRandomData(size_t size)
+{
+ constexpr bool isIntegerType = std::is_integral<DataType>::value;
+ using Distribution =
+ typename std::conditional<isIntegerType,
+ std::uniform_int_distribution<DataType>,
+ std::uniform_real_distribution<DataType>>::type;
+
+ static constexpr DataType lowerLimit = std::numeric_limits<DataType>::min();
+ static constexpr DataType upperLimit = std::numeric_limits<DataType>::max();
+
+ static Distribution distribution(lowerLimit, upperLimit);
+ static std::default_random_engine generator;
+
+ std::vector<DataType> randomData(size);
+ generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
+
+ return randomData;
+}
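
(Similarly, a short sketch of how the templated helpers declared in CommonTestUtils.hpp compose; the element count and data type are arbitrary illustration choices:)

    // Minimal sketch, not part of the change: generates random data and compares it element-wise
    // with the default 1e-6 tolerance, then checks order-independent equality of the collections.
    #include "CommonTestUtils.hpp"

    bool SketchCompareRandomVectors()
    {
        std::vector<float> a = GenerateRandomData<float>(16);
        std::vector<float> b = a;   // identical copy, so every comparison below should pass

        bool allClose = true;
        for (size_t i = 0; i < a.size(); ++i)
        {
            // Compare<> casts both values to float and applies the default tolerance.
            allClose = allClose && Compare<armnn::DataType::Float32>(a[i], b[i]);
        }

        // AreEqual checks that both collections hold the same elements, in any order.
        return allClose && AreEqual(a, b);
    }
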
diff --git a/src/armnnTestUtils/CreateWorkload.hpp b/src/armnnTestUtils/CreateWorkload.hpp
new file mode 100644
index 0000000000..ea8a436177
--- /dev/null
+++ b/src/armnnTestUtils/CreateWorkload.hpp
@@ -0,0 +1,2316 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <Graph.hpp>
+#include <Network.hpp>
+#include <ResolveType.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <backendsCommon/TensorHandle.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <doctest/doctest.h>
+
+#include <utility>
+
+using namespace armnn;
+
+namespace
+{
+
+using namespace std;
+
+// Calls CreateWorkload for a layer, and checks the returned pointer is of the correct type.
+template<typename Workload>
+std::unique_ptr<Workload> MakeAndCheckWorkload(Layer& layer,
+ const IWorkloadFactory& factory,
+ const ModelOptions& modelOptions = {})
+{
+ std::unique_ptr<IWorkload> workload = layer.CreateWorkload(factory);
+ CHECK_MESSAGE(workload.get() == PolymorphicDowncast<Workload*>(workload.get()),
+ "Cannot convert to derived class");
+ std::string reasonIfUnsupported;
+ layer.SetBackendId(factory.GetBackendId());
+ CHECK(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions));
+ return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
+}
+
+// Helper function to create tensor handles for workloads, assuming they all use the same factory.
+void CreateTensorHandles(armnn::Graph& graph,
+ armnn::IWorkloadFactory& factory)
+{
+ TensorHandleFactoryRegistry tmpRegistry;
+ for (auto&& layer : graph.TopologicalSort())
+ {
+ layer->CreateTensorHandles(tmpRegistry, factory);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// The following functions are called by backendsCommon/test/CreateWorkload*.cpp
+// They build very simple graphs, and then create a workload.
+// Some checks are performed on the workload to ensure parameters have been passed correctly.
+// They return the created workloads so that backend-specific checks can be performed.
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+template <typename ActivationWorkload, armnn::DataType DataType>
+std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ ActivationDescriptor layerDesc;
+ layerDesc.m_Function = ActivationFunction::Abs;
+ layerDesc.m_A = 3.5f;
+ layerDesc.m_B = -10.0f;
+
+ ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(layerDesc, "layer");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({1, 1}, DataType);
+
+ Connect(input, layer, tensorInfo);
+ Connect(layer, output, tensorInfo);
+
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, factory);
+
+ ActivationQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
+ CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
+ CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename WorkloadType,
+ typename DescriptorType,
+ typename LayerType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateElementwiseWorkloadTest(armnn::IWorkloadFactory & factory,
+ armnn::Graph & graph)
+{
+ // Creates the layer we're testing.
+ Layer* const layer = graph.AddLayer<LayerType>("layer");
+
+ // Creates extra layers.
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
+ Connect(input1, layer, tensorInfo, 0, 0);
+ Connect(input2, layer, tensorInfo, 0, 1);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+ DescriptorType queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template<typename WorkloadType,
+ typename DescriptorType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateSubtractionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ SubtractionLayer* const layer = graph.AddLayer<SubtractionLayer>("layer");
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Creates extra layers.
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
+ Connect(input1, layer, tensorInfo, 0, 0);
+ Connect(input2, layer, tensorInfo, 0, 1);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor>
+ activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+ DescriptorType queueDescriptor = workload->GetData();
+
+ const ActivationDescriptor* queueDescBlobPtr =
+ queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ return workload;
+}
+
+template<typename WorkloadType,
+ typename DescriptorType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateMultiplicationWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ MultiplicationLayer* const layer = graph.AddLayer<MultiplicationLayer>("layer");
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Creates extra layers.
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
+ Connect(input1, layer, tensorInfo, 0, 0);
+ Connect(input2, layer, tensorInfo, 0, 1);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor>
+ activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+ DescriptorType queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ const ActivationDescriptor* queueDescBlobPtr =
+ queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ return workload;// Returns so we can do extra, backend-specific tests.
+}
+
+template<typename WorkloadType,
+ typename DescriptorType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateAdditionWithBlobWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ AdditionLayer* const layer = graph.AddLayer<AdditionLayer>("layer");
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Creates extra layers.
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({2, 3}, DataType);
+ Connect(input1, layer, tensorInfo, 0, 0);
+ Connect(input2, layer, tensorInfo, 0, 1);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor>
+ activationDescPtr = layer->template GetAdditionalInformation<ActivationDescriptor>();
+
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+
+ DescriptorType queueDescriptor = workload->GetData();
+ const ActivationDescriptor* queueDescBlobPtr =
+ queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ return workload;
+}
+
+template <typename WorkloadType,
+ typename DescriptorType,
+ armnn::DataType DataType>
+std::unique_ptr<WorkloadType> CreateElementwiseUnaryWorkloadTest(armnn::IWorkloadFactory & factory,
+ armnn::Graph & graph,
+ armnn::UnaryOperation op)
+{
+ ElementwiseUnaryDescriptor desc = ElementwiseUnaryDescriptor(op);
+ Layer* const layer = graph.AddLayer<armnn::ElementwiseUnaryLayer>(desc, "layer");
+
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ armnn::TensorInfo tensorInfo({ 2, 3 }, DataType);
+ Connect(input, layer, tensorInfo, 0, 0);
+ Connect(layer, output, tensorInfo, 0, 0);
+ CreateTensorHandles(graph, factory);
+
+ auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
+ DescriptorType queueDescriptor = workload->GetData();
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ return workload;
+}
+
+template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
+std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
+{
+ TensorShape tensorShape;
+ switch (dataLayout)
+ {
+ case DataLayout::NHWC:
+ tensorShape = { 2, 4, 4, 3 };
+ break;
+ case DataLayout::NCHW:
+ default:
+ tensorShape = { 2, 3, 4, 4 };
+ }
+
+ // Creates the layer we're testing.
+ BatchNormalizationDescriptor layerDesc;
+ layerDesc.m_Eps = 0.05f;
+ layerDesc.m_DataLayout = dataLayout;
+
+ BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
+
+ armnn::TensorInfo weightInfo({3}, DataType);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Mean->Allocate();
+ layer->m_Variance->Allocate();
+ layer->m_Beta->Allocate();
+ layer->m_Gamma->Allocate();
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo(tensorShape, DataType);
+ Connect(input, layer, tensorInfo);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
+ BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
+std::unique_ptr<BatchNormalizationWorkloadType> CreateBatchNormalizationWithBlobWorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
+{
+ TensorShape tensorShape;
+ switch (dataLayout)
+ {
+ case DataLayout::NHWC:
+ tensorShape = { 2, 4, 4, 3 };
+ break;
+ case DataLayout::NCHW:
+ default:
+ tensorShape = { 2, 3, 4, 4 };
+ }
+
+ // Creates the layer we're testing.
+ BatchNormalizationDescriptor layerDesc;
+ layerDesc.m_Eps = 0.05f;
+ layerDesc.m_DataLayout = dataLayout;
+
+ BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
+
+ armnn::TensorInfo weightInfo({3}, DataType);
+ layer->m_Mean = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Beta = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Gamma = std::make_unique<ScopedTensorHandle>(weightInfo);
+ layer->m_Mean->Allocate();
+ layer->m_Variance->Allocate();
+ layer->m_Beta->Allocate();
+ layer->m_Gamma->Allocate();
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo(tensorShape, DataType);
+ Connect(input, layer, tensorInfo);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
+ BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename Convolution2dWorkload, armnn::DataType DataType>
+std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW,
+ const ModelOptions& modelOptions = {})
+{
+ // Creates the layer we're testing.
+ Convolution2dDescriptor layerDesc;
+ layerDesc.m_PadLeft = 3;
+ layerDesc.m_PadRight = 3;
+ layerDesc.m_PadTop = 1;
+ layerDesc.m_PadBottom = 1;
+ layerDesc.m_StrideX = 2;
+ layerDesc.m_StrideY = 4;
+ layerDesc.m_BiasEnabled = true;
+ layerDesc.m_DataLayout = dataLayout;
+
+ Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
+
+ TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
+
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo(inputShape, DataType));
+ Connect(layer, output, TensorInfo(outputShape, DataType));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
+
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
+ TensorInfo({2}, GetBiasDataType(DataType))));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template<typename Convolution2dWorkload, armnn::DataType DataType>
+std::unique_ptr<Convolution2dWorkload> CreateConvolution2dFusedActivationWithBlobWorkloadTest(
+ armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW,
+ const ModelOptions& modelOptions = {})
+{
+ // Creates the layer we're testing.
+ Convolution2dDescriptor layerDesc;
+ layerDesc.m_PadLeft = 3;
+ layerDesc.m_PadRight = 3;
+ layerDesc.m_PadTop = 1;
+ layerDesc.m_PadBottom = 1;
+ layerDesc.m_StrideX = 2;
+ layerDesc.m_StrideY = 4;
+ layerDesc.m_BiasEnabled = true;
+ layerDesc.m_DataLayout = dataLayout;
+
+
+ Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
+
+ TensorShape weightShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 5, 3} : TensorShape{2, 5, 3, 3};
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 8, 16} : TensorShape{2, 8, 16, 3};
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 2, 2, 10} : TensorShape{2, 2, 10, 2};
+
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo(inputShape, DataType));
+ Connect(layer, output, TensorInfo(outputShape, DataType));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
+
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
+ TensorInfo({2}, GetBiasDataType(DataType))));
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename Convolution2dWorkload, armnn::DataType DataType>
+std::unique_ptr<Convolution2dWorkload> CreateConvolution2dWorkloadFastMathTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW,
+ const ModelOptions& modelOptions = {})
+{
+ // Creates the layer we're testing.
+ Convolution2dDescriptor layerDesc;
+ layerDesc.m_PadLeft = 0;
+ layerDesc.m_PadRight = 0;
+ layerDesc.m_PadTop = 0;
+ layerDesc.m_PadBottom = 0;
+ layerDesc.m_StrideX = 1;
+ layerDesc.m_StrideY = 1;
+ layerDesc.m_BiasEnabled = false;
+ layerDesc.m_DataLayout = dataLayout;
+
+ Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
+
+ TensorShape weightShape = TensorShape{32, 32, 3, 3};
+ TensorShape inputShape = TensorShape{1, 32, 149, 149};
+ TensorShape outputShape = TensorShape{1, 32, 147, 147};
+
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo(weightShape, DataType));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({2}, GetBiasDataType(DataType)));
+
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo(inputShape, DataType));
+ Connect(layer, output, TensorInfo(outputShape, DataType));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
+
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 0);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 0);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 0);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 0);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename LstmWorkload>
+std::unique_ptr<LstmWorkload> CreateLstmWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // This parameter setting is for withCifgWithPeepholeNoProjection
+ LstmDescriptor layerDesc;
+ layerDesc.m_ActivationFunc = 4;
+ layerDesc.m_ClippingThresCell = 0.0f;
+ layerDesc.m_ClippingThresProj = 0.0f;
+ layerDesc.m_CifgEnabled = true;
+ layerDesc.m_PeepholeEnabled = true;
+ layerDesc.m_ProjectionEnabled = false;
+
+ LstmLayer* const layer = graph.AddLayer<LstmLayer>(layerDesc, "layer");
+ unsigned int batchSize = 2;
+ unsigned int inputSize = 2;
+ unsigned int numUnits = 4;
+ unsigned int outputSize = 4;
+
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits, inputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits, inputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits, inputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits, outputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_RecurrentToCellWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits, outputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits, outputSize }, DataType::Float32));
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+
+ layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
+ layer->m_BasicParameters.m_InputToCellWeights->Allocate();
+ layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
+ layer->m_BasicParameters.m_ForgetGateBias->Allocate();
+ layer->m_BasicParameters.m_CellBias->Allocate();
+ layer->m_BasicParameters.m_OutputGateBias->Allocate();
+
+
+ if (layerDesc.m_PeepholeEnabled)
+ {
+ layer->m_PeepholeParameters.m_CellToForgetWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_PeepholeParameters.m_CellToOutputWeights = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({ numUnits }, DataType::Float32));
+ layer->m_PeepholeParameters.m_CellToForgetWeights->Allocate();
+ layer->m_PeepholeParameters.m_CellToOutputWeights->Allocate();
+ }
+
+ // create input and output layers
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
+ Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
+ Layer* const scratchBuffer = graph.AddLayer<OutputLayer>(0, "scratchBuffer");
+ Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
+ Layer* const cellStateOut = graph.AddLayer<OutputLayer>(2, "cellStateOut");
+ Layer* const output = graph.AddLayer<OutputLayer>(3, "output");
+
+ // connect up
+ armnn::TensorInfo lstmTensorInfo1({ batchSize, inputSize }, DataType::Float32);
+ armnn::TensorInfo lstmTensorInfo2({ batchSize, numUnits}, DataType::Float32);
+ armnn::TensorInfo lstmTensorInfo3({ batchSize, outputSize }, DataType::Float32);
+ armnn::TensorInfo lstmTensorInfoScratchBuff({ batchSize, numUnits * (layerDesc.m_CifgEnabled ? 3 : 4) },
+ DataType::Float32);
+ Connect(input, layer, lstmTensorInfo1, 0, 0);
+ Connect(cellStateIn, layer, lstmTensorInfo2, 0, 1);
+ Connect(outputStateIn, layer, lstmTensorInfo3, 0, 2);
+ Connect(layer, scratchBuffer, lstmTensorInfoScratchBuff, 0, 0);
+ Connect(layer, outputStateOut, lstmTensorInfo3, 1, 0);
+ Connect(layer, cellStateOut, lstmTensorInfo2, 2, 0);
+ Connect(layer, output, lstmTensorInfo3, 3, 0);
+
+ CreateTensorHandles(graph, factory);
+
+ // make the workload and check it
+ auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, factory);
+ LstmQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
+ CHECK(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
+ CHECK(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 4);
+
+ CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
+ DataType::Float32)));
+ CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
+ DataType::Float32)));
+ CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
+ return workload;
+}
+
+template <typename QuantizedLstmWorkload>
+std::unique_ptr<QuantizedLstmWorkload> CreateQuantizedLstmWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ auto layer = graph.AddLayer<QuantizedLstmLayer>("quantizedLstmlayer");
+ unsigned int numBatches = 2;
+ unsigned int inputSize = 2;
+ unsigned int outputSize = 4;
+
+ // Scale/Offset for input/output, cellState In/Out, weights, bias
+ float inputOutputScale = 0.0078125f;
+ int32_t inputOutputOffset = 128;
+
+ float cellStateScale = 0.00048828125f;
+ int32_t cellStateOffset = 0;
+
+ float weightsScale = 0.00408021f;
+ int32_t weightsOffset = 100;
+
+ float biasScale = 3.1876640625e-05f;
+ int32_t biasOffset = 0;
+
+ // Weights and bias tensor and quantization info
+ armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
+ armnn::DataType::QAsymmU8,
+ weightsScale,
+ weightsOffset);
+
+ armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
+ armnn::DataType::QAsymmU8,
+ weightsScale,
+ weightsOffset);
+
+ armnn::TensorInfo biasInfo({outputSize},
+ armnn::DataType::Signed32,
+ biasScale,
+ biasOffset);
+
+ // Weights and bias
+ layer->m_QuantizedLstmParameters.m_InputToInputWeights =
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_QuantizedLstmParameters.m_InputToForgetWeights =
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_QuantizedLstmParameters.m_InputToCellWeights =
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_QuantizedLstmParameters.m_InputToOutputWeights =
+ std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+
+ layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights =
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
+ layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights =
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
+ layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights =
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
+ layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights =
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
+
+ layer->m_QuantizedLstmParameters.m_InputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_QuantizedLstmParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+
+ // Allocate weights and bias
+ layer->m_QuantizedLstmParameters.m_InputToInputWeights->Allocate();
+ layer->m_QuantizedLstmParameters.m_InputToForgetWeights->Allocate();
+ layer->m_QuantizedLstmParameters.m_InputToCellWeights->Allocate();
+ layer->m_QuantizedLstmParameters.m_InputToOutputWeights->Allocate();
+
+ layer->m_QuantizedLstmParameters.m_RecurrentToInputWeights->Allocate();
+ layer->m_QuantizedLstmParameters.m_RecurrentToForgetWeights->Allocate();
+ layer->m_QuantizedLstmParameters.m_RecurrentToCellWeights->Allocate();
+ layer->m_QuantizedLstmParameters.m_RecurrentToOutputWeights->Allocate();
+
+ layer->m_QuantizedLstmParameters.m_InputGateBias->Allocate();
+ layer->m_QuantizedLstmParameters.m_ForgetGateBias->Allocate();
+ layer->m_QuantizedLstmParameters.m_CellBias->Allocate();
+ layer->m_QuantizedLstmParameters.m_OutputGateBias->Allocate();
+
+ // Create input and output layers
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const cellStateIn = graph.AddLayer<InputLayer>(1, "cellStateIn");
+ Layer* const outputStateIn = graph.AddLayer<InputLayer>(2, "outputStateIn");
+
+ Layer* const cellStateOut = graph.AddLayer<OutputLayer>(0, "cellStateOut");
+ Layer* const outputStateOut = graph.AddLayer<OutputLayer>(1, "outputStateOut");
+
+ // Input/output tensor info and quantization info
+ armnn::TensorInfo inputInfo({numBatches , inputSize},
+ armnn::DataType::QAsymmU8,
+ inputOutputScale,
+ inputOutputOffset);
+
+ armnn::TensorInfo cellStateInfo({numBatches , outputSize},
+ armnn::DataType::QSymmS16,
+ cellStateScale,
+ cellStateOffset);
+
+ armnn::TensorInfo outputStateInfo({numBatches , outputSize},
+ armnn::DataType::QAsymmU8,
+ inputOutputScale,
+ inputOutputOffset);
+
+ // Connect input/output slots
+ Connect(input, layer, inputInfo, 0, 0);
+ Connect(cellStateIn, layer, cellStateInfo, 0, 1);
+ Connect(outputStateIn, layer, outputStateInfo, 0, 2);
+
+ Connect(layer, cellStateOut, cellStateInfo, 0, 0);
+ Connect(layer, outputStateOut, outputStateInfo, 1, 0);
+
+ CreateTensorHandles(graph, factory);
+
+ // Create workload and check layer support
+ auto workload = MakeAndCheckWorkload<QuantizedLstmWorkload>(*layer, factory);
+ QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
+
+ // Validate input/output sizes
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 2);
+
+ // Validate weight tensor info
+ CHECK((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
+
+ CHECK((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
+
+ CHECK((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
+
+ return workload;
+}
+
+template <typename QLstmWorkload>
+std::unique_ptr<QLstmWorkload> CreateQLstmWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ QLstmDescriptor layerDesc;
+ layerDesc.m_CifgEnabled = true;
+ layerDesc.m_PeepholeEnabled = false;
+ layerDesc.m_ProjectionEnabled = false;
+ layerDesc.m_LayerNormEnabled = true;
+
+ layerDesc.m_CellClip = 0.0f;
+ layerDesc.m_ProjectionClip = 0.0f;
+
+ layerDesc.m_HiddenStateZeroPoint = 0;
+ layerDesc.m_HiddenStateScale = 0.007f;
+
+ layerDesc.m_InputIntermediateScale = 0.007059f;
+ layerDesc.m_ForgetIntermediateScale = 0.007812f;
+ layerDesc.m_CellIntermediateScale = 0.007059f;
+ layerDesc.m_OutputIntermediateScale = 0.007812f;
+
+ QLstmLayer* const layer = graph.AddLayer<QLstmLayer>(layerDesc, "qLstm");
+
+ unsigned int numBatches = 2;
+ unsigned int inputSize = 4;
+ unsigned int numUnits = 4;
+ unsigned int outputSize = 4;
+
+ // Scale/Offset quantization info
+ float inputScale = 0.0078125f;
+ int32_t inputOffset = 0;
+
+ // if (!projectionEnabled) outputScale == hiddenStateScale
+ float outputScale = layerDesc.m_HiddenStateScale;
+ int32_t outputOffset = layerDesc.m_HiddenStateZeroPoint;
+
+ float cellStateScale = 3.05176e-05f;
+ int32_t cellStateOffset = 0;
+
+ float weightsScale = 0.00784314f;
+ int32_t weightsOffset = 0;
+
+ float layerNormScale = 3.05182e-05f;
+ int32_t layerNormOffset = 0;
+
+ float biasScale = layerNormScale / 1024;
+ int32_t biasOffset = 0;
+
+ // Weights and bias tensor and quantization info
+ armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
+ armnn::DataType::QSymmS8,
+ weightsScale,
+ weightsOffset);
+
+ armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
+ armnn::DataType::QSymmS8,
+ weightsScale,
+ weightsOffset);
+
+ armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
+
+ armnn::TensorInfo layerNormWeightsInfo({numUnits}, armnn::DataType::QSymmS16, layerNormScale, layerNormOffset);
+
+ // Create and allocate tensors
+ layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+ layer->m_BasicParameters.m_InputToOutputWeights = std::make_unique<ScopedTensorHandle>(inputWeightsInfo);
+
+ layer->m_BasicParameters.m_RecurrentToForgetWeights =
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
+ layer->m_BasicParameters.m_RecurrentToCellWeights =
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
+ layer->m_BasicParameters.m_RecurrentToOutputWeights =
+ std::make_unique<ScopedTensorHandle>(recurrentWeightsInfo);
+
+ layer->m_BasicParameters.m_ForgetGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_CellBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+ layer->m_BasicParameters.m_OutputGateBias = std::make_unique<ScopedTensorHandle>(biasInfo);
+
+ layer->m_LayerNormParameters.m_ForgetLayerNormWeights =
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
+ layer->m_LayerNormParameters.m_CellLayerNormWeights =
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
+ layer->m_LayerNormParameters.m_OutputLayerNormWeights =
+ std::make_unique<ScopedTensorHandle>(layerNormWeightsInfo);
+
+ layer->m_BasicParameters.m_InputToForgetWeights->Allocate();
+ layer->m_BasicParameters.m_InputToCellWeights->Allocate();
+ layer->m_BasicParameters.m_InputToOutputWeights->Allocate();
+
+ layer->m_BasicParameters.m_RecurrentToForgetWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToCellWeights->Allocate();
+ layer->m_BasicParameters.m_RecurrentToOutputWeights->Allocate();
+
+ layer->m_BasicParameters.m_ForgetGateBias->Allocate();
+ layer->m_BasicParameters.m_CellBias->Allocate();
+ layer->m_BasicParameters.m_OutputGateBias->Allocate();
+
+ layer->m_LayerNormParameters.m_ForgetLayerNormWeights->Allocate();
+ layer->m_LayerNormParameters.m_CellLayerNormWeights->Allocate();
+ layer->m_LayerNormParameters.m_OutputLayerNormWeights->Allocate();
+
+ // Input and output layers
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const outputStateIn = graph.AddLayer<InputLayer>(1, "outputStateIn");
+ Layer* const cellStateIn = graph.AddLayer<InputLayer>(2, "cellStateIn");
+
+ Layer* const outputStateOut = graph.AddLayer<OutputLayer>(0, "outputStateOut");
+ Layer* const cellStateOut = graph.AddLayer<OutputLayer>(1, "cellStateOut");
+ Layer* const output = graph.AddLayer<OutputLayer>(2, "output");
+
+ // Input/Output tensor info
+ armnn::TensorInfo inputInfo({numBatches , inputSize},
+ armnn::DataType::QAsymmS8,
+ inputScale,
+ inputOffset);
+
+ armnn::TensorInfo cellStateInfo({numBatches , numUnits},
+ armnn::DataType::QSymmS16,
+ cellStateScale,
+ cellStateOffset);
+
+ armnn::TensorInfo outputStateInfo({numBatches , outputSize},
+ armnn::DataType::QAsymmS8,
+ outputScale,
+ outputOffset);
+
+ // Connect layers to slots
+ Connect(input, layer, inputInfo, 0, 0);
+ Connect(outputStateIn, layer, outputStateInfo, 0, 1);
+ Connect(cellStateIn, layer, cellStateInfo, 0, 2);
+
+ Connect(layer, outputStateOut, outputStateInfo, 0, 0);
+ Connect(layer, cellStateOut, cellStateInfo, 1, 0);
+ Connect(layer, output, outputStateInfo, 2, 0);
+
+ CreateTensorHandles(graph, factory);
+
+ // Create and check workload
+ auto workload = MakeAndCheckWorkload<QLstmWorkload>(*layer, factory);
+ QLstmQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_CellClip == 0.0f);
+ CHECK(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 3);
+
+ CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
+ CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
+
+ CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
+ CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
+
+ CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
+ CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
+
+ return workload;
+}
+
+template <typename Convolution2dWorkload, armnn::DataType DataType>
+std::unique_ptr<Convolution2dWorkload> CreateDirectConvolution2dWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ Convolution2dDescriptor layerDesc;
+ layerDesc.m_PadLeft = 1;
+ layerDesc.m_PadRight = 1;
+ layerDesc.m_PadTop = 1;
+ layerDesc.m_PadBottom = 1;
+ layerDesc.m_StrideX = 1;
+ layerDesc.m_StrideY = 1;
+ layerDesc.m_BiasEnabled = true;
+
+ Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
+
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
+
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({ 2, 3, 3, 3 }, DataType, inputsQScale));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>
+ (TensorInfo({2}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo({2, 3, 6, 6}, DataType, inputsQScale));
+ Connect(layer, output, TensorInfo({2, 2, 6, 6}, DataType, outputQScale));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory);
+
+ Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
+ DataType, inputsQScale)));
+ CHECK((queueDescriptor.m_Bias->GetTensorInfo()
+ == TensorInfo({2}, GetBiasDataType(DataType), inputsQScale)));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename DepthwiseConvolution2dFloat32Workload, armnn::DataType DataType>
+std::unique_ptr<DepthwiseConvolution2dFloat32Workload> CreateDepthwiseConvolution2dWorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
+{
+ // Creates the layer we're testing.
+ DepthwiseConvolution2dDescriptor layerDesc;
+ layerDesc.m_PadLeft = 1;
+ layerDesc.m_PadRight = 2;
+ layerDesc.m_PadTop = 1;
+ layerDesc.m_PadBottom = 2;
+ layerDesc.m_StrideX = 1;
+ layerDesc.m_StrideY = 1;
+ layerDesc.m_BiasEnabled = false;
+ layerDesc.m_DataLayout = dataLayout;
+
+ DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
+
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({1, 4, 4, 2}, DataType)); // [ 1, H, W, I*M ]
+ layer->m_Weight->Allocate();
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
+ TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
+ TensorShape{ 2, 2, 5, 5 } : TensorShape{ 2, 5, 5, 2 };
+
+ // Connects up.
+ Connect(input, layer, TensorInfo(inputShape, DataType));
+ Connect(layer, output, TensorInfo(outputShape, DataType));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, factory);
+
+ DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 2);
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == false);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 4, 4, 2}, DataType)));
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename FullyConnectedWorkload, armnn::DataType DataType>
+std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ FullyConnectedDescriptor layerDesc;
+ layerDesc.m_BiasEnabled = false;
+ layerDesc.m_TransposeWeightMatrix = true;
+
+ FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;
+
+ // As optimization isn't run, the member variables need to be updated.
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Weight->Allocate();
+
+ armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
+ weightsTensorInfo.SetConstant();
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ auto const weights = graph.AddLayer<ConstantLayer>("weights");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
+ weights->m_LayerOutput->Allocate();
+
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
+ Connect(weights, layer, weightsTensorInfo, 0, 1);
+ Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
+
+ FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename FullyConnectedWorkload, armnn::DataType DataType>
+std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWithBlobWorkloadTest
+ (armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ FullyConnectedDescriptor layerDesc;
+ layerDesc.m_BiasEnabled = true;
+ layerDesc.m_TransposeWeightMatrix = true;
+
+ FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;
+
+ // As optimization isn't run, the member variables need to be updated.
+ layer->m_Weight = std::make_unique<ScopedTensorHandle>(TensorInfo({7, 20}, DataType, inputsQScale, 0));
+ layer->m_Bias = std::make_unique<ScopedTensorHandle>(TensorInfo({7}, GetBiasDataType(DataType), inputsQScale));
+ layer->m_Weight->Allocate();
+ layer->m_Bias->Allocate();
+
+ armnn::TensorInfo weightsTensorInfo({7, 20}, DataType, inputsQScale);
+ armnn::TensorInfo biasesTensorInfo({7}, GetBiasDataType(DataType), inputsQScale);
+ weightsTensorInfo.SetConstant();
+ biasesTensorInfo.SetConstant();
+
+ auto activationDesc = std::make_shared<ActivationDescriptor>();
+ activationDesc->m_A = 10.0f;
+ activationDesc->m_B = 5.0f;
+ activationDesc->m_Function = armnn::ActivationFunction::BoundedReLu;
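+ // Illustrative note: BoundedReLu clamps each value to [m_B, m_A], i.e. min(m_A, max(m_B, x)),
+ // so the descriptor above describes a clamp to the range [5.0, 10.0].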
+
+ layer->SetAdditionalInfoForObject(activationDesc);
+
+ // Check that the additional information can be queried from the layer
+ std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
+ armnn::ActivationFunction::BoundedReLu);
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ auto const weights = graph.AddLayer<ConstantLayer>("weights");
+ auto const biases = graph.AddLayer<ConstantLayer>("biases");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ weights->m_LayerOutput = std::make_unique<ScopedTensorHandle>(weightsTensorInfo);
+ weights->m_LayerOutput->Allocate();
+ biases->m_LayerOutput = std::make_unique<ScopedTensorHandle>(biasesTensorInfo);
+ biases->m_LayerOutput->Allocate();
+
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
+ Connect(weights, layer, weightsTensorInfo, 0, 1);
+ Connect(biases, layer, biasesTensorInfo, 0, 2);
+ Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
+
+ FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
+
+ const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
+ IgnoreUnused(queueDescBlobPtr);
+
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+ ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+ ARMNN_ASSERT(
+ static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
+ );
+
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+ CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename FullyConnectedWorkload, armnn::DataType DataType>
+std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadWeightsBiasesAsInputsTest
+ (armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ FullyConnectedDescriptor layerDesc;
+ layerDesc.m_BiasEnabled = true;
+ layerDesc.m_TransposeWeightMatrix = true;
+ layerDesc.m_ConstantWeights = false;
+
+ FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+
+ float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0f;
+ float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0f;
+
+ // Creates extra layers with weights and biases as input layers.
+ Layer* const input = graph.AddLayer<InputLayer>(1, "input");
+ Layer* const weights = graph.AddLayer<InputLayer>(2, "weights");
+ Layer* const biases = graph.AddLayer<InputLayer>(3, "biases");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale), 0, 0);
+ Connect(weights, layer, TensorInfo({7, 20}, DataType, inputsQScale), 0, 1);
+ Connect(biases, layer, TensorInfo({7}, GetBiasDataType(DataType), inputsQScale), 0, 2);
+ Connect(layer, output, TensorInfo({3, 7}, DataType, outputQScale));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
+
+ FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
+
+ CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+ CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+ CHECK(queueDescriptor.m_Parameters.m_ConstantWeights == false);
+ CHECK(queueDescriptor.m_Inputs.size() == 3);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+
+template <typename NormalizationWorkload, armnn::DataType DataType>
+std::unique_ptr<NormalizationWorkload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW)
+{
+ // Creates the layer we're testing.
+ NormalizationDescriptor layerDesc;
+ layerDesc.m_NormChannelType = NormalizationAlgorithmChannel::Across;
+ layerDesc.m_NormMethodType = NormalizationAlgorithmMethod::LocalBrightness;
+ layerDesc.m_NormSize = 3;
+ layerDesc.m_Alpha = 0.5f;
+ layerDesc.m_Beta = -1.0f;
+ layerDesc.m_K = 0.2f;
+ layerDesc.m_DataLayout = dataLayout;
+
+ NormalizationLayer* layer = graph.AddLayer<NormalizationLayer>(layerDesc, "layer");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
+ TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 };
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
+ TensorShape{ 3, 5, 5, 1 } : TensorShape{ 3, 1, 5, 5 };
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo(inputShape, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, factory);
+
+ NormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
+ CHECK((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
+ CHECK(queueDescriptor.m_Parameters.m_NormSize == 3);
+ CHECK(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
+ CHECK(queueDescriptor.m_Parameters.m_Beta == -1.0f);
+ CHECK(queueDescriptor.m_Parameters.m_K == 0.2f);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename Pooling2dWorkload, armnn::DataType DataType>
+std::unique_ptr<Pooling2dWorkload> CreatePooling2dWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW)
+{
+ // Creates the layer we're testing.
+ Pooling2dDescriptor layerDesc;
+ layerDesc.m_PoolType = PoolingAlgorithm::Average;
+ layerDesc.m_PoolWidth = 3;
+ layerDesc.m_PoolHeight = 3;
+ layerDesc.m_PadLeft = 2;
+ layerDesc.m_PadRight = 2;
+ layerDesc.m_PadTop = 1;
+ layerDesc.m_PadBottom = 1;
+ layerDesc.m_StrideX = 2;
+ layerDesc.m_StrideY = 3;
+ layerDesc.m_OutputShapeRounding = OutputShapeRounding::Floor;
+ layerDesc.m_DataLayout = dataLayout;
+
+ Pooling2dLayer* const layer = graph.AddLayer<Pooling2dLayer>(layerDesc, "layer");
+
+ // Create extra layers
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 5, 5} : TensorShape{3, 5, 5, 2};
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 2, 2, 4} : TensorShape{3, 2, 4, 2};
+
+ // Connect up
+ Connect(input, layer, TensorInfo(inputShape, DataType));
+ Connect(layer, output, TensorInfo(outputShape, DataType));
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, factory);
+
+ Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
+ CHECK((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
+ CHECK(queueDescriptor.m_Parameters.m_PoolWidth == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PoolHeight == 3);
+ CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+ CHECK(queueDescriptor.m_Parameters.m_StrideY == 3);
+ CHECK(queueDescriptor.m_Parameters.m_PadLeft == 2);
+ CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
+ CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+ CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Return so we can do extra, backend-specific tests
+ return workload;
+}
+
+template <typename SoftmaxWorkload, armnn::DataType DataType>
+std::unique_ptr<SoftmaxWorkload> CreateSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Create the layer we're testing.
+ SoftmaxDescriptor softmaxDescriptor;
+ // Set Axis to -1 if CL or Neon until further Axes are supported.
+ if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
+ {
+ softmaxDescriptor.m_Axis = -1;
+ }
+
+ Layer* const layer = graph.AddLayer<SoftmaxLayer>(softmaxDescriptor, "layer");
+ // Create extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connect up
+ armnn::TensorInfo tensorInfo({4, 1}, DataType);
+ if (DataType == armnn::DataType::QAsymmU8)
+ {
+ tensorInfo.SetQuantizationOffset(0);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
+ else if (DataType == armnn::DataType::QAsymmS8)
+ {
+ tensorInfo.SetQuantizationOffset(-128);
+ tensorInfo.SetQuantizationScale(1.f / 256);
+ }
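+ // Illustrative note: softmax outputs lie in [0, 1], so a scale of 1/256 (with offset 0 for
+ // QAsymmU8, or -128 for QAsymmS8) spans the full output range of the quantised type.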
+
+ Connect(input, layer, tensorInfo);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, factory);
+
+ SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Return so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template<typename SplitterWorkload, armnn::DataType DataType>
+std::unique_ptr<SplitterWorkload>
+ CreateSplitterWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // Create the layer we're testing.
+ // NOTE: we need three dimensions (channels, height/y, width/x) because the Compute
+ // Library restricts subtensors to have the same x and y dimensions as
+ // their parent tensors, and therefore the origin on the x and y dimensions
+ // has to be zero for any view. So we need a third dimension to split along...
+ // NOTE: arguments are: number of views, number of dimensions.
+ ViewsDescriptor layerDesc(3, 3);
+ // NOTE: arguments are: view, dimension, value.
+ layerDesc.SetViewOriginCoord(0, 0, 0);
+ layerDesc.SetViewOriginCoord(1, 0, 1);
+ layerDesc.SetViewOriginCoord(2, 0, 3);
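+ // With the { 5, 7, 7 } input connected below, these origins carve the first dimension
+ // into views of extent 1, 2 and 2, matching output0Info, output1Info and output2Info.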
+
+ Layer* const layer = graph.AddLayer<SplitterLayer>(layerDesc, "layer");
+
+ // Adds extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output0 = graph.AddLayer<OutputLayer>(0, "output0");
+ Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
+ Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({5, 7, 7}, DataType);
+ Connect(input, layer, tensorInfo);
+
+ armnn::TensorInfo output0Info({1, 7, 7}, DataType);
+ armnn::TensorInfo output1Info({2, 7, 7}, DataType);
+ armnn::TensorInfo output2Info({2, 7, 7}, DataType);
+
+ Connect(layer, output0, output0Info, 0, 0);
+ Connect(layer, output1, output1Info, 1, 0);
+ Connect(layer, output2, output2Info, 2, 0);
+
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, factory);
+
+ SplitterQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 3);
+ CHECK(queueDescriptor.m_ViewOrigins.size() == 3);
+
+ CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
+ CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
+ CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
+ CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+/// This function constructs a graph with both a splitter and a concat, and returns a pair of the workloads.
+template<typename SplitterWorkload, typename ConcatWorkload, armnn::DataType DataType>
+std::pair<std::unique_ptr<SplitterWorkload>, std::unique_ptr<ConcatWorkload>>
+ CreateSplitterConcatWorkloadTest(armnn::IWorkloadFactory &factory, armnn::Graph &graph)
+{
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 100, 10 }, DataType);
+
+ armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 10 }, DataType);
+ armnn::TensorInfo splitTensorInfo2({ 1, 1, 100, 10 }, DataType);
+
+ //Constructs the graph.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+
+ armnn::ViewsDescriptor splitterViews(2);
+ splitterViews.SetViewOriginCoord(0, 0, 0);
+ splitterViews.SetViewOriginCoord(0, 1, 0);
+ splitterViews.SetViewOriginCoord(0, 2, 0);
+ splitterViews.SetViewOriginCoord(0, 3, 0);
+
+ splitterViews.SetViewOriginCoord(1, 0, 0);
+ splitterViews.SetViewOriginCoord(1, 1, 1);
+ splitterViews.SetViewOriginCoord(1, 2, 0);
+ splitterViews.SetViewOriginCoord(1, 3, 0);
+
+ // create splitter layer
+ Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
+ CHECK(splitter);
+
+ armnn::OriginsDescriptor concatViews(2);
+ concatViews.SetViewOriginCoord(0, 0, 0);
+ concatViews.SetViewOriginCoord(0, 1, 1);
+ concatViews.SetViewOriginCoord(0, 2, 0);
+ concatViews.SetViewOriginCoord(0, 3, 0);
+
+ concatViews.SetViewOriginCoord(1, 0, 0);
+ concatViews.SetViewOriginCoord(1, 1, 0);
+ concatViews.SetViewOriginCoord(1, 2, 0);
+ concatViews.SetViewOriginCoord(1, 3, 0);
+
+ // create concat layer
+ Layer* const concat = graph.AddLayer<ConcatLayer>(concatViews, "concat");
+ CHECK(concat);
+
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Adds connections.
+ // connect input to splitter
+ Connect(input, splitter, inputTensorInfo, 0, 0);
+ // connect splitter[0] to concat[1]
+ Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up.
+ // connect splitter[1] to concat[0]
+ Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
+ // connect concat to output
+ Connect(concat, output, inputTensorInfo, 0, 0);
+
+ // create tensor handles
+ CreateTensorHandles(graph, factory);
+
+ // create the splitter workload
+ auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
+ CHECK(workloadSplitter);
+ // create the concat workload
+ auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
+ CHECK(workloadConcat);
+
+ return {std::move(workloadSplitter), std::move(workloadConcat)};
+}
+
+
+/// This function constructs a graph with a splitter with two outputs. Each of the outputs is then
+/// connected to two different activation layers.
+template<typename SplitterWorkload, typename ActivationWorkload, armnn::DataType DataType>
+void CreateSplitterMultipleInputsOneOutputWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph,
+ std::unique_ptr<SplitterWorkload>& wlSplitter,
+ std::unique_ptr<ActivationWorkload>& wlActiv0_0,
+ std::unique_ptr<ActivationWorkload>& wlActiv0_1,
+ std::unique_ptr<ActivationWorkload>& wlActiv1_0,
+ std::unique_ptr<ActivationWorkload>& wlActiv1_1)
+{
+ armnn::TensorInfo inputTensorInfo ({ 1, 3, 100, 50 }, DataType);
+ armnn::TensorInfo splitTensorInfo1({ 1, 1, 100, 50 }, DataType);
+ armnn::TensorInfo splitTensorInfo2({ 1, 2, 100, 50 }, DataType);
+
+ //Constructs the graph.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+
+ armnn::ViewsDescriptor splitterViews(2);
+
+ splitterViews.SetViewOriginCoord(0, 0, 0);
+ splitterViews.SetViewOriginCoord(0, 1, 0);
+ splitterViews.SetViewOriginCoord(0, 2, 0);
+ splitterViews.SetViewOriginCoord(0, 3, 0);
+
+ splitterViews.SetViewOriginCoord(1, 0, 0);
+ splitterViews.SetViewOriginCoord(1, 1, 1);
+ splitterViews.SetViewOriginCoord(1, 2, 0);
+ splitterViews.SetViewOriginCoord(1, 3, 0);
+
+ Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
+
+ armnn::ActivationDescriptor activationDesc;
+
+ Layer* const activ0_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_0");
+ Layer* const activ0_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ0_1");
+ Layer* const activ1_0 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_0");
+ Layer* const activ1_1 = graph.AddLayer<ActivationLayer>(activationDesc, "activ1_1");
+
+ Layer* const output1 = graph.AddLayer<OutputLayer>(1, "output1");
+ Layer* const output2 = graph.AddLayer<OutputLayer>(2, "output2");
+ Layer* const output3 = graph.AddLayer<OutputLayer>(3, "output3");
+ Layer* const output4 = graph.AddLayer<OutputLayer>(4, "output4");
+
+ // Adds connections.
+ Connect(input, splitter, inputTensorInfo, 0, 0);
+ Connect(splitter, activ0_0, splitTensorInfo1, 0, 0);
+ Connect(splitter, activ0_1, splitTensorInfo1, 0, 0);
+
+ Connect(splitter, activ1_0, splitTensorInfo2, 1, 0);
+ Connect(splitter, activ1_1, splitTensorInfo2, 1, 0);
+
+ Connect(activ0_0, output1, splitTensorInfo1, 0, 0);
+ Connect(activ0_1, output2, splitTensorInfo1, 0, 0);
+ Connect(activ1_0, output3, splitTensorInfo2, 0, 0);
+ Connect(activ1_1, output4, splitTensorInfo2, 0, 0);
+
+ CreateTensorHandles(graph, factory);
+
+ auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
+ auto workloadActiv0_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_0, factory);
+ auto workloadActiv0_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ0_1, factory);
+ auto workloadActiv1_0 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_0, factory);
+ auto workloadActiv1_1 = MakeAndCheckWorkload<ActivationWorkload>(*activ1_1, factory);
+
+ wlSplitter = std::move(workloadSplitter);
+ wlActiv0_0 = std::move(workloadActiv0_0);
+ wlActiv0_1 = std::move(workloadActiv0_1);
+ wlActiv1_0 = std::move(workloadActiv1_0);
+ wlActiv1_1 = std::move(workloadActiv1_1);
+}
+
+template <typename ResizeWorkload, armnn::DataType DataType>
+std::unique_ptr<ResizeWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ DataLayout dataLayout = DataLayout::NCHW)
+{
+ TensorShape inputShape;
+ TensorShape outputShape;
+
+ switch (dataLayout)
+ {
+ case DataLayout::NHWC:
+ inputShape = { 2, 4, 4, 3 };
+ outputShape = { 2, 2, 2, 3 };
+ break;
+ case DataLayout::NCHW:
+ default:
+ inputShape = { 2, 3, 4, 4 };
+ outputShape = { 2, 3, 2, 2 };
+ }
+
+ // Creates the layer we're testing.
+ ResizeDescriptor resizeDesc;
+ armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
+ resizeDesc.m_Method = ResizeMethod::Bilinear;
+ resizeDesc.m_TargetWidth = outputShape[dimensionIndices.GetWidthIndex()];
+ resizeDesc.m_TargetHeight = outputShape[dimensionIndices.GetHeightIndex()];
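+ // Illustrative note: DataLayoutIndexed maps the layout to dimension indices
+ // (NCHW: H=2, W=3; NHWC: H=1, W=2), so for both layouts above the resize target is 2x2.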
+ resizeDesc.m_DataLayout = dataLayout;
+ Layer* const layer = graph.AddLayer<ResizeLayer>(resizeDesc, "resize");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo(inputShape, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, factory);
+
+ auto queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+ CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename BatchToSpaceNdWorkload, armnn::DataType DataType>
+std::unique_ptr<BatchToSpaceNdWorkload> CreateBatchToSpaceNdWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ BatchToSpaceNdDescriptor desc;
+ Layer* const layer = graph.AddLayer<BatchToSpaceNdLayer>(desc, "batchToSpace");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo tensorInfo({1, 1, 1, 1}, DataType);
+
+ Connect(input, layer, tensorInfo);
+ Connect(layer, output, tensorInfo);
+
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<BatchToSpaceNdWorkload>(*layer, factory);
+
+ BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ return workload;
+}
+
+template <typename LogSoftmaxWorkload, armnn::DataType DataType>
+std::unique_ptr<LogSoftmaxWorkload> CreateLogSoftmaxWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Create the layer we're testing.
+ LogSoftmaxDescriptor logSoftmaxDescriptor;
+ // Set Axis to -1 if CL or Neon until further Axes are supported.
+ if (factory.GetBackendId() == armnn::Compute::CpuAcc || factory.GetBackendId() == armnn::Compute::GpuAcc)
+ {
+ logSoftmaxDescriptor.m_Axis = -1;
+ }
+
+ Layer* const layer = graph.AddLayer<LogSoftmaxLayer>(logSoftmaxDescriptor, "layer");
+ // Create extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connect up
+ armnn::TensorInfo tensorInfo({4, 1}, DataType);
+
+ Connect(input, layer, tensorInfo);
+ Connect(layer, output, tensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<LogSoftmaxWorkload>(*layer, factory);
+
+ LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Return so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename L2NormalizationWorkload, armnn::DataType DataType>
+std::unique_ptr<L2NormalizationWorkload> CreateL2NormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph, DataLayout dataLayout = DataLayout::NCHW)
+{
+ // Creates the layer we're testing.
+ L2NormalizationDescriptor layerDesc;
+ layerDesc.m_DataLayout = dataLayout;
+
+ Layer* const layer = graph.AddLayer<L2NormalizationLayer>(layerDesc, "l2norm");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ TensorShape inputShape = (dataLayout == DataLayout::NCHW) ?
+ TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
+ TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
+ TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo(inputShape, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, factory);
+
+ L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename ReshapeWorkload, armnn::DataType DataType>
+std::unique_ptr<ReshapeWorkload> CreateReshapeWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ TensorShape outputShape({ 1, 4 });
+ ReshapeDescriptor reshapeDesc;
+ reshapeDesc.m_TargetShape = outputShape;
+ Layer* const layer = graph.AddLayer<ReshapeLayer>(reshapeDesc, "layer");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({ 4, 1 }, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, factory);
+
+ ReshapeQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename ConvertFp16ToFp32Float32Workload>
+std::unique_ptr<ConvertFp16ToFp32Float32Workload> CreateConvertFp16ToFp32WorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ ConvertFp16ToFp32Layer* const layer = graph.AddLayer<ConvertFp16ToFp32Layer>("Fp16ToFp32Converter");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
+ armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, factory);
+
+ ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename ConvertFp32ToFp16Float16Workload>
+std::unique_ptr<ConvertFp32ToFp16Float16Workload> CreateConvertFp32ToFp16WorkloadTest(
+ armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // Creates the layer we're testing.
+ ConvertFp32ToFp16Layer* const layer = graph.AddLayer<ConvertFp32ToFp16Layer>("Fp32ToFp16Converter");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, factory);
+
+ ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename MeanWorkload, armnn::DataType DataType>
+std::unique_ptr<MeanWorkload> CreateMeanWorkloadTest(armnn::IWorkloadFactory& factory, armnn::Graph& graph)
+{
+ // Reduce along the first and second dimensions, and do not keep the reduced dimensions.
+ MeanDescriptor descriptor({ 1, 2 }, false);
+
+ // Creates the layer we're testing.
+ Layer* const layer = graph.AddLayer<MeanLayer>(descriptor, "mean");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({ 1, 3, 7, 4 }, DataType);
+ armnn::TensorInfo outputTensorInfo({ 1, 4 }, DataType);
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, factory);
+
+ MeanQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
+ CHECK(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template<typename ConcatWorkload, armnn::DataType DataType>
+std::unique_ptr<ConcatWorkload> CreateConcatWorkloadTest(armnn::IWorkloadFactory &factory,
+ armnn::Graph &graph,
+ const armnn::TensorShape &outputShape,
+ unsigned int concatAxis)
+{
+ armnn::TensorInfo inputTensorInfo({ 2, 3, 2, 5 }, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+
+ // Constructs the graph.
+ Layer* const input0 = graph.AddLayer<InputLayer>(0, "input0");
+ Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
+ armnn::OriginsDescriptor descriptor;
+
+ std::vector<armnn::TensorShape> inputShapes{{ 2, 3, 2, 5 }, { 2, 3, 2, 5 }};
+
+ descriptor = CreateDescriptorForConcatenation(inputShapes.begin(),
+ inputShapes.end(),
+ concatAxis);
+
+ // create concat layer
+ Layer* const concat = graph.AddLayer<ConcatLayer>(descriptor, "concat");
+ CHECK(concat);
+
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Adds connections.
+ // connect input0 to concat
+ Connect(input0, concat, inputTensorInfo, 0, 0);
+ // connect input1 to concat
+ Connect(input1, concat, inputTensorInfo, 0, 1);
+ // connect concat to output
+ Connect(concat, output, outputTensorInfo, 0, 0);
+
+ // create tensor handles
+ CreateTensorHandles(graph, factory);
+
+ // create concat workload
+ auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
+ CHECK(workloadConcat);
+
+ return workloadConcat;
+}
+
+template <typename PreCompiledWorkload, armnn::DataType dataType>
+std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> CreatePreCompiledWorkloadTest(
+ armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ bool biasEnabled = false)
+{
+ IgnoreUnused(graph);
+
+ // build up the structure of the network
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ // Add an input layer
+ armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer");
+ CHECK(inputLayer);
+
+ // ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
+ // ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC
+ // this test is using NHWC, so the weights shape is OHWI
+ TensorInfo weightsTensorInfo(TensorShape({16, 1, 1, 16}), dataType, 0.9f, 0, true);
+ unsigned int weightsLength = weightsTensorInfo.GetNumElements();
+
+ using WeightType = armnn::ResolveType<dataType>;
+ std::vector<WeightType> convWeightsData(weightsLength);
+ for (unsigned int i = 0; i < weightsLength; ++i)
+ {
+ convWeightsData[i] = static_cast<WeightType>(i);
+ }
+
+ armnn::ConstTensor weights(weightsTensorInfo, convWeightsData);
+
+ // Add a layer that can be used in the PreCompiled layer
+ armnn::Convolution2dDescriptor convDesc2d;
+ convDesc2d.m_StrideX = 1;
+ convDesc2d.m_StrideY = 1;
+ convDesc2d.m_BiasEnabled = biasEnabled;
+ convDesc2d.m_DataLayout = armnn::DataLayout::NHWC;
+
+ armnn::IConnectableLayer* convLayer = nullptr;
+ const std::string convLayerName("conv layer");
+
+ if (biasEnabled)
+ {
+ constexpr armnn::DataType biasDataType = ( dataType == armnn::DataType::QAsymmU8) ?
+ armnn::DataType::Signed32 : armnn::DataType::Float32;
+
+ TensorInfo biasTensorInfo(TensorShape({16}), biasDataType, 0.9f * 0.9f, 0, true);
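+ // Illustrative note: for quantised convolutions the bias scale must equal
+ // inputScale * weightScale, hence 0.9f * 0.9f to match the 0.9f scales used for
+ // the weights and input tensors in this test.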
+ unsigned int biasLength = biasTensorInfo.GetNumElements();
+
+ using BiasType = armnn::ResolveType<biasDataType>;
+ std::vector<BiasType> biasData(biasLength);
+ std::fill(biasData.begin(), biasData.end(), static_cast<BiasType>(0));
+
+ armnn::ConstTensor biases(biasTensorInfo, biasData);
+
+ // Create convolution layer with biases
+ convLayer = net->AddConvolution2dLayer(convDesc2d,
+ weights,
+ Optional<ConstTensor>(biases),
+ convLayerName.c_str());
+ }
+ else
+ {
+ // Create convolution layer without biases
+ convLayer = net->AddConvolution2dLayer(convDesc2d,
+ weights,
+ EmptyOptional(),
+ convLayerName.c_str());
+ }
+
+ CHECK(convLayer);
+
+ // Add an output layer
+ armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer");
+ CHECK(outputLayer);
+
+ // set the tensors in the network (NHWC format)
+ TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
+ if (dataType == armnn::DataType::QAsymmU8)
+ {
+ inputTensorInfo.SetQuantizationOffset(0);
+ inputTensorInfo.SetQuantizationScale(0.9f);
+ }
+
+ TensorInfo outputTensorInfo(TensorShape({1, 16, 16, 16}), dataType);
+ if (dataType == armnn::DataType::QAsymmU8)
+ {
+ outputTensorInfo.SetQuantizationOffset(0);
+ outputTensorInfo.SetQuantizationScale(0.9f);
+ }
+
+ // Connect the layers
+ inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+ inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+ convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+ convLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+ // Optimize the network for the backend supported by the factory
+ std::vector<armnn::BackendId> backends = {factory.GetBackendId()};
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+ armnn::OptimizerOptions optimizerOptions;
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
+ optimizerOptions);
+ CHECK(optimizedNet != nullptr);
+
+ // Find the PreCompiled layer in the optimised graph
+ armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get());
+ Layer* preCompiledLayer = nullptr;
+ for (auto& layer : optimisedGraph)
+ {
+ if (layer->GetType() == LayerType::PreCompiled)
+ {
+ preCompiledLayer = layer;
+ }
+ }
+ CHECK(preCompiledLayer != nullptr);
+
+ // Create the TensorHandles.
+ CreateTensorHandles(optimisedGraph, factory);
+
+ // Make the workload and check it.
+ auto workload = MakeAndCheckWorkload<PreCompiledWorkload>(*preCompiledLayer, factory);
+
+ PreCompiledQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns the workload so we can do extra, backend-specific tests.
+ // NOTE: We need to return the optimised network as well, otherwise it goes
+ // out of scope and the tensor handles are destroyed.
+ return std::make_pair(std::move(optimizedNet), std::move(workload));
+}
+
+template<typename ConstantWorkload, armnn::DataType DataType>
+std::unique_ptr<ConstantWorkload> CreateConstantWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ const armnn::TensorShape& outputShape)
+{
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+
+ // create constant layer
+ auto constant = graph.AddLayer<ConstantLayer>("constant");
+ CHECK(constant);
+ constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);
+
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Adds connections.
+ // connect constant to output
+ Connect(constant, output, outputTensorInfo, 0, 0);
+
+ // create tensor handles
+ CreateTensorHandles(graph, factory);
+
+ // create Constant workload
+ auto workloadConstant = MakeAndCheckWorkload<ConstantWorkload>(*constant, factory);
+ CHECK(workloadConstant);
+
+ return workloadConstant;
+}
+
+template <typename PreluWorkload>
+std::unique_ptr<PreluWorkload> CreatePreluWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& alphaShape,
+ const armnn::TensorShape& outputShape,
+ armnn::DataType dataType)
+{
+ // Creates the PReLU layer
+ Layer* const layer = graph.AddLayer<PreluLayer>("prelu");
+ CHECK(layer != nullptr);
+
+ // Creates extra layers
+ Layer* const input = graph.AddLayer<InputLayer> (0, "input");
+ Layer* const alpha = graph.AddLayer<InputLayer> (1, "alpha");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+ CHECK(input != nullptr);
+ CHECK(alpha != nullptr);
+ CHECK(output != nullptr);
+
+ // Connects up
+ armnn::TensorInfo inputTensorInfo (inputShape, dataType);
+ armnn::TensorInfo alphaTensorInfo (alphaShape, dataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, dataType);
+ Connect(input, layer, inputTensorInfo, 0, 0);
+ Connect(alpha, layer, alphaTensorInfo, 0, 1);
+ Connect(layer, output, outputTensorInfo, 0, 0);
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it
+ auto workload = MakeAndCheckWorkload<PreluWorkload>(*layer, factory);
+
+ PreluQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 2);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ // Returns so we can do extra, backend-specific tests.
+ return workload;
+}
+
+template <typename SpaceToDepthWorkload, armnn::DataType DataType>
+std::unique_ptr<SpaceToDepthWorkload> CreateSpaceToDepthWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ SpaceToDepthDescriptor desc;
+ desc.m_BlockSize = 2;
+ Layer* const layer = graph.AddLayer<SpaceToDepthLayer>(desc, "spaceToDepth");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 2, 1 }, DataType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 4 }, DataType);
+
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, factory);
+
+ SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == 1);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ return workload;
+}
+
+template <typename StackWorkload, armnn::DataType DataType>
+std::unique_ptr<StackWorkload> CreateStackWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph,
+ const armnn::TensorShape& inputShape,
+ const armnn::TensorShape& outputShape,
+ unsigned int axis,
+ unsigned int numInputs)
+{
+ armnn::TensorInfo inputTensorInfo(inputShape, DataType);
+ armnn::TensorInfo outputTensorInfo(outputShape, DataType);
+
+ // Constructs the Stack layer.
+ armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
+ Layer* const stackLayer = graph.AddLayer<StackLayer>(descriptor, "stack");
+ CHECK(stackLayer != nullptr);
+
+ // Constructs layer inputs and output.
+ std::vector<Layer*> inputs;
+ for (unsigned int i=0; i<numInputs; ++i)
+ {
+ inputs.push_back(graph.AddLayer<InputLayer>(
+ static_cast<int>(i),
+ ("input" + std::to_string(i)).c_str()
+ ));
+ CHECK(inputs[i] != nullptr);
+ }
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+ CHECK(output != nullptr);
+
+ // Adds connections.
+ for (unsigned int i=0; i<numInputs; ++i)
+ {
+ Connect(inputs[i], stackLayer, inputTensorInfo, 0, i);
+ }
+ Connect(stackLayer, output, outputTensorInfo, 0, 0);
+
+ CreateTensorHandles(graph, factory);
+
+ auto stackWorkload = MakeAndCheckWorkload<StackWorkload>(*stackLayer, factory);
+ StackQueueDescriptor queueDescriptor = stackWorkload->GetData();
+ CHECK(queueDescriptor.m_Inputs.size() == numInputs);
+ CHECK(queueDescriptor.m_Outputs.size() == 1);
+
+ return stackWorkload;
+}
+
+} // Anonymous namespace
diff --git a/src/armnnTestUtils/DataTypeUtils.hpp b/src/armnnTestUtils/DataTypeUtils.hpp
new file mode 100644
index 0000000000..528a573b99
--- /dev/null
+++ b/src/armnnTestUtils/DataTypeUtils.hpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <ResolveType.hpp>
+
+
+#include <reference/workloads/Encoders.hpp>
+
+#include <vector>
+
+// Utility template to convert a collection of values to the correct type
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+std::vector<T> ConvertToDataType(const std::vector<float>& input,
+ const armnn::TensorInfo& inputTensorInfo)
+{
+ std::vector<T> output(input.size());
+ auto outputTensorInfo = inputTensorInfo;
+ outputTensorInfo.SetDataType(ArmnnType);
+
+ std::unique_ptr<armnn::Encoder<float>> pOutputEncoder = armnn::MakeEncoder<float>(outputTensorInfo, output.data());
+ armnn::Encoder<float>& rOutputEncoder = *pOutputEncoder;
+
+ for (auto it = input.begin(); it != input.end(); ++it)
+ {
+ rOutputEncoder.Set(*it);
+ ++rOutputEncoder;
+ }
+ return output;
+}
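+// Example usage (illustrative): quantise four floats to QAsymmU8, taking the scale and
+// offset from the supplied TensorInfo (here 0.1f and 0):
+//   armnn::TensorInfo info({ 2, 2 }, armnn::DataType::Float32, 0.1f, 0);
+//   std::vector<uint8_t> quantised =
+//       ConvertToDataType<armnn::DataType::QAsymmU8>({ 0.1f, 0.2f, 0.3f, 0.4f }, info);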
+
+// Utility template to convert a single value to the correct type
+template <typename T>
+T ConvertToDataType(const float& value,
+ const armnn::TensorInfo& tensorInfo)
+{
+ std::vector<T> output(1);
+ std::unique_ptr<armnn::Encoder<float>> pEncoder = armnn::MakeEncoder<float>(tensorInfo, output.data());
+ armnn::Encoder<float>& rEncoder = *pEncoder;
+ rEncoder.Set(value);
+ return output[0];
+}
diff --git a/src/armnnTestUtils/GraphUtils.cpp b/src/armnnTestUtils/GraphUtils.cpp
new file mode 100644
index 0000000000..15dc888e21
--- /dev/null
+++ b/src/armnnTestUtils/GraphUtils.cpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GraphUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name)
+{
+ for (auto&& layer : graph)
+ {
+ if (layer->GetName() == name)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name)
+{
+ for (auto&& layer : graph)
+ {
+ if (layer->GetNameStr() == name)
+ {
+ return layer;
+ }
+ }
+ return nullptr;
+}
+
+bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num)
+{
+ return layer->GetNumInputSlots() == num;
+}
+
+bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num)
+{
+ return layer->GetNumOutputSlots() == num;
+}
+
+bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer,
+ unsigned int srcSlot, unsigned int destSlot,
+ const armnn::TensorInfo& expectedTensorInfo)
+{
+ const armnn::IOutputSlot& outputSlot = srcLayer->GetOutputSlot(srcSlot);
+ const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
+ if (expectedTensorInfo != tensorInfo)
+ {
+ return false;
+ }
+ const unsigned int numConnections = outputSlot.GetNumConnections();
+ for (unsigned int c = 0; c < numConnections; ++c)
+ {
+ auto inputSlot = armnn::PolymorphicDowncast<const armnn::InputSlot*>(outputSlot.GetConnection(c));
+ if (inputSlot->GetOwningLayer().GetNameStr() == destLayer->GetNameStr() &&
+ inputSlot->GetSlotIndex() == destSlot)
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Checks that 'first' comes before 'second' in the graph's topological sort order.
+bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second)
+{
+ graph.Print();
+
+ const auto& order = graph.TopologicalSort();
+
+ auto firstPos = std::find(order.begin(), order.end(), first);
+ auto secondPos = std::find(firstPos, order.end(), second);
+
+ return (secondPos != order.end());
+}
diff --git a/src/armnnTestUtils/GraphUtils.hpp b/src/armnnTestUtils/GraphUtils.hpp
new file mode 100644
index 0000000000..95f07040f2
--- /dev/null
+++ b/src/armnnTestUtils/GraphUtils.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <Graph.hpp>
+
+#include <string>
+
+
+bool GraphHasNamedLayer(const armnn::Graph& graph, const std::string& name);
+
+armnn::Layer* GetFirstLayerWithName(armnn::Graph& graph, const std::string& name);
+
+bool CheckNumberOfInputSlot(armnn::Layer* layer, unsigned int num);
+
+bool CheckNumberOfOutputSlot(armnn::Layer* layer, unsigned int num);
+
+bool IsConnected(armnn::Layer* srcLayer, armnn::Layer* destLayer,
+ unsigned int srcSlot, unsigned int destSlot,
+ const armnn::TensorInfo& expectedTensorInfo);
+
+bool CheckOrder(const armnn::Graph& graph, const armnn::Layer* first, const armnn::Layer* second);
+
diff --git a/src/armnnTestUtils/TensorCopyUtils.cpp b/src/armnnTestUtils/TensorCopyUtils.cpp
new file mode 100644
index 0000000000..14c6d5cc61
--- /dev/null
+++ b/src/armnnTestUtils/TensorCopyUtils.cpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnnTestUtils/TensorCopyUtils.hpp>
+#include <Half.hpp>
+
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
+{
+ tensorHandle->CopyInFrom(memory);
+}
+
+void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
+{
+ tensorHandle->CopyOutTo(memory);
+}
+
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
+{
+ tensorHandle->Allocate();
+ CopyDataToITensorHandle(tensorHandle, memory);
+}
diff --git a/src/armnnTestUtils/TensorHelpers.hpp b/src/armnnTestUtils/TensorHelpers.hpp
new file mode 100644
index 0000000000..d51e4b1bce
--- /dev/null
+++ b/src/armnnTestUtils/TensorHelpers.hpp
@@ -0,0 +1,235 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnnTestUtils/PredicateResult.hpp>
+
+#include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnnUtils/FloatingPointComparison.hpp>
+
+#include <QuantizeHelper.hpp>
+
+#include <doctest/doctest.h>
+
+#include <array>
+#include <cmath>
+#include <random>
+#include <vector>
+
+constexpr float g_FloatCloseToZeroTolerance = 1.0e-6f;
+
+template<typename T, bool isQuantized = true>
+struct SelectiveComparer
+{
+ static bool Compare(T a, T b)
+ {
+ return (std::max(a, b) - std::min(a, b)) <= 1;
+ }
+
+};
+
+template<typename T>
+struct SelectiveComparer<T, false>
+{
+ static bool Compare(T a, T b)
+ {
+ // If a or b is zero, percent_tolerance does an exact match, so compare to a small, constant tolerance instead.
+ if (a == 0.0f || b == 0.0f)
+ {
+ return std::abs(a - b) <= g_FloatCloseToZeroTolerance;
+ }
+
+ if (std::isinf(a) && a == b)
+ {
+ return true;
+ }
+
+ if (std::isnan(a) && std::isnan(b))
+ {
+ return true;
+ }
+
+ // For unquantized floats we use a tolerance of 1%.
+ return armnnUtils::within_percentage_tolerance(a, b);
+ }
+};
+
+template<typename T>
+bool SelectiveCompare(T a, T b)
+{
+ return SelectiveComparer<T, armnn::IsQuantizedType<T>()>::Compare(a, b);
+}
+
+template<typename T>
+bool SelectiveCompareBoolean(T a, T b)
+{
+ return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));
+}
+
+template <typename T>
+armnn::PredicateResult CompareTensors(const std::vector<T>& actualData,
+ const std::vector<T>& expectedData,
+ const armnn::TensorShape& actualShape,
+ const armnn::TensorShape& expectedShape,
+ bool compareBoolean = false,
+ bool isDynamic = false)
+{
+ if (actualData.size() != expectedData.size())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different data size ["
+ << actualData.size()
+ << "!="
+ << expectedData.size()
+ << "]";
+ return res;
+ }
+
+ if (actualShape.GetNumDimensions() != expectedShape.GetNumDimensions())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different number of dimensions ["
+ << actualShape.GetNumDimensions()
+ << "!="
+ << expectedShape.GetNumDimensions()
+ << "]";
+ return res;
+ }
+
+ if (actualShape.GetNumElements() != expectedShape.GetNumElements())
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different number of elements ["
+ << actualShape.GetNumElements()
+ << "!="
+ << expectedShape.GetNumElements()
+ << "]";
+ return res;
+ }
+
+ unsigned int numberOfDimensions = actualShape.GetNumDimensions();
+
+ if (!isDynamic)
+ {
+ // Checks they are same shape.
+ for (unsigned int i = 0; i < numberOfDimensions; ++i)
+ {
+ if (actualShape[i] != expectedShape[i])
+ {
+ armnn::PredicateResult res(false);
+ res.Message() << "Different shapes ["
+ << actualShape[i]
+ << "!="
+ << expectedShape[i]
+ << "]";
+ return res;
+ }
+ }
+ }
+
+ // Fun iteration over n dimensions.
+ std::vector<unsigned int> indices;
+ for (unsigned int i = 0; i < numberOfDimensions; i++)
+ {
+ indices.emplace_back(0);
+ }
+
+ std::stringstream errorString;
+ int numFailedElements = 0;
+ constexpr int maxReportedDifferences = 3;
+ unsigned int index = 0;
+
+ // Compare data element by element.
+ while (true)
+ {
+ bool comparison;
+ // As 'true' for uint8_t is any non-zero value (1-255), we need a dedicated comparison for booleans.
+ if(compareBoolean)
+ {
+ comparison = SelectiveCompareBoolean(actualData[index], expectedData[index]);
+ }
+ else
+ {
+ comparison = SelectiveCompare(actualData[index], expectedData[index]);
+ }
+
+ if (!comparison)
+ {
+ ++numFailedElements;
+
+ if (numFailedElements <= maxReportedDifferences)
+ {
+ if (numFailedElements >= 2)
+ {
+ errorString << ", ";
+ }
+ errorString << "[";
+ for (unsigned int i = 0; i < numberOfDimensions; ++i)
+ {
+ errorString << indices[i];
+ if (i != numberOfDimensions - 1)
+ {
+ errorString << ",";
+ }
+ }
+ errorString << "]";
+
+ errorString << " (" << +actualData[index] << " != " << +expectedData[index] << ")";
+ }
+ }
+
+ ++indices[numberOfDimensions - 1];
+ for (unsigned int i=numberOfDimensions-1; i>0; i--)
+ {
+ if (indices[i] == actualShape[i])
+ {
+ indices[i] = 0;
+ ++indices[i - 1];
+ }
+ }
+ if (indices[0] == actualShape[0])
+ {
+ break;
+ }
+
+ index++;
+ }
+
+ armnn::PredicateResult comparisonResult(true);
+ if (numFailedElements > 0)
+ {
+ comparisonResult.SetResult(false);
+ comparisonResult.Message() << numFailedElements << " different values at: ";
+ if (numFailedElements > maxReportedDifferences)
+ {
+ errorString << ", ... (and " << (numFailedElements - maxReportedDifferences) << " other differences)";
+ }
+ comparisonResult.Message() << errorString.str();
+ }
+
+ return comparisonResult;
+}
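+// Typical usage (illustrative; assumes armnn::PredicateResult exposes m_Result and
+// m_Message, as used elsewhere in the test suite):
+//   auto result = CompareTensors(actualOutput, expectedOutput, actualShape, expectedShape);
+//   CHECK_MESSAGE(result.m_Result, result.m_Message.str());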
+
+template <typename T>
+std::vector<T> MakeRandomTensor(const armnn::TensorInfo& tensorInfo,
+ unsigned int seed,
+ float min = -10.0f,
+ float max = 10.0f)
+{
+ std::mt19937 gen(seed);
+ std::uniform_real_distribution<float> dist(min, max);
+
+ std::vector<float> init(tensorInfo.GetNumElements());
+ for (unsigned int i = 0; i < init.size(); i++)
+ {
+ init[i] = dist(gen);
+ }
+
+ const float qScale = tensorInfo.GetQuantizationScale();
+ const int32_t qOffset = tensorInfo.GetQuantizationOffset();
+
+ return armnnUtils::QuantizedVector<T>(init, qScale, qOffset);
+}
diff --git a/src/armnnTestUtils/TestUtils.cpp b/src/armnnTestUtils/TestUtils.cpp
new file mode 100644
index 0000000000..9ac0b3986e
--- /dev/null
+++ b/src/armnnTestUtils/TestUtils.cpp
@@ -0,0 +1,62 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TestUtils.hpp"
+
+#include <armnn/utility/Assert.hpp>
+
+#include <iomanip>
+#include <sstream>
+
+using namespace armnn;
+
+void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
+ unsigned int fromIndex, unsigned int toIndex)
+{
+ ARMNN_ASSERT(from);
+ ARMNN_ASSERT(to);
+
+ try
+ {
+ from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
+ }
+ catch (const std::out_of_range& exc)
+ {
+ std::ostringstream message;
+
+ if (to->GetType() == armnn::LayerType::FullyConnected && toIndex == 2)
+ {
+ message << "Tried to connect bias to FullyConnected layer when bias is not enabled: ";
+ }
+
+ message << "Failed to connect to input slot "
+ << toIndex
+ << " on "
+ << GetLayerTypeAsCString(to->GetType())
+ << " layer "
+ << std::quoted(to->GetName())
+ << " as the slot does not exist or is unavailable";
+ throw LayerValidationException(message.str());
+ }
+
+ from->GetOutputSlot(fromIndex).SetTensorInfo(tensorInfo);
+}
+
+namespace armnn
+{
+
+Graph& GetGraphForTesting(IOptimizedNetwork* optNet)
+{
+ return optNet->pOptimizedNetworkImpl->GetGraph();
+}
+
+ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNet)
+{
+ return optNet->pOptimizedNetworkImpl->GetModelOptions();
+}
+
+profiling::ProfilingService& GetProfilingService(armnn::RuntimeImpl* runtime)
+{
+ return runtime->m_ProfilingService;
+}
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/armnnTestUtils/TestUtils.hpp b/src/armnnTestUtils/TestUtils.hpp
new file mode 100644
index 0000000000..d5b6d1b805
--- /dev/null
+++ b/src/armnnTestUtils/TestUtils.hpp
@@ -0,0 +1,58 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/INetwork.hpp>
+#include <Graph.hpp>
+#include <Runtime.hpp>
+
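+/// Connects output slot 'fromIndex' of 'from' to input slot 'toIndex' of 'to' and sets the
+/// given TensorInfo on the output slot.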
+void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
+ unsigned int fromIndex = 0, unsigned int toIndex = 0);
+
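+/// Returns true if the given layer is of type LayerT.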
+template <typename LayerT>
+bool IsLayerOfType(const armnn::Layer* const layer)
+{
+ return (layer->GetType() == armnn::LayerEnumOf<LayerT>());
+}
+
+inline bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
+{
+ return (first == last);
+}
+
+/// Checks that each unary function in [u, us...] evaluates to true for the corresponding layer in the sequence [first, last).
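+/// e.g. CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::OutputLayer>)
+/// (the layer types shown are illustrative only).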
+template <typename U, typename... Us>
+bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last, U&& u, Us&&... us)
+{
+ return u(*first) && CheckSequence(std::next(first), last, us...);
+}
+
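+/// Returns true if, for every layer of type LayerT in the graph, the layer's related layer names
+/// match testRelatedLayers.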
+template <typename LayerT>
+bool CheckRelatedLayers(armnn::Graph& graph, const std::list<std::string>& testRelatedLayers)
+{
+ for (auto& layer : graph)
+ {
+ if (layer->GetType() == armnn::LayerEnumOf<LayerT>())
+ {
+ auto& relatedLayers = layer->GetRelatedLayerNames();
+ if (!std::equal(relatedLayers.begin(), relatedLayers.end(), testRelatedLayers.begin(),
+ testRelatedLayers.end()))
+ {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+namespace armnn
+{
+Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
+ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr);
+profiling::ProfilingService& GetProfilingService(RuntimeImpl* runtime);
+
+} // namespace armnn \ No newline at end of file
diff --git a/src/armnnTestUtils/UnitTests.cpp b/src/armnnTestUtils/UnitTests.cpp
new file mode 100644
index 0000000000..cf532a76fd
--- /dev/null
+++ b/src/armnnTestUtils/UnitTests.cpp
@@ -0,0 +1,67 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#endif
+#include <doctest/doctest.h>
+
+#include "UnitTests.hpp"
+
+struct ConfigureLoggingFixture
+{
+ ConfigureLoggingFixture()
+ {
+ ConfigureLoggingTest();
+ }
+};
+
+TEST_SUITE("LoggerSuite")
+{
+TEST_CASE_FIXTURE(ConfigureLoggingFixture, "LoggerTest")
+{
+ std::stringstream ss;
+ {
+ struct StreamRedirector
+ {
+ public:
+ StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
+ : m_Stream(stream)
+ , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer))
+ {}
+ ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }
+
+ private:
+ std::ostream& m_Stream;
+ std::streambuf* m_BackupBuffer;
+ };
+
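+ // Redirect std::cout into 'ss' for the scope of this block so the logger output can be checked below.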
+ StreamRedirector redirect(std::cout, ss.rdbuf());
+
+ using namespace armnn;
+ SetLogFilter(LogSeverity::Trace);
+ SetAllLoggingSinks(true, false, false);
+
+ ARMNN_LOG(trace) << "My trace message; " << -2;
+ ARMNN_LOG(debug) << "My debug message; " << -1;
+ ARMNN_LOG(info) << "My info message; " << 0;
+ ARMNN_LOG(warning) << "My warning message; " << 1;
+ ARMNN_LOG(error) << "My error message; " << 2;
+ ARMNN_LOG(fatal) << "My fatal message; " << 3;
+
+ SetLogFilter(LogSeverity::Fatal);
+ }
+
+ CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos);
+ CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos);
+ CHECK(ss.str().find("Info: My info message; 0") != std::string::npos);
+ CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos);
+ CHECK(ss.str().find("Error: My error message; 2") != std::string::npos);
+ CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos);
+}
+
+} \ No newline at end of file
diff --git a/src/armnnTestUtils/UnitTests.hpp b/src/armnnTestUtils/UnitTests.hpp
new file mode 100644
index 0000000000..788ad87718
--- /dev/null
+++ b/src/armnnTestUtils/UnitTests.hpp
@@ -0,0 +1,191 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "TensorHelpers.hpp"
+#include "WorkloadTestUtils.hpp"
+
+#include <armnn/Logging.hpp>
+#include <armnn/Utils.hpp>
+#include <reference/RefWorkloadFactory.hpp>
+#include <reference/test/RefWorkloadFactoryHelper.hpp>
+
+#include <backendsCommon/test/WorkloadFactoryHelper.hpp>
+
+#include <armnnTestUtils/LayerTestResult.hpp>
+#include <armnnTestUtils/TensorCopyUtils.hpp>
+
+#include <doctest/doctest.h>
+
+inline void ConfigureLoggingTest()
+{
+ // Configures logging for both the ARMNN library and this test program.
+ armnn::ConfigureLogging(true, true, armnn::LogSeverity::Fatal);
+}
+
+// The following macros require the caller to have defined FactoryType, with one of the following using statements:
+//
+// using FactoryType = armnn::RefWorkloadFactory;
+// using FactoryType = armnn::ClWorkloadFactory;
+// using FactoryType = armnn::NeonWorkloadFactory;
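+//
+// A backend test file might then contain, for example (test and function names here are illustrative only):
+//
+//   using FactoryType = armnn::RefWorkloadFactory;
+//   ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleExample, SomeLayerTestFunction)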
+
+/// Executes CHECK_MESSAGE on the CompareTensors() return value so that the PredicateResult message is reported.
+/// If the test reports itself as not supported then the tensors are not compared.
+/// Additionally this checks that the supportedness reported by the test matches the name of the test.
+/// Unsupported tests must be 'tagged' by including "UNSUPPORTED" in their name.
+/// This is useful because it clarifies that the feature being tested is not actually supported
+/// (a passed test with the name of a feature would imply that feature was supported).
+/// If support is added for a feature, the test case will fail because the name incorrectly contains UNSUPPORTED.
+/// If support is removed for a feature, the test case will fail because the name doesn't contain UNSUPPORTED.
+template <typename T, std::size_t n>
+void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult<T, n>& testResult)
+{
+ bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
+ CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
+ "The test name does not match the supportedness it is reporting");
+ if (testResult.m_Supported)
+ {
+ auto result = CompareTensors(testResult.m_ActualData,
+ testResult.m_ExpectedData,
+ testResult.m_ActualShape,
+ testResult.m_ExpectedShape,
+ testResult.m_CompareBoolean);
+ CHECK_MESSAGE(result.m_Result, result.m_Message.str());
+ }
+}
+
+template <typename T, std::size_t n>
+void CompareTestResultIfSupported(const std::string& testName, const std::vector<LayerTestResult<T, n>>& testResult)
+{
+ bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
+ for (unsigned int i = 0; i < testResult.size(); ++i)
+ {
+ CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
+ "The test name does not match the supportedness it is reporting");
+ if (testResult[i].m_Supported)
+ {
+ auto result = CompareTensors(testResult[i].m_ActualData,
+ testResult[i].m_ExpectedData,
+ testResult[i].m_ActualShape,
+ testResult[i].m_ExpectedShape);
+ CHECK_MESSAGE(result.m_Result, result.m_Message.str());
+ }
+ }
+}
+
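+/// Creates a FactoryType workload factory (and its memory manager), runs the given layer test
+/// function with it and checks the result(s) via CompareTestResultIfSupported().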
+template<typename FactoryType, typename TFuncPtr, typename... Args>
+void RunTestFunction(const char* testName, TFuncPtr testFunction, Args... args)
+{
+ std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
+ armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
+
+ auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
+ FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
+
+ auto testResult = (*testFunction)(workloadFactory, memoryManager, args...);
+ CompareTestResultIfSupported(testName, testResult);
+
+ armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr);
+}
+
+template<typename FactoryType, typename TFuncPtr, typename... Args>
+void RunTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... args)
+{
+ std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
+ armnn::ProfilerManager::GetInstance().RegisterProfiler(profiler.get());
+
+ auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
+ FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
+
+ auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
+
+ auto testResult = (*testFunction)(workloadFactory, memoryManager, tensorHandleFactory, args...);
+ CompareTestResultIfSupported(testName, testResult);
+
+ armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr);
+}
+
+#define ARMNN_SIMPLE_TEST_CASE(TestName, TestFunction) \
+ TEST_CASE(#TestName) \
+ { \
+ TestFunction(); \
+ }
+
+#define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \
+ TEST_CASE(#TestName) \
+ { \
+ RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_AUTO_TEST_FIXTURE(TestName, Fixture, TestFunction, ...) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
+ { \
+ RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
+ TEST_CASE(#TestName) \
+ { \
+ RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_AUTO_TEST_FIXTURE_WITH_THF(TestName, Fixture, TestFunction, ...) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
+ { \
+ RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
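+/// Runs the given test function with both a FactoryType workload factory and the reference
+/// workload factory so that the outputs of the two backends can be compared.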
+template<typename FactoryType, typename TFuncPtr, typename... Args>
+void CompareRefTestFunction(const char* testName, TFuncPtr testFunction, Args... args)
+{
+ auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
+ FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
+
+ armnn::RefWorkloadFactory refWorkloadFactory;
+
+ auto testResult = (*testFunction)(workloadFactory, memoryManager, refWorkloadFactory, args...);
+ CompareTestResultIfSupported(testName, testResult);
+}
+
+template<typename FactoryType, typename TFuncPtr, typename... Args>
+void CompareRefTestFunctionUsingTensorHandleFactory(const char* testName, TFuncPtr testFunction, Args... args)
+{
+ auto memoryManager = WorkloadFactoryHelper<FactoryType>::GetMemoryManager();
+ FactoryType workloadFactory = WorkloadFactoryHelper<FactoryType>::GetFactory(memoryManager);
+
+ armnn::RefWorkloadFactory refWorkloadFactory;
+ auto tensorHandleFactory = WorkloadFactoryHelper<FactoryType>::GetTensorHandleFactory(memoryManager);
+ auto refTensorHandleFactory = RefWorkloadFactoryHelper::GetTensorHandleFactory(memoryManager);
+
+ auto testResult = (*testFunction)(
+ workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, args...);
+ CompareTestResultIfSupported(testName, testResult);
+}
+
+#define ARMNN_COMPARE_REF_AUTO_TEST_CASE(TestName, TestFunction, ...) \
+ TEST_CASE(#TestName) \
+ { \
+ CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
+ TEST_CASE(#TestName) \
+ { \
+ CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(TestName, Fixture, TestFunction, ...) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
+ { \
+ CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
+
+#define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE_WITH_THF(TestName, Fixture, TestFunction, ...) \
+ TEST_CASE_FIXTURE(Fixture, #TestName) \
+ { \
+ CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+ }
diff --git a/src/armnnTestUtils/WorkloadTestUtils.hpp b/src/armnnTestUtils/WorkloadTestUtils.hpp
new file mode 100644
index 0000000000..856e54a72a
--- /dev/null
+++ b/src/armnnTestUtils/WorkloadTestUtils.hpp
@@ -0,0 +1,113 @@
+//
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <armnn/backends/IMemoryManager.hpp>
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadInfo.hpp>
+
+namespace armnn
+{
+class ITensorHandle;
+} // namespace armnn
+
+namespace
+{
+
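+// Helpers for populating a workload's QueueDescriptor and WorkloadInfo with input and output
+// tensor handles and tensor infos.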
+template <typename QueueDescriptor>
+void AddInputToWorkload(QueueDescriptor& descriptor,
+ armnn::WorkloadInfo& info,
+ const armnn::TensorInfo& tensorInfo,
+ armnn::ITensorHandle* tensorHandle)
+{
+ descriptor.m_Inputs.push_back(tensorHandle);
+ info.m_InputTensorInfos.push_back(tensorInfo);
+}
+
+template <typename QueueDescriptor>
+void AddOutputToWorkload(QueueDescriptor& descriptor,
+ armnn::WorkloadInfo& info,
+ const armnn::TensorInfo& tensorInfo,
+ armnn::ITensorHandle* tensorHandle)
+{
+ descriptor.m_Outputs.push_back(tensorHandle);
+ info.m_OutputTensorInfos.push_back(tensorInfo);
+}
+
+template <typename QueueDescriptor>
+void SetWorkloadInput(QueueDescriptor& descriptor,
+ armnn::WorkloadInfo& info,
+ unsigned int index,
+ const armnn::TensorInfo& tensorInfo,
+ armnn::ITensorHandle* tensorHandle)
+{
+ descriptor.m_Inputs[index] = tensorHandle;
+ info.m_InputTensorInfos[index] = tensorInfo;
+}
+
+template <typename QueueDescriptor>
+void SetWorkloadOutput(QueueDescriptor& descriptor,
+ armnn::WorkloadInfo& info,
+ unsigned int index,
+ const armnn::TensorInfo& tensorInfo,
+ armnn::ITensorHandle* tensorHandle)
+{
+ descriptor.m_Outputs[index] = tensorHandle;
+ info.m_OutputTensorInfos[index] = tensorInfo;
+}
+
+inline void ExecuteWorkload(armnn::IWorkload& workload,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ bool memoryManagementRequested = true)
+{
+ const bool manageMemory = memoryManager && memoryManagementRequested;
+
+ // Acquire working memory (if needed)
+ if (manageMemory)
+ {
+ memoryManager->Acquire();
+ }
+
+ // Perform PostAllocationConfiguration
+ workload.PostAllocationConfigure();
+
+ // Execute the workload
+ workload.Execute();
+
+ // Release working memory (if needed)
+ if (manageMemory)
+ {
+ memoryManager->Release();
+ }
+}
+
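+// Returns the bias data type implied by the weights data type: floating-point weights keep the
+// same type, while quantized weights use Signed32 biases.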
+inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)
+{
+ if (!weightsType)
+ {
+ return weightsType;
+ }
+
+ switch(weightsType.value())
+ {
+ case armnn::DataType::BFloat16:
+ case armnn::DataType::Float16:
+ case armnn::DataType::Float32:
+ return weightsType;
+ case armnn::DataType::QAsymmS8:
+ case armnn::DataType::QAsymmU8:
+ case armnn::DataType::QSymmS8:
+ case armnn::DataType::QSymmS16:
+ return armnn::DataType::Signed32;
+ default:
+ ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+ }
+ return armnn::EmptyOptional();
+}
+
+} // anonymous namespace