Diffstat (limited to 'src/backends/backendsCommon/test')
-rw-r--r--  src/backends/backendsCommon/test/CommonTestUtils.hpp       |  25
-rw-r--r--  src/backends/backendsCommon/test/OptimizedNetworkTests.cpp | 129
2 files changed, 154 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/test/CommonTestUtils.hpp b/src/backends/backendsCommon/test/CommonTestUtils.hpp
index 99412b9694..c7537f1eed 100644
--- a/src/backends/backendsCommon/test/CommonTestUtils.hpp
+++ b/src/backends/backendsCommon/test/CommonTestUtils.hpp
@@ -18,6 +18,10 @@
#include <test/TestUtils.hpp>

#include <algorithm>
+#include <limits>
+#include <random>
+#include <type_traits>
+#include <vector>

// Checks that two collections have the exact same contents (in any order)
// The given collections do not have to contain duplicates
@@ -94,3 +98,24 @@ armnn::TensorShape MakeTensorShape(unsigned int batches,
unsigned int height,
unsigned int width,
armnn::DataLayout layout);
+
+template<typename DataType>
+static std::vector<DataType> GenerateRandomData(size_t size)
+{
+    constexpr bool isIntegerType = std::is_integral<DataType>::value;
+    using Distribution =
+        typename std::conditional<isIntegerType,
+                                  std::uniform_int_distribution<DataType>,
+                                  std::uniform_real_distribution<DataType>>::type;
+
+    static constexpr DataType lowerLimit = std::numeric_limits<DataType>::min(); // For floating point, min() is the smallest positive value
+    static constexpr DataType upperLimit = std::numeric_limits<DataType>::max();
+
+    static Distribution distribution(lowerLimit, upperLimit); // Shared across all calls for this DataType
+    static std::default_random_engine generator;              // Default-seeded, so sequences are reproducible
+
+    std::vector<DataType> randomData(size);
+    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
+
+    return randomData;
+}
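For reference, the helper above is what the new test uses to create weight, bias, and input data. A short usage sketch (the shape and the integer example are illustrative, not part of the patch):

    // Back an armnn::ConstTensor with uniformly distributed random values.
    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
    std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
    armnn::ConstTensor weights(weightsInfo, weightsData);

    // Integral element types select std::uniform_int_distribution instead.
    // Note: std::uniform_int_distribution does not accept char-sized types,
    // so uint8_t data would need an int-width DataType and a narrowing cast.
    std::vector<int32_t> randomInts = GenerateRandomData<int32_t>(16);

Because the engine is default-seeded and static, repeated runs produce the same sequences, which keeps tests built on this helper deterministic.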
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 2c74690e6e..dcea9ef72e 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -3,6 +3,8 @@
// SPDX-License-Identifier: MIT
//

+#include "CommonTestUtils.hpp"
+
#include <Graph.hpp>
#include <Network.hpp>
@@ -358,4 +360,131 @@ TEST_CASE("OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback")
}
}

+TEST_CASE("OptimizeNetworkCopy")
+{
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
+    std::vector<armnn::NetworkId> networkIds;
+
+    const std::string layerName("convolution2d");
+    const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo({ 1, 2, 2, 1 }, armnn::DataType::Float32);
+
+    const armnn::TensorInfo weightsInfo({ 1, 3, 3, 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo biasesInfo ({ 1 }, armnn::DataType::Float32);
+
+    std::vector<float> weightsData = GenerateRandomData<float>(weightsInfo.GetNumElements());
+    armnn::ConstTensor weights(weightsInfo, weightsData);
+
+    std::vector<float> biasesData = GenerateRandomData<float>(biasesInfo.GetNumElements());
+    armnn::ConstTensor biases(biasesInfo, biasesData);
+
+    armnn::Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft     = 1;
+    descriptor.m_PadRight    = 1;
+    descriptor.m_PadTop      = 1;
+    descriptor.m_PadBottom   = 1;
+    descriptor.m_StrideX     = 2;
+    descriptor.m_StrideY     = 2;
+    descriptor.m_DilationX   = 2;
+    descriptor.m_DilationY   = 2;
+    descriptor.m_BiasEnabled = true;
+    descriptor.m_DataLayout  = armnn::DataLayout::NHWC;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const convLayer =
+        network->AddConvolution2dLayer(descriptor,
+                                       weights,
+                                       armnn::Optional<armnn::ConstTensor>(biases),
+                                       layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    std::vector<armnn::BackendId> preferredBackends { "CpuRef" };
+    armnn::ModelOptions modelOptions;
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions);
+    std::vector<std::string> errorMessages;
+
+    // Optimize the network.
+    armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
+                                                  preferredBackends,
+                                                  runtime->GetDeviceSpec(),
+                                                  optimizerOptions,
+                                                  armnn::Optional<std::vector<std::string>&>(errorMessages));
+
+    for (unsigned int i = 0; i < 2; ++i)
+    {
+        armnn::ModelOptions optimizedModelOptions;
+        auto copy = armnn::IOptimizedNetworkPtr(new armnn::IOptimizedNetwork(*optNet.get(), optimizedModelOptions),
+                                                &armnn::IOptimizedNetwork::Destroy);
+
+        CHECK(copy);
+
+        armnn::NetworkId netId;
+        std::string errorMessage;
+
+        CHECK(armnn::Status::Success == runtime->LoadNetwork(netId, std::move(copy), errorMessage));
+
+        // Record the networkID for the loaded network
+        networkIds.emplace_back(netId);
+    }
+    armnn::NetworkId optNetId;
+    std::string errorMessage;
+
+    // Load the original optNet
+    CHECK(armnn::Status::Success == runtime->LoadNetwork(optNetId, std::move(optNet), errorMessage));
+
+    std::vector<float> inputData = GenerateRandomData<float>(runtime->GetInputTensorInfo(optNetId, 0).GetNumElements());
+    std::vector<float> outputData(runtime->GetOutputTensorInfo(optNetId, 0).GetNumElements());
+
+    armnn::InputTensors inputTensors
+    {
+        {
+            0, armnn::ConstTensor(runtime->GetInputTensorInfo(optNetId, 0), inputData.data())
+        }
+    };
+    armnn::OutputTensors outputTensors
+    {
+        {
+            0, armnn::Tensor(runtime->GetOutputTensorInfo(optNetId, 0), outputData.data())
+        }
+    };
+    runtime->EnqueueWorkload(optNetId, inputTensors, outputTensors);
+    runtime->UnloadNetwork(optNetId);
+
+    // Run each loaded copy and compare its results against the original network's
+    for (unsigned int i = 0; i < networkIds.size(); ++i)
+    {
+        armnn::NetworkId netId = networkIds[i];
+        std::vector<float> copyOutputData(runtime->GetOutputTensorInfo(netId, 0).GetNumElements());
+
+        armnn::InputTensors copyInputTensors
+        {
+            {
+                0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())
+            }
+        };
+        armnn::OutputTensors copyOutputTensors
+        {
+            {
+                0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), copyOutputData.data())
+            }
+        };
+        runtime->EnqueueWorkload(netId, copyInputTensors, copyOutputTensors);
+        runtime->UnloadNetwork(netId);
+
+        // Check results are identical to the "original" version
+        for (unsigned int j = 0; j < outputData.size(); ++j)
+        {
+            CHECK(outputData[j] == copyOutputData[j]);
+        }
+    }
+}
+
}
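Stripped of the graph construction, the pattern the new test exercises is: deep-copy an armnn::IOptimizedNetwork via its copy constructor, then load and run the copy independently of the original. A minimal sketch, assuming optNet and runtime exist as in the test above:

    // Copy the optimized network; the original remains valid and loadable.
    armnn::ModelOptions copyModelOptions;
    armnn::IOptimizedNetworkPtr copy(new armnn::IOptimizedNetwork(*optNet.get(), copyModelOptions),
                                     &armnn::IOptimizedNetwork::Destroy);

    armnn::NetworkId copyId;
    std::string errorMessage;
    if (runtime->LoadNetwork(copyId, std::move(copy), errorMessage) != armnn::Status::Success)
    {
        FAIL(errorMessage); // doctest macro, consistent with the CHECKs above
    }

Each load returns a distinct NetworkId, which is why the test can enqueue the original and both copies in turn and compare their outputs element by element.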