aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
diff options
context:
space:
mode:
authornarpra01 <narumol.prangnawarat@arm.com>2018-11-20 15:21:28 +0000
committerAron Virginas-Tar <aron.virginas-tar@arm.com>2018-11-20 15:52:24 +0000
commitb9546cf1ffde83f63436c4087711dcf098ea4196 (patch)
tree9e8bfe040effdc56e39f9cc9d920fb515352f52d /src/backends/backendsCommon/test/EndToEndTestImpl.hpp
parent97f71306bdfdadb7ff7a55043ebce40f6c280223 (diff)
downloadarmnn-b9546cf1ffde83f63436c4087711dcf098ea4196.tar.gz
IVGCVSW-2173 - Add end to end layer test implementation and example usage
* Add CommonTestUtils * Add end to end layer test implementation * Add example usage for Merger layer on Ref, Cl, Neon Change-Id: I8931136288cd68b80bcdad8f5ae087ae1a70a60a
Diffstat (limited to 'src/backends/backendsCommon/test/EndToEndTestImpl.hpp')
-rw-r--r--src/backends/backendsCommon/test/EndToEndTestImpl.hpp50
1 file changed, 50 insertions, 0 deletions
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index e16116ee10..15a3937aca 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -5,9 +5,12 @@
#pragma once
#include <armnn/ArmNN.hpp>
+#include <armnn/INetwork.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <boost/test/unit_test.hpp>
+
#include <vector>
namespace
@@ -99,4 +102,51 @@ inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
);
}
+// End-to-end helper: optimizes and loads the given network into a freshly
+// created runtime, feeds it inputTensorData, runs inference on the supplied
+// backends, and verifies each output against expectedOutputData.
+// T is the element type of the tensor data vectors (e.g. float, uint8_t);
+// map keys are the network's input/output binding ids.
+template<typename T>
+void EndToEndLayerTestImpl(INetworkPtr network,
+                           const std::map<int, std::vector<T>>& inputTensorData,
+                           const std::map<int, std::vector<T>>& expectedOutputData,
+                           std::vector<BackendId> backends)
+{
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // optimize the network
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    // NOTE(review): the returned Status is not checked here — a failed load
+    // would only surface later via the Enqueue/compare steps; consider
+    // asserting on it.
+    NetworkId netId;
+    runtime->LoadNetwork(netId, std::move(optNet));
+
+    // Bind each input id to the caller's data buffer (ConstTensor does not
+    // own the memory; inputTensorData must outlive EnqueueWorkload).
+    InputTensors inputTensors;
+    inputTensors.reserve(inputTensorData.size());
+    for (auto&& it : inputTensorData)
+    {
+        inputTensors.push_back({it.first,
+                                ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
+    }
+    // outputStorage owns the buffers the output Tensors point into; it is
+    // sized from the expected data so each output has matching capacity.
+    OutputTensors outputTensors;
+    outputTensors.reserve(expectedOutputData.size());
+    std::map<int, std::vector<T>> outputStorage;
+    for (auto&& it : expectedOutputData)
+    {
+        std::vector<T> out(it.second.size());
+        outputStorage.emplace(it.first, out);
+        outputTensors.push_back({it.first,
+                                 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
+                                        outputStorage.at(it.first).data())});
+    }
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results.
+    // NOTE(review): `out` is a copy of the stored vector; a const reference
+    // would avoid the copy without changing the comparison.
+    for (auto&& it : expectedOutputData)
+    {
+        std::vector<T> out = outputStorage.at(it.first);
+        BOOST_TEST(it.second == out);
+    }
+}
+
} // anonymous namespace \ No newline at end of file