author     Jim Flynn <jim.flynn@arm.com>    2020-04-29 21:12:13 +0100
committer  Jim Flynn <jim.flynn@arm.com>    2020-05-26 16:33:21 +0100
commit     01d0281404183c84d26e863502cac8d83044c0bf (patch)
tree       526f19f39fb826d0df1035729182af27ec5a44d8 /src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
parent     42b3d7da750ab6ad39ea228985f422685f89eb45 (diff)
download   armnn-01d0281404183c84d26e863502cac8d83044c0bf.tar.gz
IVGCVSW-4595 Change FileOnlyProfilingConnection to a packet processor model
Change-Id: Ieccb26190d80e570ddef8d7c22e824eda1b92d7f
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Diffstat (limited to 'src/profiling/test/FileOnlyProfilingDecoratorTests.cpp')
-rw-r--r--  src/profiling/test/FileOnlyProfilingDecoratorTests.cpp | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)
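
The first hunk below widens the skip guard: instead of requiring CpuRef specifically, the test now runs if any backend capable of executing a NormalizationLayer (CpuRef, CpuAcc or GpuAcc) is registered. Purely as a sketch, and not part of this commit, the same guard could be factored into a helper; the name HasSuitableBackendRegistered is a hypothetical assumption for illustration:

#include <armnn/BackendId.hpp>
#include <armnn/BackendRegistry.hpp>

// Sketch only: true if at least one backend that can execute a
// NormalizationLayer (CpuRef, CpuAcc or GpuAcc) is registered.
bool HasSuitableBackendRegistered()
{
    using namespace armnn;
    return BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(Compute::CpuRef)) ||
           BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(Compute::CpuAcc)) ||
           BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(Compute::GpuAcc));
}

With such a helper the test would simply early-return when !HasSuitableBackendRegistered().
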
diff --git a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
index 80236ae4eb..aa877a10e9 100644
--- a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
+++ b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
@@ -49,8 +49,11 @@ std::string UniqueFileName()
BOOST_AUTO_TEST_CASE(TestFileOnlyProfiling)
{
-    // This test requires the CpuRef backend to be enabled
-    if(!BackendRegistryInstance().IsBackendRegistered("CpuRef"))
+    // This test requires at least one registered backend which can
+    // execute a NormalizationLayer
+    if (!(BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(armnn::Compute::CpuRef)) ||
+          BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(armnn::Compute::CpuAcc)) ||
+          BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(armnn::Compute::GpuAcc))))
    {
        return;
    }
@@ -87,13 +90,30 @@ BOOST_AUTO_TEST_CASE(TestFileOnlyProfiling)
    normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    // optimize the network
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends =
+        { armnn::Compute::CpuRef, armnn::Compute::CpuAcc, armnn::Compute::GpuAcc };
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec());
    // Load it into the runtime. It should succeed.
    armnn::NetworkId netId;
    BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    // Creates structures for input & output.
+    std::vector<float> inputData(16);
+    std::vector<float> outputData(16);
+
+    InputTensors inputTensors
+    {
+        {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())}
+    };
+    OutputTensors outputTensors
+    {
+        {0, Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data())}
+    };
+
+    // Does the inference.
+    runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
+
    static_cast<TestTimelinePacketHandler*>(localPacketHandlerPtr.get())->WaitOnInferenceCompletion(3000);
}
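
Note that the added EnqueueWorkload call in the second hunk discards its return status, so the test only fails indirectly if WaitOnInferenceCompletion times out without seeing the expected timeline events. A possible tightening, shown here purely as a sketch and not part of this commit, would be to assert on the status as well, reusing the same runtime, netId and tensor objects set up above:

// Sketch only: surface an inference failure directly instead of relying on
// the 3000 ms wait on the timeline packet handler to time out.
armnn::Status ret = runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
BOOST_TEST(ret == armnn::Status::Success);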