From 01d0281404183c84d26e863502cac8d83044c0bf Mon Sep 17 00:00:00 2001 From: Jim Flynn Date: Wed, 29 Apr 2020 21:12:13 +0100 Subject: IVGCVSW-4595 Change FileOnlyProfilingConnection to all packet processor model Change-Id: Ieccb26190d80e570ddef8d7c22e824eda1b92d7f Signed-off-by: Jim Flynn --- .../test/FileOnlyProfilingDecoratorTests.cpp | 26 +++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) (limited to 'src/profiling/test/FileOnlyProfilingDecoratorTests.cpp') diff --git a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp index 80236ae4eb..aa877a10e9 100644 --- a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp +++ b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp @@ -49,8 +49,11 @@ std::string UniqueFileName() BOOST_AUTO_TEST_CASE(TestFileOnlyProfiling) { - // This test requires the CpuRef backend to be enabled - if(!BackendRegistryInstance().IsBackendRegistered("CpuRef")) + // This test requires at least one backend registry to be enabled + // which can execute a NormalizationLayer + if (!(BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(armnn::Compute::CpuRef)) || + BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(armnn::Compute::CpuAcc)) || + BackendRegistryInstance().IsBackendRegistered(GetComputeDeviceAsCString(armnn::Compute::GpuAcc)))) { return; } @@ -87,13 +90,30 @@ BOOST_AUTO_TEST_CASE(TestFileOnlyProfiling) normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32)); // optimize the network - std::vector<BackendId> backends = { armnn::Compute::CpuRef }; + std::vector<BackendId> backends = + { armnn::Compute::CpuRef, armnn::Compute::CpuAcc, armnn::Compute::GpuAcc }; IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime.GetDeviceSpec()); // Load it into the runtime. It should succeed. 
armnn::NetworkId netId; BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success); + // Creates structures for input & output. + std::vector<float> inputData(16); + std::vector<float> outputData(16); + + InputTensors inputTensors + { + {0, ConstTensor(runtime.GetInputTensorInfo(netId, 0), inputData.data())} + }; + OutputTensors outputTensors + { + {0, Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data())} + }; + + // Does the inference. + runtime.EnqueueWorkload(netId, inputTensors, outputTensors); + static_cast<TestTimelinePacketHandler*>(localPacketHandlerPtr.get())->WaitOnInferenceCompletion(3000); } -- cgit v1.2.1