author    Colm Donelan <colm.donelan@arm.com>  2022-07-06 12:09:05 +0100
committer Nikhil Raj <nikhil.raj@arm.com>      2022-07-27 15:56:33 +0100
commit    d7ceec59ce45f690deba2c0d452ec91fabbdadf9 (patch)
tree      ddea3d4092d1d9df21a751bf8cf1750c746ad644 /src/backends/backendsCommon/test/EndToEndTestImpl.hpp
parent    9d9dd223ba9fbc509ea8ff1c211d3c63943a5989 (diff)
download  armnn-d7ceec59ce45f690deba2c0d452ec91fabbdadf9.tar.gz
IVGCVSW-6896 Fix pre-import when using sync execute.
* Refactor backend capability checks in LoadedNetwork.
* ImportInputs should check that the number of tensors does not exceed the number of inputs.
* In EnqueueWorkload the check for the count of input tensors was ignoring pre-imported inputs.
* Added checks to verify ImportInputs/ImportOutputs worked as expected in EndToEndTestImpl.
* Improve documentation on ImportInputs/ImportOutputs in IRuntime.hpp.
* Disabled import tests in CL and Neon EndToEndTests that cannot work.

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Iae4b2644a1c9f01ee72bce1afb211661cc9ae2e3
Diffstat (limited to 'src/backends/backendsCommon/test/EndToEndTestImpl.hpp')
-rw-r--r--  src/backends/backendsCommon/test/EndToEndTestImpl.hpp | 36
1 file changed, 30 insertions(+), 6 deletions(-)
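
The pattern the new CHECKs pin down is worth spelling out: ImportInputs/ImportOutputs return one id per tensor that was actually imported, and EnqueueWorkload should then be given only the tensors that were not imported. The sketch below is not part of the patch; the helper name RunWithPreImportedTensors and its parameters are illustrative, assuming a single-input, single-output network already loaded under netId and user-owned buffers wrapped in inputTensors/outputTensors.

// Minimal sketch of the calling convention these tests exercise.
#include <armnn/IRuntime.hpp>
#include <armnn/Types.hpp>
#include <vector>

armnn::Status RunWithPreImportedTensors(armnn::IRuntime* runtime,
                                        armnn::NetworkId netId,
                                        const armnn::InputTensors& inputTensors,
                                        const armnn::OutputTensors& outputTensors)
{
    using namespace armnn;

    // Ask the runtime to import the buffers up front. Each returned vector holds
    // one id per tensor that was actually imported, so a size mismatch means the
    // import was rejected (e.g. the buffer is misaligned).
    std::vector<ImportedInputId> importedInputIds =
        runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
    std::vector<ImportedOutputId> importedOutputIds =
        runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);

    // Tensors that were successfully imported are not passed again through the
    // InputTensors/OutputTensors arguments; tensors whose import failed still
    // have to be passed so the runtime can copy them at the boundary as usual.
    return runtime->EnqueueWorkload(netId,
                                    importedInputIds.empty()  ? inputTensors  : InputTensors(),
                                    importedOutputIds.empty() ? outputTensors : OutputTensors(),
                                    importedInputIds,
                                    importedOutputIds);
}

This mirrors what the patch changes in the tests below: when both imports succeed, empty InputTensors()/OutputTensors() are passed alongside the imported ids; when an import fails, the corresponding tensors go through the normal path.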
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index cc5aa23ca3..44ae2beb76 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -887,10 +887,12 @@ inline void ForceImportWithAlignedBuffersEndToEndTest(std::vector<BackendId> bac
runtime->GetProfiler(netId)->EnableProfiling(true);
std::vector<ImportedInputId> importedInputIds =
runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+ CHECK(importedInputIds.size() == 1);
std::vector<ImportedOutputId> importedOutputIds =
runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+ CHECK(importedOutputIds.size() == 1);
// Do the inference and force the import as the memory is aligned.
- runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+ runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds);
// Retrieve the Profiler.Print() output to get the workload execution
ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -997,11 +999,14 @@ inline void ForceImportWithMisalignedInputBuffersEndToEndTest(std::vector<Backen
runtime->GetProfiler(netId)->EnableProfiling(true);
std::vector<ImportedInputId> importedInputIds =
runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+ // We expect the import to have failed.
+ CHECK(importedInputIds.size() == 0);
std::vector<ImportedOutputId> importedOutputIds =
runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+ CHECK(importedOutputIds.size() == 1);
// Do the inference and force the import as the memory is misaligned.
- runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+ runtime->EnqueueWorkload(netId, inputTensors, OutputTensors(), importedInputIds, importedOutputIds);
// Retrieve the Profiler.Print() output to get the workload execution
ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -1113,11 +1118,14 @@ inline void ForceImportWithMisalignedOutputBuffersEndToEndTest(std::vector<Backe
runtime->GetProfiler(netId)->EnableProfiling(true);
std::vector<ImportedInputId> importedInputIds =
runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+ CHECK(importedInputIds.size() == 1);
+ // We expect this to fail.
std::vector<ImportedOutputId> importedOutputIds =
runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+ CHECK(importedOutputIds.size() == 0);
- // Do the inference and force the import as the memory is misaligned.
- runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+ // Even if importing the output failed, we still expect the inference to work.
+ runtime->EnqueueWorkload(netId, InputTensors(), outputTensors, importedInputIds, importedOutputIds);
// Retrieve the Profiler.Print() output to get the workload execution
ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -1233,8 +1241,12 @@ inline void ForceImportWithMisalignedInputAndOutputBuffersEndToEndTest(std::vect
runtime->GetProfiler(netId)->EnableProfiling(true);
std::vector<ImportedInputId> importedInputIds =
runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+ // Import should have failed.
+ CHECK(importedInputIds.size() == 0);
std::vector<ImportedOutputId> importedOutputIds =
runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+ // Import should have failed.
+ CHECK(importedOutputIds.size() == 0);
// Do the inference and force the import as the memory is misaligned.
runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
@@ -1339,10 +1351,12 @@ inline void ForceImportRepeatedInferencesEndToEndTest(std::vector<BackendId> bac
runtime->GetProfiler(netId)->EnableProfiling(true);
std::vector<ImportedInputId> importedInputIds =
runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+ CHECK(importedInputIds.size() == 1);
std::vector<ImportedOutputId> importedOutputIds =
runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+ CHECK(importedOutputIds.size() == 1);
// Do the inference and force the import as the memory is aligned.
- runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+ runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds);
// Retrieve the Profiler.AnalyzeEventsAndWriteResults() output to get the workload execution
ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -1408,7 +1422,11 @@ inline void ForceImportRepeatedInferencesEndToEndTest(std::vector<BackendId> bac
{0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), misalignedOutputPtr)}
};
importedInputIds = runtime->ImportInputs(netId, inputTensorsMisaligned, MemorySource::Malloc);
+ // Import should fail.
+ CHECK(importedInputIds.size() == 0);
importedOutputIds = runtime->ImportOutputs(netId, outputTensorsMisaligned, MemorySource::Malloc);
+ // Import should fail.
+ CHECK(importedOutputIds.size() == 0);
// Do the inference and force the import as the memory is misaligned.
runtime->EnqueueWorkload(netId,
@@ -1527,8 +1545,12 @@ inline void ForceImportRepeatedInferencesInvertedEndToEndTest(std::vector<Backen
runtime->GetProfiler(netId)->EnableProfiling(true);
std::vector<ImportedInputId> importedInputIds =
runtime->ImportInputs(netId, inputTensorsMisaligned, MemorySource::Malloc);
+ // Import should fail.
+ CHECK(importedInputIds.size() == 0);
std::vector<ImportedOutputId> importedOutputIds =
runtime->ImportOutputs(netId, outputTensorsMisaligned, MemorySource::Malloc);
+ // Import should fail.
+ CHECK(importedOutputIds.size() == 0);
// Do the inference and force the import as the memory is misaligned.
runtime->EnqueueWorkload(netId,
@@ -1593,9 +1615,11 @@ inline void ForceImportRepeatedInferencesInvertedEndToEndTest(std::vector<Backen
};
importedInputIds = runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc);
+ CHECK(importedInputIds.size() == 1);
importedOutputIds = runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc);
+ CHECK(importedOutputIds.size() == 1);
// Do the inference and force the import as the memory is aligned.
- runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds);
+ runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds);
// Retrieve the Profiler.AnalyzeEventsAndWriteResults() output to get the workload execution
// We need to use AnalyzeEventsAndWriteResults here to make sure the second inference has been profiled