diff options
Diffstat (limited to 'src/backends/cl')
-rw-r--r-- | src/backends/cl/test/ClEndToEndTests.cpp | 10 | ||||
-rw-r--r-- | src/backends/cl/test/ClImportTensorHandleTests.cpp | 35 |
2 files changed, 35 insertions, 10 deletions
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp index fa6e027865..f28679cc96 100644 --- a/src/backends/cl/test/ClEndToEndTests.cpp +++ b/src/backends/cl/test/ClEndToEndTests.cpp @@ -514,12 +514,18 @@ TEST_CASE("ClQLstmEndToEndTest") QLstmEndToEnd(clDefaultBackends); } -TEST_CASE("ClForceImportWithMisalignedInputBuffersEndToEndTest") +TEST_CASE("ClForceImportWithMisalignedInputBuffersEndToEndTest" + // Currently, the CL workload for activation does not support tensor handle replacement so this test case + // will always fail. + * doctest::skip(true)) { ForceImportWithMisalignedInputBuffersEndToEndTest(clDefaultBackends); } -TEST_CASE("ClForceImportWithMisalignedOutputBuffersEndToEndTest") +TEST_CASE("ClForceImportWithMisalignedOutputBuffersEndToEndTest" + // Currently, the CL workload for activation does not support tensor handle replacement so this test case + // will always fail. + * doctest::skip(true)) { ForceImportWithMisalignedOutputBuffersEndToEndTest(clDefaultBackends); } diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp index 9a075d2b7d..1198cade61 100644 --- a/src/backends/cl/test/ClImportTensorHandleTests.cpp +++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp @@ -397,11 +397,14 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConv2dEndToEnd") INFO("Run ImportInputs"); std::vector<ImportedInputId> importedInputIds = runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc); + // We expect the import to have succeeded. + CHECK(importedInputIds.size() == 1); std::vector<ImportedOutputId> importedOutputIds = runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc); - + // We expect the import to have succeeded. + CHECK(importedOutputIds.size() == 1); // Do the inference - runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds); + runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds); // Retrieve the Profiler.Print() output to get the workload execution ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance(); @@ -536,11 +539,15 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp16toFp32EndToE INFO("Run ImportInputs"); std::vector<ImportedInputId> importedInputIds = runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc); + // We expect the import to have succeeded. + CHECK(importedInputIds.size() == 1); std::vector<ImportedOutputId> importedOutputIds = runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc); + // We expect the import to have succeeded. + CHECK(importedOutputIds.size() == 1); // Do the inference - runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds); + runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds); // Retrieve the Profiler.Print() output to get the workload execution ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance(); @@ -680,11 +687,15 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportConvertFp32toFp16EndToE INFO("Run ImportInputs"); std::vector<ImportedInputId> importedInputIds = runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc); + // We expect the import to have succeeded. + CHECK(importedInputIds.size() == 1); std::vector<ImportedOutputId> importedOutputIds = runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc); + // We expect the import to have succeeded. + CHECK(importedOutputIds.size() == 1); // Do the inference - runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds); + runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds); // Retrieve the Profiler.Print() output to get the workload execution ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance(); @@ -798,11 +809,13 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportSimpleConvertFp32toFp16 INFO("Run ImportInputs"); std::vector<ImportedInputId> importedInputIds = runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc); + CHECK(importedInputIds.size() == 1); std::vector<ImportedOutputId> importedOutputIds = runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc); + CHECK(importedOutputIds.size() == 1); // Do the inference - runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds); + runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds); // Retrieve the Profiler.Print() output to get the workload execution ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance(); @@ -838,7 +851,7 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo /* * This is a test to check the functionality of the Forced Import functionality when using repeated inferences that * require switching from importing to copy. For the first inference we create aligned Pointers and check they are - * imported correctly. For the second we use similar pointers but don't use PreImporting to force fall back to copy. + * imported correctly. For the second we use similar pointers but don't use PreImporting. */ // Create runtime in which test will run IRuntime::CreationOptions options; @@ -959,11 +972,15 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesEndTo INFO("Run ImportInputs"); std::vector<ImportedInputId> importedInputIds = runtime->ImportInputs(netId, inputTensors, MemorySource::Malloc); + // We expect the import to have succeeded. + CHECK(importedInputIds.size() == 1); std::vector<ImportedOutputId> importedOutputIds = runtime->ImportOutputs(netId, outputTensors, MemorySource::Malloc); + // We expect the import to have succeeded. + CHECK(importedOutputIds.size() == 1); // Do the inference - runtime->EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, importedOutputIds); + runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds); // Retrieve the Profiler.AnalyzeEventsAndWriteResults() output to get the workload execution ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance(); @@ -1246,11 +1263,13 @@ TEST_CASE_FIXTURE(ClContextControlFixture, "ClForceImportRepeatedInferencesInver INFO("Run ImportInputs"); std::vector<ImportedInputId> importedInputIds = runtime->ImportInputs(netId, inputTensorsImport, MemorySource::Malloc); + CHECK(importedInputIds.size() == 1); std::vector<ImportedOutputId> importedOutputIds = runtime->ImportOutputs(netId, outputTensorsImport, MemorySource::Malloc); + CHECK(importedOutputIds.size() == 1); // Do the inference with pre-imported inputs/outputs - runtime->EnqueueWorkload(netId, inputTensorsImport, outputTensorsImport, importedInputIds, importedOutputIds); + runtime->EnqueueWorkload(netId, InputTensors(), OutputTensors(), importedInputIds, importedOutputIds); // Sync the outputs so we can read the data arm_compute::CLScheduler::get().sync(); |