From eff204aa3ae75277b0cf689eed0e2073ff644ef8 Mon Sep 17 00:00:00 2001 From: Colm Donelan Date: Tue, 28 Nov 2023 15:46:09 +0000 Subject: IVGCVSW-7675 Rework DelegateUnitTests so backends are subcases. The intent of this change is to remove the per backend test cases in the delegate unit tests. They will be replaced by using DocTest SUBCASES. The sub cases are parameterized by the available backends. The list of available backends is determined by the compilation flags. Signed-off-by: Colm Donelan Change-Id: Ia377c7a7399d0e30dc287d7217b3e3b52e1ea074 --- delegate/test/ConvolutionTestHelper.hpp | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'delegate/test/ConvolutionTestHelper.hpp') diff --git a/delegate/test/ConvolutionTestHelper.hpp b/delegate/test/ConvolutionTestHelper.hpp index bb8852eaa5..f651ad5e7e 100644 --- a/delegate/test/ConvolutionTestHelper.hpp +++ b/delegate/test/ConvolutionTestHelper.hpp @@ -201,7 +201,6 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode, uint32_t dilationY, tflite::Padding padding, tflite::ActivationFunctionType fused_activation_function, - std::vector& backends, std::vector& inputShape, std::vector& filterShape, std::vector& outputShape, @@ -219,8 +218,8 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode, float quantScale = 1.0f, int quantOffset = 0, int32_t depth_multiplier = 1, - int32_t filterQuantizationDim = 3) - + int32_t filterQuantizationDim = 3, + const std::vector& backends = {}) { using namespace delegateTestInterpreter; @@ -259,7 +258,7 @@ void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode, std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); // Setup interpreter with Arm NN Delegate applied.
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends)); CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); CHECK(armnnInterpreter.Invoke() == kTfLiteOk); @@ -437,7 +436,6 @@ void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode, std::vector dilation, tflite::Padding padding, tflite::ActivationFunctionType fused_activation_function, - std::vector& backends, std::vector& inputShape, std::vector& filterShape, std::vector& outputShape, @@ -455,7 +453,8 @@ void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode, float quantScale = 1.0f, int quantOffset = 0, int32_t depth_multiplier = 1, - int32_t filterQuantizationDim = 3) + int32_t filterQuantizationDim = 3, + const std::vector& backends = {}) { using namespace delegateTestInterpreter; @@ -492,7 +491,7 @@ void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode, std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); // Setup interpreter with Arm NN Delegate applied. 
- auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends)); CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk); CHECK(armnnInterpreter.Invoke() == kTfLiteOk); @@ -631,8 +630,7 @@ std::vector CreateTransposeConvTfLiteModel(tflite::TensorType tensorType, } template -void TransposeConvTest(std::vector& backends, - tflite::TensorType tensorType, +void TransposeConvTest(tflite::TensorType tensorType, uint32_t strideX, uint32_t strideY, tflite::Padding padding, @@ -649,7 +647,8 @@ void TransposeConvTest(std::vector& backends, float outputQuantScale = 1.0f, int outputQuantOffset = 0, float quantScale = 1.0f, - int quantOffset = 0) + int quantOffset = 0, + const std::vector& backends = {}) { using namespace delegateTestInterpreter; @@ -681,7 +680,7 @@ void TransposeConvTest(std::vector& backends, std::vector tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0); // Setup interpreter with Arm NN Delegate applied. - auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends); + auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, CaptureAvailableBackends(backends)); CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk); CHECK(armnnInterpreter.FillInputTensor(inputValues, 2) == kTfLiteOk); CHECK(armnnInterpreter.Invoke() == kTfLiteOk); -- cgit v1.2.1