From 32ca144fc8b4f0a1e2eda274da55ffd0a6016c02 Mon Sep 17 00:00:00 2001 From: Sadik Armagan Date: Fri, 13 Nov 2020 17:51:56 +0000 Subject: IVGCVSW-5338 TfLiteDelegate: Implement the Convolution operators * Add Convolution, DepthwiseConvolution and TransposeConvolution Signed-off-by: Kevin May Signed-off-by: Sadik Armagan Change-Id: I797e42844dfee0cc80beb64eabc3111b96320daf --- delegate/src/test/Convolution2dTest.cpp | 428 ++++++++++++++++++++++++++++++++ 1 file changed, 428 insertions(+) create mode 100644 delegate/src/test/Convolution2dTest.cpp (limited to 'delegate/src/test/Convolution2dTest.cpp') diff --git a/delegate/src/test/Convolution2dTest.cpp b/delegate/src/test/Convolution2dTest.cpp new file mode 100644 index 0000000000..4e9377a24d --- /dev/null +++ b/delegate/src/test/Convolution2dTest.cpp @@ -0,0 +1,428 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ConvolutionTestHelper.hpp" + +#include + +#include +#include +#include +#include +#include +#include + +#include + +namespace armnnDelegate +{ + +void Conv2DWithBiasesFp32Test(std::vector& backends) +{ + // Set input data + std::vector inputShape { 1, 5, 5, 1 }; + std::vector filterShape { 1, 3, 3, 1 }; + std::vector biasShape { 1 }; + std::vector outputShape { 1, 3, 3, 1 }; + + static std::vector inputValues = + { + 1, 5, 2, 3, 5, + 8, 7, 3, 6, 3, + 3, 3, 9, 1, 9, + 4, 1, 8, 1, 3, + 6, 8, 1, 9, 2 + }; + + std::vector filterValues = + { + 4, 5, 6, + 0, 0, 0, + 3, 2, 1 + }; + + std::vector biasValues = { 0 }; + + std::vector expectedOutputValues = + { + 23, 33, 24, + 91, 99, 48, + 26, 50, 19 + }; + + tflite::Padding padding = tflite::Padding_SAME; + + ConvolutionTest(tflite::BuiltinOperator_CONV_2D, + ::tflite::TensorType_FLOAT32, + 2, // strideX + 2, // strideY + 1, // dilationX + 1, // dilationY + padding, + tflite::ActivationFunctionType_NONE, + backends, + inputShape, + filterShape, + outputShape, + inputValues, + filterValues, + 
expectedOutputValues, + biasShape, + biasValues); +} + +void Conv2DWithBiasesUint8Test(std::vector& backends) +{ + // Set input data + std::vector inputShape { 1, 2, 2, 1 }; + std::vector filterShape { 1, 2, 2, 1 }; + std::vector biasShape { 1 }; + std::vector outputShape { 1, 2, 2, 1 }; + + static std::vector inputValues = { 1, 2, 3, 4 }; + + std::vector filterValues = { 2, 1, 0, 6 }; + + std::vector biasValues = { 10 }; + + std::vector expectedOutputValues = + { + (1 * 2 + 2 * 1 + 3 * 0 + 4 * 6 + 10) / 2, // 19 + (2 * 2 + 0 * 1 + 4 * 0 + 0 * 6 + 10) / 2, // 7 + (3 * 2 + 4 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 10 + (4 * 2 + 0 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 9 + }; + + tflite::Padding padding = tflite::Padding_SAME; + + ConvolutionTest(tflite::BuiltinOperator_CONV_2D, + ::tflite::TensorType_UINT8, + 1, // strideX + 1, // strideY + 1, // dilationX + 1, // dilationY + padding, + tflite::ActivationFunctionType_NONE, + backends, + inputShape, + filterShape, + outputShape, + inputValues, + filterValues, + expectedOutputValues, + biasShape, + biasValues); +} + +void Conv2DWithBiasesReluUint8Test(std::vector& backends) +{ + // Set input data + std::vector inputShape { 1, 2, 2, 1 }; + std::vector filterShape { 1, 2, 2, 1 }; + std::vector biasShape { 1 }; + std::vector outputShape { 1, 2, 2, 1 }; + + static std::vector inputValues = { 1, 2, 4, 8 }; + + std::vector filterValues = { 2, 1, 0, 6 }; + + std::vector biasValues = { 16 }; + + // factors to consider: + // - the filter zero point is non zero, hence the (x-fz) + // - the output scale is 2 hence the /2 + // - output zero point is non zero, hence the +outZero + // - RELU cuts negative values and then we add the output zero point + uint8_t bias = 16; + uint8_t outZero = 20; + uint8_t fz = 4; // filter zero point + + std::vector expectedOutputValues = + { + std::max(outZero, static_cast((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)), + std::max(outZero, static_cast((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 
0*(6-fz) + bias)/2 + outZero)), + std::max(outZero, static_cast((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)), + std::max(outZero, static_cast((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)) + }; + + tflite::Padding padding = tflite::Padding_SAME; + + ConvolutionTest(tflite::BuiltinOperator_CONV_2D, + ::tflite::TensorType_UINT8, + 1, // strideX + 1, // strideY + 1, // dilationX + 1, // dilationY + padding, + tflite::ActivationFunctionType_RELU, + backends, + inputShape, + filterShape, + outputShape, + inputValues, + filterValues, + expectedOutputValues, + biasShape, + biasValues, + 1, // filter scale + 4, // filter offset + 2, // output scale + 20); // output offset +} + +void Conv2DWithBiasesRelu6Uint8Test(std::vector& backends) +{ + // Set input data + std::vector inputShape { 1, 2, 2, 1 }; + std::vector filterShape { 1, 2, 2, 1 }; + std::vector biasShape { 1 }; + std::vector outputShape { 1, 2, 2, 1 }; + + static std::vector inputValues = { 1, 2, 4, 1 }; + + std::vector filterValues = { 2, 1, 0, 6 }; + + std::vector biasValues = { 0 }; + + // factors to consider: + // - the output scale is 2 hence the /2 + // - RELU6 cuts output values at +6 + uint8_t relu6Min = 6 / 2; // divide by output scale + + std::vector expectedOutputValues = + { + std::min(relu6Min, static_cast((1 * 2 + 2 * 1 + 4 * 0 + 1 * 6) / 2)), + std::min(relu6Min, static_cast((2 * 2 + 0 * 1 + 1 * 0 + 0 * 6) / 2)), + std::min(relu6Min, static_cast((4 * 2 + 1 * 1 + 0 * 0 + 0 * 6) / 2)), + std::min(relu6Min, static_cast((1 * 2 + 0 * 1 + 0 * 0 + 0 * 6) / 2)) + }; + + tflite::Padding padding = tflite::Padding_SAME; + + ConvolutionTest(tflite::BuiltinOperator_CONV_2D, + ::tflite::TensorType_UINT8, + 1, // strideX + 1, // strideY + 1, // dilationX + 1, // dilationY + padding, + tflite::ActivationFunctionType_RELU6, + backends, + inputShape, + filterShape, + outputShape, + inputValues, + filterValues, + expectedOutputValues, + biasShape, + biasValues); +} + 
+TEST_SUITE("Convolution2dTest_CpuRef") +{ + +TEST_CASE ("Conv2DWithBiases_Fp32_CpuRef_Test") +{ + std::vector backends = {armnn::Compute::CpuRef}; + Conv2DWithBiasesFp32Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Uint8_CpuRef_Test") +{ + std::vector backends = {armnn::Compute::CpuRef}; + Conv2DWithBiasesUint8Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Relu_Uint8_CpuRef_Test") +{ + std::vector backends = {armnn::Compute::CpuRef}; + Conv2DWithBiasesReluUint8Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Relu6_Uint8_CpuRef_Test") +{ + std::vector backends = {armnn::Compute::CpuRef}; + Conv2DWithBiasesRelu6Uint8Test(backends); +} + +} //End of TEST_SUITE("Convolution2dTest_CpuRef") + +TEST_SUITE("Convolution2dTest_CpuAcc") +{ + +TEST_CASE ("Conv2DWithBiases_Fp32_CpuAcc_Test") +{ +std::vector backends = {armnn::Compute::CpuAcc}; +Conv2DWithBiasesFp32Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Uint8_CpuAcc_Test") +{ +std::vector backends = {armnn::Compute::CpuAcc}; +Conv2DWithBiasesUint8Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Relu_Uint8_CpuAcc_Test") +{ +std::vector backends = {armnn::Compute::CpuAcc}; +Conv2DWithBiasesReluUint8Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Relu6_Uint8_CpuAcc_Test") +{ +std::vector backends = {armnn::Compute::CpuAcc}; +Conv2DWithBiasesRelu6Uint8Test(backends); +} + +} //End of TEST_SUITE("Convolution2dTest_CpuAcc") + +TEST_SUITE("Convolution2dTest_GpuAcc") +{ + +TEST_CASE ("Conv2DWithBiases_Fp32_GpuAcc_Test") +{ +std::vector backends = {armnn::Compute::GpuAcc}; +Conv2DWithBiasesFp32Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Uint8_GpuAcc_Test") +{ +std::vector backends = {armnn::Compute::GpuAcc}; +Conv2DWithBiasesUint8Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Relu_Uint8_GpuAcc_Test") +{ +std::vector backends = {armnn::Compute::GpuAcc}; +Conv2DWithBiasesReluUint8Test(backends); +} + +TEST_CASE ("Conv2DWithBiases_Relu6_Uint8_GpuAcc_Test") +{ +std::vector backends = 
{armnn::Compute::GpuAcc}; +Conv2DWithBiasesRelu6Uint8Test(backends); +} + +} //End of TEST_SUITE("Convolution2dTest_GpuAcc") + +void TransposeConvUint8Test(std::vector& backends) +{ + // Set input data + std::vector transposeTensorShape { 4 }; + std::vector filterShape { 1, 2, 2, 1 }; + std::vector inputShape { 1, 2, 2, 1 }; + std::vector outputShape { 1, 3, 3, 1 }; + + std::vector transposeData = { 1, 3, 3, 1 }; + static std::vector inputValues = { 1, 2, 3, 4 }; + std::vector filterValues = { 0, 1, 2, 4 }; + std::vector expectedOutputValues = + { + 0, 1, 2, + 2, 11, 12, + 6, 20, 16 + }; + + tflite::Padding padding = tflite::Padding_VALID; + TransposeConvTest(backends, + ::tflite::TensorType_UINT8, + 1, // strideX + 1, // strideY + padding, + transposeTensorShape, + filterShape, + inputShape, + outputShape, + transposeData, + filterValues, + inputValues, + expectedOutputValues); +} + +void TransposeConvFp32Test(std::vector& backends) +{ + std::vector transposeTensorShape { 4 }; + std::vector filterShape { 1, 2, 2, 1 }; + std::vector inputShape { 1, 2, 2, 1 }; + std::vector outputShape { 1, 3, 3, 1 }; + + std::vector transposeData = { 1, 3, 3, 1 }; + static std::vector inputValues = { 1, 2, 3, 4 }; + std::vector filterValues = { 0, 1, 2, 4 }; + std::vector expectedOutputValues = + { + 0, 1, 2, + 2, 11, 12, + 6, 20, 16 + }; + + tflite::Padding padding = tflite::Padding_VALID; + TransposeConvTest(backends, + ::tflite::TensorType_FLOAT32, + 1, // strideX + 1, // strideY + padding, + transposeTensorShape, + filterShape, + inputShape, + outputShape, + transposeData, + filterValues, + inputValues, + expectedOutputValues); +} + +TEST_SUITE("TransposeConv_CpuRef_Test") +{ + +TEST_CASE ("TransposeConv_Fp32_Test") +{ + std::vector backends = {armnn::Compute::CpuRef}; + TransposeConvFp32Test(backends); +} + +TEST_CASE ("TransposeConv_Uint8_Test") +{ + std::vector backends = {armnn::Compute::CpuRef}; + TransposeConvUint8Test(backends); +} + +} // End of 
TEST_SUITE(TransposeConv_CpuRef_Test) + +TEST_SUITE("TransposeConv_CpuAcc_Test") +{ + +TEST_CASE ("TransposeConv_Fp32_Test") +{ +std::vector backends = {armnn::Compute::CpuAcc}; +TransposeConvFp32Test(backends); +} + +TEST_CASE ("TransposeConv_Uint8_Test") +{ +std::vector backends = {armnn::Compute::CpuAcc}; +TransposeConvUint8Test(backends); +} + +} // End of TEST_SUITE(TransposeConv_CpuAcc_Test) + +TEST_SUITE("TransposeConv_GpuAcc_Test") +{ + +TEST_CASE ("TransposeConv_Fp32_Test") +{ +std::vector backends = {armnn::Compute::GpuAcc}; +TransposeConvFp32Test(backends); +} + +TEST_CASE ("TransposeConv_Uint8_Test") +{ +std::vector backends = {armnn::Compute::GpuAcc}; +TransposeConvUint8Test(backends); +} + +} // End of TEST_SUITE(TransposeConv_GpuAcc_Test) + +} // namespace armnnDelegate \ No newline at end of file -- cgit v1.2.1