diff options
author | David Monahan <david.monahan@arm.com> | 2020-11-18 14:40:27 +0000 |
---|---|---|
committer | Francis Murtagh <francis.murtagh@arm.com> | 2020-11-18 17:14:50 +0000 |
commit | 1670b0c047ab56c0b3b68088a3c53f38a91355b4 (patch) | |
tree | 3205725db5950af9884807e113f4a147c882d855 /delegate/src/test/ReshapeTest.cpp | |
parent | 23969e8b538ce09489b108fb9efdde9af7f97a3f (diff) | |
download | armnn-1670b0c047ab56c0b3b68088a3c53f38a91355b4.tar.gz |
IVGCVSW-5397 TfLiteDelegate: Implement the redefine operators
* Adding Reshape definition to ArmNN TfLite Delegate
* Added Reshape tests and RedefineTestHelper
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: I6d3909689c820387ac0fd4fd3f7ab856ebc25f47
Diffstat (limited to 'delegate/src/test/ReshapeTest.cpp')
-rw-r--r-- | delegate/src/test/ReshapeTest.cpp | 449 |
1 file changed, 449 insertions, 0 deletions
diff --git a/delegate/src/test/ReshapeTest.cpp b/delegate/src/test/ReshapeTest.cpp new file mode 100644 index 0000000000..715fed6279 --- /dev/null +++ b/delegate/src/test/ReshapeTest.cpp @@ -0,0 +1,449 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "RedefineTestHelper.hpp" + +#include <armnn_delegate.hpp> + +#include <flatbuffers/flatbuffers.h> +#include <tensorflow/lite/schema/schema_generated.h> + +#include <doctest/doctest.h> + +namespace armnnDelegate +{ + +void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption = true) +{ + // Set input data + std::vector<int32_t> inputShape { 1, 3, 4, 1 }; + std::vector<int32_t> outputShape { 1, 3, 2, 2 }; + std::vector<int32_t> targetShape { 1, 3, 2, 2 }; + + std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + RedefineTest<float>(tflite::BuiltinOperator_RESHAPE, + ::tflite::TensorType_FLOAT32, + backends, + inputShape, + outputShape, + inputValues, + expectedOutputValues, + targetShape, + useOption); +} + +void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOption = true) +{ + // Set input data + std::vector<int32_t> inputShape { 1, 3, 4, 1 }; + std::vector<int32_t> outputShape { 1, 4, 3 }; + std::vector<int32_t> targetShape { 1, 4, 3 }; + + std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + RedefineTest<float>(tflite::BuiltinOperator_RESHAPE, + ::tflite::TensorType_FLOAT32, + backends, + inputShape, + outputShape, + inputValues, + expectedOutputValues, + targetShape, + useOption); +} + 
+void ReshapeFlattenTest(std::vector<armnn::BackendId>& backends, bool useOption = true) +{ + // Set input data + std::vector<int32_t> inputShape { 1, 3, 4, 1 }; + std::vector<int32_t> outputShape { 6, 2 }; + std::vector<int32_t> targetShape { -1, 2 }; + + std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + RedefineTest<float>(tflite::BuiltinOperator_RESHAPE, + ::tflite::TensorType_FLOAT32, + backends, + inputShape, + outputShape, + inputValues, + expectedOutputValues, + targetShape, + useOption); +} + +void ReshapeFlattenAllTest(std::vector<armnn::BackendId>& backends, bool useOption = true) +{ + // Set input data + std::vector<int32_t> inputShape { 1, 3, 4, 1 }; + std::vector<int32_t> outputShape { 12 }; + std::vector<int32_t> targetShape { -1 }; + + std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f, + 8.0f, 12.0f, -15.0f, 2.0f, + 3.0f, -4.0f, -1.0f, -11.0f }; + + RedefineTest<float>(tflite::BuiltinOperator_RESHAPE, + ::tflite::TensorType_FLOAT32, + backends, + inputShape, + outputShape, + inputValues, + expectedOutputValues, + targetShape, + useOption); +} + +void ReshapeInt8Test(std::vector<armnn::BackendId>& backends, bool useOption = true) +{ + // Set input data + std::vector<int32_t> inputShape { 1, 3, 4, 1 }; + std::vector<int32_t> outputShape { 6, 2 }; + std::vector<int32_t> targetShape { -1, 2 }; + + std::vector<int8_t> inputValues = { -5, 8, -10, 7, + 8, 12, -15, 2, + 3, -4, -1, -11 }; + + std::vector<int8_t> expectedOutputValues = { -5, 8, -10, 7, + 8, 12, -15, 2, + 3, -4, -1, -11 }; + + RedefineTest<int8_t>(tflite::BuiltinOperator_RESHAPE, + ::tflite::TensorType_INT8, + backends, + inputShape, + 
outputShape, + inputValues, + expectedOutputValues, + targetShape, + useOption, + 2.5f, + 1); +} + +void ReshapeUint8Test(std::vector<armnn::BackendId>& backends, bool useOption = true) +{ + // Set input data + std::vector<int32_t> inputShape { 1, 3, 4, 1 }; + std::vector<int32_t> outputShape { 6, 2 }; + std::vector<int32_t> targetShape { -1, 2 }; + + std::vector<uint8_t> inputValues = { 5, 8, 10, 7, + 8, 12, 15, 2, + 3, 4, 1, 11 }; + + std::vector<uint8_t> expectedOutputValues = { 5, 8, 10, 7, + 8, 12, 15, 2, + 3, 4, 1, 11 }; + + RedefineTest<uint8_t>(tflite::BuiltinOperator_RESHAPE, + ::tflite::TensorType_UINT8, + backends, + inputShape, + outputShape, + inputValues, + expectedOutputValues, + targetShape, + useOption, + 2.5f, + 1); +} + +void ReshapeInt16Test(std::vector<armnn::BackendId>& backends, bool useOption = true) +{ + // Set input data + std::vector<int32_t> inputShape { 1, 3, 4, 1 }; + std::vector<int32_t> outputShape { 6, 2 }; + std::vector<int32_t> targetShape { -1, 2 }; + + std::vector<int16_t> inputValues = { -5, 8, -10, 7, + 8, 12, -15, 2, + 3, -4, -1, -11 }; + + std::vector<int16_t> expectedOutputValues = { -5, 8, -10, 7, + 8, 12, -15, 2, + 3, -4, -1, -11 }; + + RedefineTest<int16_t>(tflite::BuiltinOperator_RESHAPE, + ::tflite::TensorType_INT16, + backends, + inputShape, + outputShape, + inputValues, + expectedOutputValues, + targetShape, + useOption, + 2.5f, + 0); +} + +TEST_SUITE("Reshape_GpuAccTests") +{ + +TEST_CASE ("Reshape_Simple_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeSimpleTest(backends); +} + +TEST_CASE ("Reshape_ReduceDimension_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeReduceDimTest(backends); +} + +TEST_CASE ("Reshape_Flatten_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeFlattenTest(backends); +} + +TEST_CASE ("Reshape_FlattenAll_GpuAcc_Test") +{ + 
std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeFlattenAllTest(backends); +} + +TEST_CASE ("Reshape_Int8_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeInt8Test(backends); +} + +TEST_CASE ("Reshape_Uint8_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeUint8Test(backends); +} + +TEST_CASE ("Reshape_Simple_ShapeTensor_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeSimpleTest(backends, false); +} + +TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeReduceDimTest(backends, false); +} + +TEST_CASE ("Reshape_Flatten_ShapeTensor_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeFlattenTest(backends, false); +} + +TEST_CASE ("Reshape_FlattenAll_ShapeTensor_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeFlattenAllTest(backends, false); +} + +TEST_CASE ("Reshape_Int8_ShapeTensor_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeInt8Test(backends, false); +} + +TEST_CASE ("Reshape_Uint8_ShapeTensor_GpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc }; + ReshapeUint8Test(backends, false); +} + +} // TEST_SUITE("Reshape_GpuAccTests") + +TEST_SUITE("Reshape_CpuAccTests") +{ + +TEST_CASE ("Reshape_Simple_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeSimpleTest(backends); +} + +TEST_CASE ("Reshape_ReduceDimension_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeReduceDimTest(backends); +} + +TEST_CASE ("Reshape_Flatten_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeFlattenTest(backends); +} 
+ +TEST_CASE ("Reshape_FlattenAll_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeFlattenAllTest(backends); +} + +TEST_CASE ("Reshape_Int8_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeInt8Test(backends); +} + +TEST_CASE ("Reshape_Uint8_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeUint8Test(backends); +} + +TEST_CASE ("Reshape_Simple_ShapeTensor_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeSimpleTest(backends, false); +} + +TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeReduceDimTest(backends, false); +} + +TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeFlattenTest(backends, false); +} + +TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeFlattenAllTest(backends, false); +} + +TEST_CASE ("Reshape_Int8_ShapeTensor_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeInt8Test(backends, false); +} + +TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuAcc_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc }; + ReshapeUint8Test(backends, false); +} + +} // TEST_SUITE("Reshape_CpuAccTests") + +TEST_SUITE("Reshape_CpuRefTests") +{ + +TEST_CASE ("Reshape_Simple_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeSimpleTest(backends); +} + +TEST_CASE ("Reshape_ReduceDimension_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeReduceDimTest(backends); +} + +TEST_CASE ("Reshape_Flatten_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { 
armnn::Compute::CpuRef }; + ReshapeFlattenTest(backends); +} + +TEST_CASE ("Reshape_FlattenAll_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeFlattenAllTest(backends); +} + +TEST_CASE ("Reshape_Int8_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeInt8Test(backends); +} + +TEST_CASE ("Reshape_Uint8_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeUint8Test(backends); +} + +TEST_CASE ("Reshape_Int16_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeInt16Test(backends); +} + +TEST_CASE ("Reshape_Simple_ShapeTensor_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeSimpleTest(backends, false); +} + +TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeReduceDimTest(backends, false); +} + +TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeFlattenTest(backends, false); +} + +TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeFlattenAllTest(backends, false); +} + +TEST_CASE ("Reshape_Int8_ShapeTensor_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeInt8Test(backends, false); +} + +TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeUint8Test(backends, false); +} + +TEST_CASE ("Reshape_Int16_ShapeTensor_CpuRef_Test") +{ + std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef }; + ReshapeInt16Test(backends, false); +} + +} // TEST_SUITE("Reshape_CpuRefTests") + +} // namespace armnnDelegate
\ No newline at end of file |