diff options
author | Sadik Armagan <sadik.armagan@arm.com> | 2020-10-23 17:14:43 +0100 |
---|---|---|
committer | Jim Flynn <jim.flynn@arm.com> | 2020-10-27 13:51:58 +0000 |
commit | 62483bee640e7d8accf6ac77b24c6e9828841851 (patch) | |
tree | ba7025bc86819c3d787428dd16b5be73b90a4353 /delegate/src/test/SqrtTest.cpp | |
parent | 3d1323ff87fa92ff9cfc74097148b97fa1784416 (diff) | |
download | armnn-62483bee640e7d8accf6ac77b24c6e9828841851.tar.gz |
IVGCVSW-5366 'Add a do nothing SubGraph class'
IVGCVSW-5373 'Implement the ABS operator in the Delegate'
* Added a Switch statement into the VisitNode() function
* Separated the Visit functions into the categorized source files
* Implemented VisitElementwiseUnary() function
* Added tests for ABS and SQRT
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: If9654d0a8d8ff7dcd6fb5cbe0dc312941772affb
Diffstat (limited to 'delegate/src/test/SqrtTest.cpp')
-rw-r--r-- | delegate/src/test/SqrtTest.cpp | 97 |
1 file changed, 97 insertions, 0 deletions
diff --git a/delegate/src/test/SqrtTest.cpp b/delegate/src/test/SqrtTest.cpp new file mode 100644 index 0000000000..df3534dcdb --- /dev/null +++ b/delegate/src/test/SqrtTest.cpp @@ -0,0 +1,97 @@ +// +// Copyright © 2020 Arm Ltd and Contributors. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "ElementwiseUnaryTestHelper.hpp" + +#include <armnn_delegate.hpp> + +#include <flatbuffers/flatbuffers.h> +#include <tensorflow/lite/interpreter.h> +#include <tensorflow/lite/kernels/register.h> +#include <tensorflow/lite/model.h> +#include <tensorflow/lite/schema/schema_generated.h> +#include <tensorflow/lite/version.h> + +#include <doctest/doctest.h> + +namespace armnnDelegate +{ + +TEST_SUITE("SqrtTest") +{ + +TEST_CASE ("SqrtTestFloat32") +{ + using namespace tflite; + const std::vector<int32_t> inputShape { { 3, 1, 2} }; + std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(BuiltinOperator_SQRT, + ::tflite::TensorType_FLOAT32, + inputShape); + + const Model* tfLiteModel = GetModel(modelBuffer.data()); + // Create TfLite Interpreters + std::unique_ptr<Interpreter> armnnDelegateInterpreter; + CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) + (&armnnDelegateInterpreter) == kTfLiteOk); + CHECK(armnnDelegateInterpreter != nullptr); + CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk); + + std::unique_ptr<Interpreter> tfLiteInterpreter; + CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver()) + (&tfLiteInterpreter) == kTfLiteOk); + CHECK(tfLiteInterpreter != nullptr); + CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk); + + // Create the ArmNN Delegate + auto delegateOptions = TfLiteArmnnDelegateOptionsDefault(); + auto armnnDelegate = TfLiteArmnnDelegateCreate(delegateOptions); + CHECK(armnnDelegate != nullptr); + // Modify armnnDelegateInterpreter to use armnnDelegate + CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(armnnDelegate) == kTfLiteOk); + + 
// Set input data + std::vector<float> inputValues + { + 9.0f, 4.25f, 81.9f, + 0.1f, 0.9f, 169.0f + }; + + auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0]; + auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId); + for (unsigned int i = 0; i < inputValues.size(); ++i) + { + tfLiteDelageInputData[i] = inputValues[i]; + } + + auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0]; + auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId); + for (unsigned int i = 0; i < inputValues.size(); ++i) + { + armnnDelegateInputData[i] = inputValues[i]; + } + + // Run EnqueWorkload + CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk); + CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk); + + // Compare output data + auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0]; + auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId); + auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0]; + auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId); + for (size_t i = 0; i < inputValues.size(); i++) + { + CHECK(std::sqrt(inputValues[i]) == armnnDelegateOutputData[i]); + CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]); + } + +} + +} + +} // namespace armnnDelegate + + + |