Diffstat (limited to 'delegate/src/test/ActivationTestHelper.hpp')
-rw-r--r--  delegate/src/test/ActivationTestHelper.hpp | 130
1 file changed, 0 insertions(+), 130 deletions(-)
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
deleted file mode 100644
index 6475083da0..0000000000
--- a/delegate/src/test/ActivationTestHelper.hpp
+++ /dev/null
@@ -1,130 +0,0 @@
-//
-// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "TestUtils.hpp"
-
-#include <armnn_delegate.hpp>
-
-#include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <tensorflow/lite/schema/schema_generated.h>
-#include <tensorflow/lite/version.h>
-
-#include <doctest/doctest.h>
-
-namespace
-{
-
-std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
- tflite::TensorType tensorType,
- const std::vector <int32_t>& tensorShape)
-{
- using namespace tflite;
- flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
- std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
- buffers[0] = CreateBuffer(flatBufferBuilder);
-
- std::array<flatbuffers::Offset<Tensor>, 2> tensors;
- tensors[0] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
- tensorType);
- tensors[1] = CreateTensor(flatBufferBuilder,
- flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
- tensorType);
-
- // create operator
- const std::vector<int> operatorInputs{0};
- const std::vector<int> operatorOutputs{1};
- flatbuffers::Offset <Operator> unaryOperator =
- CreateOperator(flatBufferBuilder,
- 0,
- flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
-
- const std::vector<int> subgraphInputs{0};
- const std::vector<int> subgraphOutputs{1};
- flatbuffers::Offset <SubGraph> subgraph =
- CreateSubGraph(flatBufferBuilder,
- flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
- flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
- flatBufferBuilder.CreateVector(&unaryOperator, 1));
-
- flatbuffers::Offset <flatbuffers::String> modelDescription =
- flatBufferBuilder.CreateString("ArmnnDelegate: Activation Operator Model");
- flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, activationOperatorCode);
-
- flatbuffers::Offset <Model> flatbufferModel =
- CreateModel(flatBufferBuilder,
- TFLITE_SCHEMA_VERSION,
- flatBufferBuilder.CreateVector(&operatorCode, 1),
- flatBufferBuilder.CreateVector(&subgraph, 1),
- modelDescription,
- flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
- flatBufferBuilder.Finish(flatbufferModel);
-
- return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
- flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
-}
-
-void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
- std::vector<armnn::BackendId>& backends,
- std::vector<float>& inputValues,
- std::vector<float>& expectedOutputValues)
-{
- using namespace tflite;
- std::vector<int32_t> inputShape { { 4, 1, 4} };
- std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
- ::tflite::TensorType_FLOAT32,
- inputShape);
-
- const Model* tfLiteModel = GetModel(modelBuffer.data());
- // Create TfLite Interpreters
- std::unique_ptr<Interpreter> armnnDelegateInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&armnnDelegateInterpreter) == kTfLiteOk);
- CHECK(armnnDelegateInterpreter != nullptr);
- CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
- std::unique_ptr<Interpreter> tfLiteInterpreter;
- CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
- (&tfLiteInterpreter) == kTfLiteOk);
- CHECK(tfLiteInterpreter != nullptr);
- CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
- // Create the ArmNN Delegate
- armnnDelegate::DelegateOptions delegateOptions(backends);
- std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
- theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
- armnnDelegate::TfLiteArmnnDelegateDelete);
- CHECK(theArmnnDelegate != nullptr);
- // Modify armnnDelegateInterpreter to use armnnDelegate
- CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
- // Set input data
- armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputValues);
- armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
- CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
- CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
- // Compare output data
- armnnDelegate::CompareOutputData<float>(tfLiteInterpreter,
- armnnDelegateInterpreter,
- inputShape,
- expectedOutputValues);
-
- tfLiteInterpreter.reset(nullptr);
- armnnDelegateInterpreter.reset(nullptr);
-}
-
-} // anonymous namespace
\ No newline at end of file
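
The helper removed above built a single-operator TFLite model in memory, ran it through both a plain TFLite interpreter and an ArmNN-delegated interpreter, and compared the outputs. For context, a minimal usage sketch follows, assuming a doctest TEST_CASE, the CpuRef backend, and illustrative ReLU input values (16 values to match the hard-coded 4 x 1 x 4 inputShape above); it is not part of this change:

#include <algorithm>

// Hypothetical test case invoking the deleted helper (illustrative only).
TEST_CASE("Activation_ReLu_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };

    // 16 values to fill the 4 x 1 x 4 input tensor used by ActivationTest.
    std::vector<float> inputValues = { -0.1f, -0.2f, -0.3f, -0.4f,
                                        0.1f,  0.2f,  0.3f,  0.4f,
                                       -1.0f, -2.0f, -3.0f, -4.0f,
                                        1.0f,  2.0f,  3.0f,  4.0f };

    // Reference output: element-wise ReLU, i.e. max(v, 0).
    std::vector<float> expectedOutputValues(inputValues.size());
    std::transform(inputValues.begin(), inputValues.end(), expectedOutputValues.begin(),
                   [](float v) { return std::max(v, 0.0f); });

    ActivationTest(tflite::BuiltinOperator_RELU, backends, inputValues, expectedOutputValues);
}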