author     Kshitij Sisodia <kshitij.sisodia@arm.com>    2022-05-06 09:13:03 +0100
committer  Kshitij Sisodia <kshitij.sisodia@arm.com>    2022-05-06 17:11:41 +0100
commit     aa4bcb14d0cbee910331545dd2fc086b58c37170 (patch)
tree       e67a43a43f61c6f8b6aad19018b0827baf7e31a6 /source/application/tensorflow-lite-micro
parent     fcca863bafd5f33522bc14c23dde4540e264ec94 (diff)
download   ml-embedded-evaluation-kit-aa4bcb14d0cbee910331545dd2fc086b58c37170.tar.gz
MLECO-3183: Refactoring application sources
Platform-agnostic application sources are moved into the application API module with their own independent CMake projects. Changes for MLECO-3080 are also included - they create CMake projects for the individual APIs (again, platform agnostic) that depend on the common logic. The "joint" KWS/ASR API has been removed; the use case now relies on the individual KWS and ASR API libraries.

Change-Id: I1f7748dc767abb3904634a04e0991b74ac7b756d
Signed-off-by: Kshitij Sisodia <kshitij.sisodia@arm.com>
Diffstat (limited to 'source/application/tensorflow-lite-micro')
-rw-r--r--  source/application/tensorflow-lite-micro/Model.cc                         365
-rw-r--r--  source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc            46
-rw-r--r--  source/application/tensorflow-lite-micro/include/BufAttributes.hpp         72
-rw-r--r--  source/application/tensorflow-lite-micro/include/Model.hpp                144
-rw-r--r--  source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp   91
5 files changed, 0 insertions, 718 deletions
diff --git a/source/application/tensorflow-lite-micro/Model.cc b/source/application/tensorflow-lite-micro/Model.cc
deleted file mode 100644
index 22a1a4d..0000000
--- a/source/application/tensorflow-lite-micro/Model.cc
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "Model.hpp"
-#include "log_macros.h"
-
-#include <cinttypes>
-
-/* Destroy the model: clean up the interpreter. */
-arm::app::Model::~Model()
-{
- delete this->m_pInterpreter;
- /**
- * No clean-up function available for allocator in TensorFlow Lite Micro yet.
- **/
-}
-
-arm::app::Model::Model() :
- m_inited (false),
- m_type(kTfLiteNoType)
-{
- this->m_pErrorReporter = tflite::GetMicroErrorReporter();
-}
-
-bool arm::app::Model::Init(tflite::MicroAllocator* allocator)
-{
- /* Following tf lite micro example:
- * Map the model into a usable data structure. This doesn't involve any
- * copying or parsing, it's a very lightweight operation. */
- const uint8_t* model_addr = ModelPointer();
- debug("loading model from @ 0x%p\n", model_addr);
- this->m_pModel = ::tflite::GetModel(model_addr);
-
- if (this->m_pModel->version() != TFLITE_SCHEMA_VERSION) {
- this->m_pErrorReporter->Report(
- "[ERROR] model's schema version %d is not equal "
- "to supported version %d.",
- this->m_pModel->version(), TFLITE_SCHEMA_VERSION);
- return false;
- }
-
- /* Pull in only the operation implementations we need.
- * This relies on a complete list of all the ops needed by this graph.
- * An easier approach is to just use the AllOpsResolver, but this will
- * incur some penalty in code space for op implementations that are not
- * needed by this graph.
- * static ::tflite::ops::micro::AllOpsResolver resolver; */
- /* NOLINTNEXTLINE(runtime-global-variables) */
- debug("loading op resolver\n");
-
- this->EnlistOperations();
-
-#if !defined(ARM_NPU)
- /* If this is not an NPU build, check whether the model contains an NPU operator */
- bool contains_ethosu_operator = this->ContainsEthosUOperator();
- if (contains_ethosu_operator)
- {
- printf_err("Ethos-U operator present in the model but this build does not include Ethos-U drivers\n");
- return false;
- }
-#endif /* ARM_NPU */
-
- /* Create allocator instance, if it doesn't exist */
- this->m_pAllocator = allocator;
- if (!this->m_pAllocator) {
- /* Create an allocator instance */
- info("Creating allocator using tensor arena in %s\n",
- ACTIVATION_BUF_SECTION_NAME);
-
- this->m_pAllocator = tflite::MicroAllocator::Create(
- this->GetTensorArena(),
- this->GetActivationBufferSize(),
- this->m_pErrorReporter);
-
- if (!this->m_pAllocator) {
- printf_err("Failed to create allocator\n");
- return false;
- }
- debug("Created new allocator @ 0x%p\n", this->m_pAllocator);
- } else {
- debug("Using existing allocator @ 0x%p\n", this->m_pAllocator);
- }
-
- this->m_pInterpreter = new ::tflite::MicroInterpreter(
- this->m_pModel, this->GetOpResolver(),
- this->m_pAllocator, this->m_pErrorReporter);
-
- if (!this->m_pInterpreter) {
- printf_err("Failed to allocate interpreter\n");
- return false;
- }
-
- /* Allocate memory from the tensor_arena for the model's tensors. */
- info("Allocating tensors\n");
- TfLiteStatus allocate_status = this->m_pInterpreter->AllocateTensors();
-
- if (allocate_status != kTfLiteOk) {
- printf_err("tensor allocation failed!\n");
- delete this->m_pInterpreter;
- return false;
- }
-
- /* Get information about the memory area to use for the model's input. */
- this->m_input.resize(this->GetNumInputs());
- for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++)
- this->m_input[inIndex] = this->m_pInterpreter->input(inIndex);
-
- this->m_output.resize(this->GetNumOutputs());
- for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++)
- this->m_output[outIndex] = this->m_pInterpreter->output(outIndex);
-
- if (this->m_input.empty() || this->m_output.empty()) {
- printf_err("failed to get tensors\n");
- return false;
- } else {
- this->m_type = this->m_input[0]->type; /* Input 0 should be the main input */
-
- /* Clear the input & output tensors */
- for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) {
- std::memset(this->m_input[inIndex]->data.data, 0, this->m_input[inIndex]->bytes);
- }
- for (size_t outIndex = 0; outIndex < this->GetNumOutputs(); outIndex++) {
- std::memset(this->m_output[outIndex]->data.data, 0, this->m_output[outIndex]->bytes);
- }
-
- this->LogInterpreterInfo();
- }
-
- this->m_inited = true;
- return true;
-}
-
-tflite::MicroAllocator* arm::app::Model::GetAllocator()
-{
- if (this->IsInited()) {
- return this->m_pAllocator;
- }
- return nullptr;
-}
-
-void arm::app::Model::LogTensorInfo(TfLiteTensor* tensor)
-{
- if (!tensor) {
- printf_err("Invalid tensor\n");
- assert(tensor);
- return;
- }
-
- debug("\ttensor is assigned to 0x%p\n", tensor);
- info("\ttensor type is %s\n", TfLiteTypeGetName(tensor->type));
- info("\ttensor occupies %zu bytes with dimensions\n",
- tensor->bytes);
- for (int i = 0 ; i < tensor->dims->size; ++i) {
- info ("\t\t%d: %3d\n", i, tensor->dims->data[i]);
- }
-
- TfLiteQuantization quant = tensor->quantization;
- if (kTfLiteAffineQuantization == quant.type) {
- auto* quantParams = (TfLiteAffineQuantization*)quant.params;
- info("Quant dimension: %" PRIi32 "\n", quantParams->quantized_dimension);
- for (int i = 0; i < quantParams->scale->size; ++i) {
- info("Scale[%d] = %f\n", i, quantParams->scale->data[i]);
- }
- for (int i = 0; i < quantParams->zero_point->size; ++i) {
- info("ZeroPoint[%d] = %d\n", i, quantParams->zero_point->data[i]);
- }
- }
-}
-
-void arm::app::Model::LogInterpreterInfo()
-{
- if (!this->m_pInterpreter) {
- printf_err("Invalid interpreter\n");
- return;
- }
-
- info("Model INPUT tensors: \n");
- for (auto input : this->m_input) {
- this->LogTensorInfo(input);
- }
-
- info("Model OUTPUT tensors: \n");
- for (auto output : this->m_output) {
- this->LogTensorInfo(output);
- }
-
- info("Activation buffer (a.k.a tensor arena) size used: %zu\n",
- this->m_pInterpreter->arena_used_bytes());
-
- /* We expect there to be only one subgraph. */
- const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
- info("Number of operators: %" PRIu32 "\n", nOperators);
-
- const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0);
-
- auto* opcodes = this->m_pModel->operator_codes();
-
- /* For each operator, display registration information. */
- for (size_t i = 0 ; i < nOperators; ++i) {
- const tflite::Operator* op = subgraph->operators()->Get(i);
- const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
- const TfLiteRegistration* reg = nullptr;
-
- tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(),
- this->m_pErrorReporter, &reg);
- std::string opName;
-
- if (reg) {
- if (tflite::BuiltinOperator_CUSTOM == reg->builtin_code) {
- opName = std::string(reg->custom_name);
- } else {
- opName = std::string(EnumNameBuiltinOperator(
- tflite::BuiltinOperator(reg->builtin_code)));
- }
- }
- info("\tOperator %zu: %s\n", i, opName.c_str());
- }
-}
-
-bool arm::app::Model::IsInited() const
-{
- return this->m_inited;
-}
-
-bool arm::app::Model::IsDataSigned() const
-{
- return this->GetType() == kTfLiteInt8;
-}
-
-bool arm::app::Model::ContainsEthosUOperator() const
-{
- /* We expect there to be only one subgraph. */
- const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
- const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0);
- const auto* opcodes = this->m_pModel->operator_codes();
-
- /* check for custom operators */
- for (size_t i = 0; (i < nOperators); ++i)
- {
- const tflite::Operator* op = subgraph->operators()->Get(i);
- const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
-
- auto builtin_code = tflite::GetBuiltinCode(opcode);
- if ((builtin_code == tflite::BuiltinOperator_CUSTOM) &&
- ( nullptr != opcode->custom_code()) &&
- ( "ethos-u" == std::string(opcode->custom_code()->c_str())))
- {
- return true;
- }
- }
- return false;
-}
-
-bool arm::app::Model::RunInference()
-{
- bool inference_state = false;
- if (this->m_pModel && this->m_pInterpreter) {
- if (kTfLiteOk != this->m_pInterpreter->Invoke()) {
- printf_err("Invoke failed.\n");
- } else {
- inference_state = true;
- }
- } else {
- printf_err("Error: No interpreter!\n");
- }
- return inference_state;
-}
-
-TfLiteTensor* arm::app::Model::GetInputTensor(size_t index) const
-{
- if (index < this->GetNumInputs()) {
- return this->m_input.at(index);
- }
- return nullptr;
-}
-
-TfLiteTensor* arm::app::Model::GetOutputTensor(size_t index) const
-{
- if (index < this->GetNumOutputs()) {
- return this->m_output.at(index);
- }
- return nullptr;
-}
-
-size_t arm::app::Model::GetNumInputs() const
-{
- if (this->m_pModel && this->m_pInterpreter) {
- return this->m_pInterpreter->inputs_size();
- }
- return 0;
-}
-
-size_t arm::app::Model::GetNumOutputs() const
-{
- if (this->m_pModel && this->m_pInterpreter) {
- return this->m_pInterpreter->outputs_size();
- }
- return 0;
-}
-
-
-TfLiteType arm::app::Model::GetType() const
-{
- return this->m_type;
-}
-
-TfLiteIntArray* arm::app::Model::GetInputShape(size_t index) const
-{
- if (index < this->GetNumInputs()) {
- return this->m_input.at(index)->dims;
- }
- return nullptr;
-}
-
-TfLiteIntArray* arm::app::Model::GetOutputShape(size_t index) const
-{
- if (index < this->GetNumOutputs()) {
- return this->m_output.at(index)->dims;
- }
- return nullptr;
-}
-
-bool arm::app::Model::ShowModelInfoHandler()
-{
- if (!this->IsInited()) {
- printf_err("Model is not initialised! Terminating processing.\n");
- return false;
- }
-
- PrintTensorFlowVersion();
- info("Model info:\n");
- this->LogInterpreterInfo();
-
- info("The model is optimised for Ethos-U NPU: %s.\n", this->ContainsEthosUOperator()? "yes": "no");
-
- return true;
-}
-namespace arm {
-namespace app {
- static uint8_t tensor_arena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;
-} /* namespace app */
-} /* namespace arm */
-
-size_t arm::app::Model::GetActivationBufferSize()
-{
- return ACTIVATION_BUF_SZ;
-}
-
-uint8_t *arm::app::Model::GetTensorArena()
-{
- return tensor_arena;
-}
\ No newline at end of file
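
For illustration, a minimal sketch of how the Model class deleted above is driven by a caller. Model is abstract, so a concrete subclass implementing the pure virtual hooks is assumed; RunOnce and the data-population steps are hypothetical, not part of the removed sources.

#include "Model.hpp"

/* Sketch only: drive an already-constructed concrete Model instance. */
bool RunOnce(arm::app::Model& model)
{
    /* Init() maps the flatbuffer, creates the allocator and interpreter,
     * and allocates tensors from the arena. */
    if (!model.Init()) {
        return false;
    }

    TfLiteTensor* input = model.GetInputTensor(0);
    if (nullptr == input) {
        return false;
    }
    /* Fill input->data.data with input->bytes bytes of (quantised) data. */

    /* RunInference() wraps tflite::MicroInterpreter::Invoke(). */
    if (!model.RunInference()) {
        return false;
    }

    /* Read results via model.GetOutputTensor(0)->data. */
    return true;
}
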
diff --git a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
deleted file mode 100644
index 8738e5c..0000000
--- a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "TensorFlowLiteMicro.hpp"
-
-void PrintTensorFlowVersion()
-{}
-
-arm::app::QuantParams arm::app::GetTensorQuantParams(TfLiteTensor* tensor)
-{
- arm::app::QuantParams params;
- if (kTfLiteAffineQuantization == tensor->quantization.type) {
- auto* quantParams = (TfLiteAffineQuantization*) (tensor->quantization.params);
- if (quantParams && 0 == quantParams->quantized_dimension) {
- if (quantParams->scale->size) {
- params.scale = quantParams->scale->data[0];
- }
- if (quantParams->zero_point->size) {
- params.offset = quantParams->zero_point->data[0];
- }
- } else if (tensor->params.scale != 0.0) {
- /* Legacy TensorFlow quantisation parameters */
- params.scale = tensor->params.scale;
- params.offset = tensor->params.zero_point;
- }
- }
- return params;
-}
-
-extern "C" void DebugLog(const char* s)
-{
- puts(s);
-}
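
A short usage sketch for GetTensorQuantParams() above, assuming the standard TFLite affine scheme (real = scale * (quantised - offset)) and an int8 tensor; DequantiseElement is a hypothetical helper name.

#include "TensorFlowLiteMicro.hpp"

/* Sketch only: dequantise a single int8 element of a tensor. */
float DequantiseElement(TfLiteTensor* tensor, size_t index)
{
    const arm::app::QuantParams q = arm::app::GetTensorQuantParams(tensor);
    const int8_t raw = tensor->data.int8[index];
    return q.scale * (static_cast<float>(raw) - static_cast<float>(q.offset));
}
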
diff --git a/source/application/tensorflow-lite-micro/include/BufAttributes.hpp b/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
deleted file mode 100644
index 0146443..0000000
--- a/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef BUF_ATTRIBUTES_HPP
-#define BUF_ATTRIBUTES_HPP
-
-#if defined(ARM_NPU)
- /* When Arm NPU is defined, we use the config set by NPU mem parameters */
- #include "ethosu_mem_config.h"
- #define BYTE_ALIGNMENT ETHOS_U_MEM_BYTE_ALIGNMENT
-#else /* defined(ARM_NPU) */
- /* otherwise, we use the default ones here. */
- #define ACTIVATION_BUF_SECTION section(".bss.NoInit.activation_buf_sram")
- #define ACTIVATION_BUF_SECTION_NAME ("SRAM")
- #define BYTE_ALIGNMENT 16
-#endif /* defined(ARM_NPU) */
-
-#ifdef __has_attribute
-#define HAVE_ATTRIBUTE(x) __has_attribute(x)
-#else /* __has_attribute */
-#define HAVE_ATTRIBUTE(x) 0
-#endif /* __has_attribute */
-
-#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
-
-/* We want all buffers/sections to be aligned to BYTE_ALIGNMENT (16 bytes by default). */
-#define ALIGNMENT_REQ aligned(BYTE_ALIGNMENT)
-
-#define MODEL_SECTION section("nn_model")
-
-/* Label section name */
-#define LABEL_SECTION section("labels")
-
-#ifndef ACTIVATION_BUF_SZ
- #warning "ACTIVATION_BUF_SZ needs to be defined. Using default value"
- #define ACTIVATION_BUF_SZ 0x00200000
-#endif /* ACTIVATION_BUF_SZ */
-
-/* IFM section name. */
-#define IFM_BUF_SECTION section("ifm")
-
-/* Form the attributes, alignment is mandatory. */
-#define MAKE_ATTRIBUTE(x) __attribute__((ALIGNMENT_REQ, x))
-#define MODEL_TFLITE_ATTRIBUTE MAKE_ATTRIBUTE(MODEL_SECTION)
-#define ACTIVATION_BUF_ATTRIBUTE MAKE_ATTRIBUTE(ACTIVATION_BUF_SECTION)
-#define IFM_BUF_ATTRIBUTE MAKE_ATTRIBUTE(IFM_BUF_SECTION)
-#define LABELS_ATTRIBUTE MAKE_ATTRIBUTE(LABEL_SECTION)
-
-#else /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
-
-#define MODEL_TFLITE_ATTRIBUTE
-#define ACTIVATION_BUF_ATTRIBUTE
-#define IFM_BUF_ATTRIBUTE
-#define LABELS_ATTRIBUTE
-
-#endif /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
-
-#endif /* BUF_ATTRIBUTES_HPP */
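
For illustration, how the attribute macros above are applied at a definition site; the array name and placeholder contents are hypothetical (real model data is generated at build time).

#include "BufAttributes.hpp"

/* Sketch only: place the model flatbuffer in the "nn_model" section,
 * aligned to BYTE_ALIGNMENT, when the compiler supports the attributes. */
static const uint8_t nn_model[] MODEL_TFLITE_ATTRIBUTE = {
    0x00 /* ..., flatbuffer bytes */
};
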
diff --git a/source/application/tensorflow-lite-micro/include/Model.hpp b/source/application/tensorflow-lite-micro/include/Model.hpp
deleted file mode 100644
index 151b680..0000000
--- a/source/application/tensorflow-lite-micro/include/Model.hpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MODEL_HPP
-#define MODEL_HPP
-
-#include "TensorFlowLiteMicro.hpp"
-#include "BufAttributes.hpp"
-
-#include <cstdint>
-
-namespace arm {
-namespace app {
-
- /**
- * @brief NN model class wrapping the underlying TensorFlow-Lite-Micro API.
- */
- class Model {
- public:
- /** @brief Constructor. */
- Model();
-
- /** @brief Destructor. */
- ~Model();
-
- /** @brief Gets the pointer to the model's input tensor at given input index. */
- TfLiteTensor* GetInputTensor(size_t index) const;
-
- /** @brief Gets the pointer to the model's output tensor at given output index. */
- TfLiteTensor* GetOutputTensor(size_t index) const;
-
- /** @brief Gets the model's data type. */
- TfLiteType GetType() const;
-
- /** @brief Gets the pointer to the model's input shape. */
- TfLiteIntArray* GetInputShape(size_t index) const;
-
- /** @brief Gets the pointer to the model's output shape at given output index. */
- TfLiteIntArray* GetOutputShape(size_t index) const;
-
- /** @brief Gets the number of input tensors the model has. */
- size_t GetNumInputs() const;
-
- /** @brief Gets the number of output tensors the model has. */
- size_t GetNumOutputs() const;
-
- /** @brief Logs the tensor information to stdout. */
- void LogTensorInfo(TfLiteTensor* tensor);
-
- /** @brief Logs the interpreter information to stdout. */
- void LogInterpreterInfo();
-
- /** @brief Initialise the model class object.
- * @param[in] allocator Optional: a pre-initialised micro allocator pointer,
- * if available. If supplied, this allocator will be used
- * to create the interpreter instance.
- * @return true if initialisation succeeds, false otherwise.
- **/
- bool Init(tflite::MicroAllocator* allocator = nullptr);
-
- /**
- * @brief Gets the allocator pointer for this instance.
- * @return Pointer to a tflite::MicroAllocator object, if
- * available; nullptr otherwise.
- **/
- tflite::MicroAllocator* GetAllocator();
-
- /** @brief Checks if this object has been initialised. */
- bool IsInited() const;
-
- /** @brief Checks if the model uses signed data. */
- bool IsDataSigned() const;
-
- /** @brief Checks if the model uses an Ethos-U operator. */
- bool ContainsEthosUOperator() const;
-
- /** @brief Runs the inference (invokes the interpreter). */
- virtual bool RunInference();
-
- /** @brief Model information handler common to all models.
- * @return true or false based on execution success.
- **/
- bool ShowModelInfoHandler();
-
- /** @brief Gets a pointer to the tensor arena. */
- uint8_t* GetTensorArena();
-
- protected:
- /** @brief Gets the pointer to the NN model data array.
- * @return Pointer of uint8_t type.
- **/
- virtual const uint8_t* ModelPointer() = 0;
-
- /** @brief Gets the model size.
- * @return size_t, size in bytes.
- **/
- virtual size_t ModelSize() = 0;
-
- /**
- * @brief Gets the op resolver for the model instance.
- * @return const reference to a tflite::MicroOpResolver object.
- **/
- virtual const tflite::MicroOpResolver& GetOpResolver() = 0;
-
- /**
- * @brief Add all the operators required for the given model.
- * Implementation of this should come from the use case.
- * @return true if ops are successfully added, false otherwise.
- **/
- virtual bool EnlistOperations() = 0;
-
- /** @brief Gets the total size of tensor arena available for use. */
- size_t GetActivationBufferSize();
-
- private:
- tflite::ErrorReporter* m_pErrorReporter = nullptr; /* Pointer to the error reporter. */
- const tflite::Model* m_pModel = nullptr; /* Tflite model pointer. */
- tflite::MicroInterpreter* m_pInterpreter = nullptr; /* Tflite interpreter. */
- tflite::MicroAllocator* m_pAllocator = nullptr; /* Tflite micro allocator. */
- bool m_inited = false; /* Indicates whether this object has been initialised. */
-
- std::vector<TfLiteTensor*> m_input = {}; /* Model's input tensor pointers. */
- std::vector<TfLiteTensor*> m_output = {}; /* Model's output tensor pointers. */
- TfLiteType m_type = kTfLiteNoType;/* Model's data type. */
-
- };
-
-} /* namespace app */
-} /* namespace arm */
-
-#endif /* MODEL_HPP */
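
For illustration, a hypothetical concrete subclass of Model wiring up the pure virtual hooks with a tflite::MicroMutableOpResolver; GetModelPointer()/GetModelLen() stand in for however a given build exposes the model flatbuffer.

#include "Model.hpp"

/* Assumed accessors for the model flatbuffer (hypothetical). */
extern const uint8_t* GetModelPointer();
extern size_t GetModelLen();

class MyModel : public arm::app::Model {
protected:
    const uint8_t* ModelPointer() override { return GetModelPointer(); }

    size_t ModelSize() override { return GetModelLen(); }

    const tflite::MicroOpResolver& GetOpResolver() override
    {
        return this->m_opResolver;
    }

    bool EnlistOperations() override
    {
        /* Register only the kernels this graph needs. */
        return (kTfLiteOk == this->m_opResolver.AddFullyConnected()) &&
               (kTfLiteOk == this->m_opResolver.AddSoftmax());
    }

private:
    /* Template argument: number of operators registered above. */
    tflite::MicroMutableOpResolver<2> m_opResolver;
};
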
diff --git a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
deleted file mode 100644
index f6639fd..0000000
--- a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef TENSORFLOW_LITE_MICRO_LOCAL_HPP
-#define TENSORFLOW_LITE_MICRO_LOCAL_HPP
-
-/* We include all our TensorFlow Lite Micro headers here */
-
-/**
- * TensorFlow Lite Micro sources can generate a lot of warnings from the usage
- * of a single macro (TF_LITE_REMOVE_VIRTUAL_DELETE). Suppress the known ones
- * here to prevent them from masking warnings that might be generated by our
- * application sources.
- */
-#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
- #pragma clang diagnostic push
- #pragma clang diagnostic ignored "-Wunused-parameter"
- #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
- #include "tensorflow/lite/micro/micro_interpreter.h"
- #include "tensorflow/lite/micro/micro_error_reporter.h"
- #include "tensorflow/lite/micro/all_ops_resolver.h"
- #pragma clang diagnostic pop
-#elif defined(__GNUC__)
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wunused-parameter"
- #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
- #include "tensorflow/lite/micro/micro_interpreter.h"
- #include "tensorflow/lite/micro/micro_error_reporter.h"
- #include "tensorflow/lite/micro/all_ops_resolver.h"
- #pragma GCC diagnostic pop
-#else
- #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
- #include "tensorflow/lite/micro/micro_interpreter.h"
- #include "tensorflow/lite/micro/micro_error_reporter.h"
- #include "tensorflow/lite/micro/all_ops_resolver.h"
-#endif
-
-#include "tensorflow/lite/c/common.h"
-#include "tensorflow/lite/micro/kernels/micro_ops.h"
-#include "tensorflow/lite/schema/schema_generated.h"
-#include "tensorflow/lite/schema/schema_utils.h"
-
-#if defined (TESTS)
- #include "tensorflow/lite/micro/test_helpers.h"
-#endif /* defined (TESTS) */
-
-namespace arm {
-namespace app {
-
- /** Struct for quantization parameters. */
- struct QuantParams {
- float scale = 1.0;
- int offset = 0;
- };
-
- /**
- * @brief Gets the quantization parameters from a tensor
- * @param[in] tensor pointer to the tensor.
- * @return QuantParams object.
- */
- QuantParams GetTensorQuantParams(TfLiteTensor* tensor);
-
- /**
- * @brief String logging functionality expected to be defined
- * by TensorFlow Lite Micro's error reporter.
- * @param[in] s Pointer to the string.
- */
- extern "C" void DebugLog(const char* s);
-
-} /* namespace app */
-} /* namespace arm */
-
-/**
- * @brief Prints the TensorFlow version in use to stdout.
- */
-void PrintTensorFlowVersion();
-
-#endif /* TENSORFLOW_LITE_MICRO_LOCAL_HPP */
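
To close the loop on the QuantParams struct declared above, a sketch of the inverse mapping (quantising a float into int8); QuantiseToInt8 is a hypothetical helper, not part of the API.

#include "TensorFlowLiteMicro.hpp"

#include <algorithm>
#include <cmath>
#include <cstdint>

/* Sketch only: quantised = round(real / scale) + offset, clamped to int8. */
static int8_t QuantiseToInt8(float value, const arm::app::QuantParams& q)
{
    const int32_t quantised =
        static_cast<int32_t>(std::lround(value / q.scale)) + q.offset;
    return static_cast<int8_t>(
        std::min<int32_t>(127, std::max<int32_t>(-128, quantised)));
}
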