Diffstat (limited to 'source/application/tensorflow-lite-micro/include')
3 files changed, 305 insertions, 0 deletions
diff --git a/source/application/tensorflow-lite-micro/include/BufAttributes.hpp b/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
new file mode 100644
index 0000000..126172b
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/BufAttributes.hpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BUF_ATTRIBUTES_HPP
+#define BUF_ATTRIBUTES_HPP
+
+#ifdef __has_attribute
+#define HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else /* __has_attribute */
+#define HAVE_ATTRIBUTE(x) 0
+#endif /* __has_attribute */
+
+#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
+
+/* We want all buffers/sections to be aligned to 16 bytes. */
+#define ALIGNMENT_REQ                   aligned(16)
+
+/* Model data section name. */
+#define MODEL_SECTION                   section("nn_model")
+
+/* Label section name. */
+#define LABEL_SECTION                   section("labels")
+
+#ifndef ACTIVATION_BUF_SZ
+    #warning "ACTIVATION_BUF_SZ needs to be defined. Using default value"
+    #define ACTIVATION_BUF_SZ           0x00200000
+#endif /* ACTIVATION_BUF_SZ */
+
+#ifndef ACTIVATION_BUF_SRAM_SZ
+    #warning "ACTIVATION_BUF_SRAM_SZ needs to be defined. Using default value = 0"
+    #define ACTIVATION_BUF_SRAM_SZ      0x00000000
+#endif /* ACTIVATION_BUF_SRAM_SZ */
+
+/**
+ * Activation buffer, aka tensor arena, section name.
+ * We have to place the tensor arena in a different region based on its size.
+ * If it fits in SRAM, we place it there and also mark it by giving it a
+ * different section name. The scatter file places the ZI data in DDR and
+ * the uninitialised region in the SRAM.
+ **/
+#define ACTIVATION_BUF_SECTION_SRAM     section(".bss.NoInit.activation_buf")
+#define ACTIVATION_BUF_SECTION_DRAM     section("activation_buf")
+
+#if ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ /* Will the buffer not fit in SRAM? */
+    #define ACTIVATION_BUF_SECTION      ACTIVATION_BUF_SECTION_DRAM
+    #define ACTIVATION_BUF_SECTION_NAME ("DDR")
+#else /* ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ */
+    #define ACTIVATION_BUF_SECTION      ACTIVATION_BUF_SECTION_SRAM
+    #define ACTIVATION_BUF_SECTION_NAME ("SRAM")
+#endif /* ACTIVATION_BUF_SZ > ACTIVATION_BUF_SRAM_SZ */
+
+/* IFM section name. */
+#define IFM_BUF_SECTION                 section("ifm")
+
+/* Form the attributes; alignment is mandatory. */
+#define MAKE_ATTRIBUTE(x)               __attribute__((ALIGNMENT_REQ, x))
+#define MODEL_TFLITE_ATTRIBUTE          MAKE_ATTRIBUTE(MODEL_SECTION)
+#define ACTIVATION_BUF_ATTRIBUTE        MAKE_ATTRIBUTE(ACTIVATION_BUF_SECTION)
+#define IFM_BUF_ATTRIBUTE               MAKE_ATTRIBUTE(IFM_BUF_SECTION)
+#define LABELS_ATTRIBUTE                MAKE_ATTRIBUTE(LABEL_SECTION)
+
+#else /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
+
+#define MODEL_TFLITE_ATTRIBUTE
+#define ACTIVATION_BUF_ATTRIBUTE
+#define IFM_BUF_ATTRIBUTE
+#define LABELS_ATTRIBUTE
+
+#endif /* HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) */
+
+#endif /* BUF_ATTRIBUTES_HPP */
\ No newline at end of file
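For context, a use-case translation unit would typically apply these attribute macros when defining the model data and the tensor arena, so the scatter/linker file can place them in the intended memory regions. A minimal sketch, with hypothetical buffer names that are not part of this change:

    #include "BufAttributes.hpp"

    #include <cstdint>

    /* Hypothetical model flatbuffer: 16-byte aligned, placed in the "nn_model" section. */
    static const uint8_t nn_model_data[] MODEL_TFLITE_ATTRIBUTE = {
        0x00, 0x00, 0x00, 0x00 /* placeholder bytes standing in for a real .tflite byte stream */
    };

    /* Hypothetical tensor arena: the section chosen ("activation_buf" vs
     * ".bss.NoInit.activation_buf") depends on whether ACTIVATION_BUF_SZ
     * exceeds ACTIVATION_BUF_SRAM_SZ at compile time. */
    static uint8_t tensor_arena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;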
diff --git a/source/application/tensorflow-lite-micro/include/Model.hpp b/source/application/tensorflow-lite-micro/include/Model.hpp
new file mode 100644
index 0000000..70cf9ca
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/Model.hpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MODEL_HPP
+#define MODEL_HPP
+
+#include "TensorFlowLiteMicro.hpp"
+#include "BufAttributes.hpp"
+
+#include <cstdint>
+#include <vector>
+
+namespace arm {
+namespace app {
+
+    /**
+     * @brief   NN model class wrapping the underlying TensorFlow-Lite-Micro API.
+     */
+    class Model {
+    public:
+        /** @brief Constructor. */
+        Model();
+
+        /** @brief Destructor. */
+        ~Model();
+
+        /** @brief  Gets the pointer to the model's input tensor at the given input index. */
+        TfLiteTensor* GetInputTensor(size_t index) const;
+
+        /** @brief  Gets the pointer to the model's output tensor at the given output index. */
+        TfLiteTensor* GetOutputTensor(size_t index) const;
+
+        /** @brief  Gets the model's data type. */
+        TfLiteType GetType() const;
+
+        /** @brief  Gets the pointer to the model's input shape at the given input index. */
+        TfLiteIntArray* GetInputShape(size_t index) const;
+
+        /** @brief  Gets the pointer to the model's output shape at the given output index. */
+        TfLiteIntArray* GetOutputShape(size_t index) const;
+
+        /** @brief  Gets the number of input tensors the model has. */
+        size_t GetNumInputs() const;
+
+        /** @brief  Gets the number of output tensors the model has. */
+        size_t GetNumOutputs() const;
+
+        /** @brief  Logs the tensor information to stdout. */
+        void LogTensorInfo(TfLiteTensor* tensor);
+
+        /** @brief  Logs the interpreter information to stdout. */
+        void LogInterpreterInfo();
+
+        /** @brief      Initialise the model class object.
+         *  @param[in]  allocator   Optional: a pre-initialised micro allocator pointer,
+         *                          if available. If supplied, this allocator will be used
+         *                          to create the interpreter instance.
+         *  @return     true if initialisation succeeds, false otherwise.
+         **/
+        bool Init(tflite::MicroAllocator* allocator = nullptr);
+
+        /**
+         * @brief   Gets the allocator pointer for this instance.
+         * @return  Pointer to a tflite::MicroAllocator object, if
+         *          available; nullptr otherwise.
+         **/
+        tflite::MicroAllocator* GetAllocator();
+
+        /** @brief  Checks if this object has been initialised. */
+        bool IsInited() const;
+
+        /** @brief  Checks if the model uses signed data. */
+        bool IsDataSigned() const;
+
+        /** @brief  Runs the inference (invokes the interpreter). */
+        bool RunInference();
+
+        /** @brief  Model information handler common to all models.
+         *  @return true or false based on execution success.
+         **/
+        bool ShowModelInfoHandler();
+
+        /** @brief  Gets a pointer to the tensor arena. */
+        uint8_t* GetTensorArena();
+
+    protected:
+        /** @brief  Gets the pointer to the NN model data array.
+         *  @return Pointer of uint8_t type.
+         **/
+        virtual const uint8_t* ModelPointer() = 0;
+
+        /** @brief  Gets the model size.
+         *  @return size_t, size in bytes.
+         **/
+        virtual size_t ModelSize() = 0;
+
+        /**
+         * @brief   Gets the op resolver for the model instance.
+         * @return  const reference to a tflite::MicroOpResolver object.
+         **/
+        virtual const tflite::MicroOpResolver& GetOpResolver() = 0;
+
+        /**
+         * @brief   Adds all the operators required for the given model.
+         *          Implementation of this should come from the use case.
+         * @return  true if ops are successfully added, false otherwise.
+         **/
+        virtual bool EnlistOperations() = 0;
+
+        /** @brief  Gets the total size of tensor arena available for use. */
+        size_t GetActivationBufferSize();
+
+    private:
+        tflite::MicroErrorReporter  _m_uErrorReporter;              /* Error reporter object. */
+        tflite::ErrorReporter*      _m_pErrorReporter = nullptr;    /* Pointer to the error reporter. */
+        const tflite::Model*        _m_pModel         = nullptr;    /* Tflite model pointer. */
+        tflite::MicroInterpreter*   _m_pInterpreter   = nullptr;    /* Tflite interpreter. */
+        tflite::MicroAllocator*     _m_pAllocator     = nullptr;    /* Tflite micro allocator. */
+        bool                        _m_inited         = false;      /* Indicates whether this object has been initialised. */
+
+        std::vector<TfLiteTensor*>  _m_input          = {};         /* Model's input tensor pointers. */
+        std::vector<TfLiteTensor*>  _m_output         = {};         /* Model's output tensor pointers. */
+        TfLiteType                  _m_type           = kTfLiteNoType; /* Model's data type. */
+    };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* MODEL_HPP */
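To show how this interface is intended to be specialised, here is a hedged sketch of a hypothetical use-case model overriding the pure virtual hooks; the class name, data symbols and operator list are illustrative assumptions rather than part of this patch:

    #include "Model.hpp"

    /* Hypothetical model data symbols, e.g. generated from a .tflite file. */
    extern const uint8_t hypothetical_model_data[];
    extern const size_t  hypothetical_model_data_len;

    class HypotheticalModel : public arm::app::Model {
    protected:
        const uint8_t* ModelPointer() override { return hypothetical_model_data; }

        size_t ModelSize() override { return hypothetical_model_data_len; }

        const tflite::MicroOpResolver& GetOpResolver() override { return this->_m_opResolver; }

        /* Register only the operators this particular network needs. */
        bool EnlistOperations() override {
            return kTfLiteOk == this->_m_opResolver.AddFullyConnected()
                && kTfLiteOk == this->_m_opResolver.AddSoftmax();
        }

    private:
        tflite::MicroMutableOpResolver<2> _m_opResolver;
    };

    /* Typical call sequence from a use-case handler. */
    bool RunHypotheticalUseCase()
    {
        HypotheticalModel model;
        if (!model.Init() || !model.RunInference()) {
            return false;
        }
        TfLiteTensor* output = model.GetOutputTensor(0);
        return output != nullptr;
    }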
diff --git a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
new file mode 100644
index 0000000..677b4ba
--- /dev/null
+++ b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TENSORFLOW_LITE_MICRO_LOCAL_HPP
+#define TENSORFLOW_LITE_MICRO_LOCAL_HPP
+
+/* We include all our TensorFlow Lite Micro headers here. */
+
+/**
+ * TensorFlow Lite Micro sources can generate a lot of warnings from the usage
+ * of a single macro (TF_LITE_REMOVE_VIRTUAL_DELETE). Suppress the known ones
+ * here to prevent them from masking warnings that might be generated by our
+ * application sources.
+ */
+#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
+    #pragma clang diagnostic push
+    #pragma clang diagnostic ignored "-Wunused-parameter"
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+    #pragma clang diagnostic pop
+#elif defined(__GNUC__)
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wunused-parameter"
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+    #pragma GCC diagnostic pop
+#else
+    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
+    #include "tensorflow/lite/micro/micro_interpreter.h"
+    #include "tensorflow/lite/micro/micro_error_reporter.h"
+    #include "tensorflow/lite/micro/all_ops_resolver.h"
+#endif
+
+#include "tensorflow/lite/c/common.h"
+#include "tensorflow/lite/micro/kernels/micro_ops.h"
+#include "tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow/lite/version.h"
+
+#if defined (TESTS)
+    #include "tensorflow/lite/micro/test_helpers.h"
+#endif /* defined (TESTS) */
+
+namespace arm {
+namespace app {
+
+    struct QuantParams {
+        float scale  = 1.0;
+        int   offset = 0;
+    };
+
+    QuantParams GetTensorQuantParams(TfLiteTensor* tensor);
+
+} /* namespace app */
+} /* namespace arm */
+
+/**
+ * @brief  Prints the TensorFlow version in use to stdout.
+ */
+void PrintTensorFlowVersion();
+
+#endif /* TENSORFLOW_LITE_MICRO_LOCAL_HPP */
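GetTensorQuantParams() exposes the per-tensor quantisation parameters to callers. Assuming QuantParams::offset corresponds to the TensorFlow Lite zero point, an int8 tensor element maps back to a real value via real = scale * (quantised - offset); the helper below is an illustrative sketch, not part of this change:

    #include "TensorFlowLiteMicro.hpp"

    #include <cstddef>

    /* Dequantise one element of an int8 tensor with the affine scheme
     * real = scale * (quantised - offset). */
    static float DequantiseElement(TfLiteTensor* tensor, size_t index)
    {
        const arm::app::QuantParams params = arm::app::GetTensorQuantParams(tensor);
        const int8_t quantised = tensor->data.int8[index];
        return params.scale * static_cast<float>(quantised - params.offset);
    }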