author     Davide Grohmann <davide.grohmann@arm.com>  2022-04-07 15:02:12 +0200
committer  Davide Grohmann <davide.grohmann@arm.com>  2022-04-12 12:55:48 +0200
commit     37fd8a38af2e2a2a9d532d72a00a99f41769f87b (patch)
tree       6b153799d7fa57ea0e677884297ce380ae58abab
parent     3a3d6fa1b7fd848637353694cc9922b4fc724cde (diff)
download   ethos-u-core-software-37fd8a38af2e2a2a9d532d72a00a99f41769f87b.tar.gz
Add inference parsing to inference process library
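
The InferenceParser walks a TFLite flatbuffer and extracts the model
description string together with the byte size of each input (IFM) and
output (OFM) tensor. Results are written through the caller-provided
Array wrapper, so the parser performs no dynamic allocation. Note the
return convention used throughout: functions return true on failure
and false on success.

A minimal usage sketch (hypothetical caller; modelData, the buffer
sizes and names below are illustrative, assuming modelData points at a
valid TFLite flatbuffer):

    char description[100];
    uint32_t ifmSizes[10];
    uint32_t ifmCount = 0;
    uint32_t ofmSizes[10];
    uint32_t ofmCount = 0;

    InferenceProcess::InferenceParser parser;
    bool failed = parser.parseModel(
        modelData,
        description,
        InferenceProcess::makeArray(ifmSizes, ifmCount, 10),
        InferenceProcess::makeArray(ofmSizes, ofmCount, 10));
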
Change-Id: If90ebe17cc33b8fef9e6a4ebae43a2e5749d8a37
-rw-r--r--  applications/inference_process/include/inference_parser.hpp  189
1 file changed, 189 insertions, 0 deletions
diff --git a/applications/inference_process/include/inference_parser.hpp b/applications/inference_process/include/inference_parser.hpp
new file mode 100644
index 0000000..3d90818
--- /dev/null
+++ b/applications/inference_process/include/inference_parser.hpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the License); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "tensorflow/lite/schema/schema_generated.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <string>
+
+namespace InferenceProcess {
+
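+// Non-owning, fixed-capacity array view over a caller-provided buffer. The
+// element count lives in a caller-owned variable so it outlives this wrapper.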
+template <typename T, typename U>
+class Array {
+public:
+    Array() = delete;
+    Array(T *const data, U &size, size_t capacity) : _data{data}, _size{size}, _capacity{capacity} {}
+
+    auto size() const {
+        return _size;
+    }
+
+    auto capacity() const {
+        return _capacity;
+    }
+
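+    // Append an element. The caller must ensure spare capacity first, as
+    // getSubGraphDims() does; no bounds check is performed here.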
+    void push_back(const T &data) {
+        _data[_size++] = data;
+    }
+
+private:
+    T *const _data;
+    U &_size;
+    const size_t _capacity{};
+};
+
+template <typename T, typename U>
+Array<T, U> makeArray(T *const data, U &size, size_t capacity) {
+    return Array<T, U>{data, size, capacity};
+}
+
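+// Extracts the model description and per-tensor byte sizes from a TFLite
+// flatbuffer. All methods follow the same convention: they return true on
+// failure and false on success.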
+class InferenceParser {
+public:
+    template <typename T, typename U, size_t S>
+    bool parseModel(const void *buffer, char (&description)[S], T &&ifmDims, U &&ofmDims) {
+        // Create model handle
+        const tflite::Model *model = tflite::GetModel(buffer);
+        if (model->subgraphs() == nullptr) {
+            printf("Warning: nullptr subgraph\n");
+            return true;
+        }
+
+        // Copy the model description if present. strncpy() does not guarantee
+        // null termination on truncation, so terminate explicitly.
+        if (model->description() != nullptr) {
+            strncpy(description, model->description()->c_str(), S - 1);
+            description[S - 1] = '\0';
+        } else {
+            description[0] = '\0';
+        }
+
+        // Get input dimensions for first subgraph
+        auto *subgraph = *model->subgraphs()->begin();
+        bool failed = getSubGraphDims(subgraph, subgraph->inputs(), ifmDims);
+        if (failed) {
+            return true;
+        }
+
+        // Get output dimensions for last subgraph
+        subgraph = *model->subgraphs()->rbegin();
+        failed = getSubGraphDims(subgraph, subgraph->outputs(), ofmDims);
+        if (failed) {
+            return true;
+        }
+
+        return false;
+    }
+
+private:
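+    // Compute the number of elements in a tensor shape (the product of all
+    // dimensions).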
+    bool getShapeSize(const flatbuffers::Vector<int32_t> *shape, size_t &size) {
+        size = 1;
+
+        if (shape == nullptr) {
+            printf("Warning: nullptr shape size.\n");
+            return true;
+        }
+
+        if (shape->Length() == 0) {
+            printf("Warning: shape zero length.\n");
+            return true;
+        }
+
+        for (auto it = shape->begin(); it != shape->end(); ++it) {
+            size *= *it;
+        }
+
+        return false;
+    }
+
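+    // Map a TFLite tensor element type to its size in bytes.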
+    bool getTensorTypeSize(const enum tflite::TensorType type, size_t &size) {
+        switch (type) {
+        case tflite::TensorType::TensorType_UINT8:
+        case tflite::TensorType::TensorType_INT8:
+            size = 1;
+            break;
+        case tflite::TensorType::TensorType_INT16:
+            size = 2;
+            break;
+        case tflite::TensorType::TensorType_INT32:
+        case tflite::TensorType::TensorType_FLOAT32:
+            size = 4;
+            break;
+        default:
+            printf("Warning: Unsupported tensor type\n");
+            return true;
+        }
+
+        return false;
+    }
+
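+    // Accumulate the byte size of every tensor referenced by tensorMap (a
+    // list of tensor indices) into dims.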
+    template <typename T>
+    bool getSubGraphDims(const tflite::SubGraph *subgraph, const flatbuffers::Vector<int32_t> *tensorMap, T &dims) {
+        if (subgraph == nullptr || tensorMap == nullptr) {
+            printf("Warning: nullptr subgraph or tensormap.\n");
+            return true;
+        }
+
+        if ((dims.capacity() - dims.size()) < tensorMap->size()) {
+            printf("Warning: tensormap size is larger than dimension capacity.\n");
+            return true;
+        }
+
+        for (auto index = tensorMap->begin(); index != tensorMap->end(); ++index) {
+            auto tensor = subgraph->tensors()->Get(*index);
+            size_t size;
+            size_t tensorSize;
+
+            bool failed = getShapeSize(tensor->shape(), size);
+            if (failed) {
+                return true;
+            }
+
+            failed = getTensorTypeSize(tensor->type(), tensorSize);
+            if (failed) {
+                return true;
+            }
+
+            size *= tensorSize;
+
+            if (size > 0) {
+                dims.push_back(size);
+            }
+        }
+
+        return false;
+    }
+};
+
+} // namespace InferenceProcess