From 47406feb4b2aed29942d7e89cab882ea797dbf7e Mon Sep 17 00:00:00 2001
From: Kshitij Sisodia
Date: Mon, 5 Dec 2022 17:18:50 +0000
Subject: MLECO-3710: Fix build issue with a freshly updated submodule.

A TensorFlow Lite Micro checkout can be left with stale third-party
components that were downloaded by an older version of the submodule.
After a submodule update, we recommend that users pass
`-DTENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS=ON` to clean out the older
downloads.

This change moves the triggered clean and refresh to the CMake
configuration stage, where no parallel build option is involved. This
gives the subsequent build step a better chance of success.

Change-Id: I305439c09658f49765ecc15eb1ce0c8b914dd30a
---
 scripts/cmake/tensorflow.cmake                | 11 ++++-
 scripts/py/setup_hooks.py                     | 34 +++++++-------
 source/application/api/common/source/Model.cc | 67 ++++++++++++---------------
 3 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/scripts/cmake/tensorflow.cmake b/scripts/cmake/tensorflow.cmake
index d807416..29d5bce 100644
--- a/scripts/cmake/tensorflow.cmake
+++ b/scripts/cmake/tensorflow.cmake
@@ -71,7 +71,16 @@ else()
 endif()
 
 if (TENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS)
-    list(APPEND MAKE_TARGETS_LIST "clean_downloads")
+    message(STATUS "Refreshing TensorFlow Lite Micro's third party downloads...")
+    execute_process(
+        COMMAND make -f ${TENSORFLOW_LITE_MICRO_PATH}/tools/make/Makefile clean_downloads third_party_downloads
+        RESULT_VARIABLE return_code
+        WORKING_DIRECTORY ${TENSORFLOW_SRC_PATH})
+    if (NOT return_code EQUAL "0")
+        message(FATAL_ERROR "Failed to clean TensorFlow Lite Micro's third party downloads.")
+    else()
+        message(STATUS "Refresh completed.")
+    endif ()
 endif()
 
 if (TENSORFLOW_LITE_MICRO_CLEAN_BUILD)
diff --git a/scripts/py/setup_hooks.py b/scripts/py/setup_hooks.py
index 97a2861..178765c 100644
--- a/scripts/py/setup_hooks.py
+++ b/scripts/py/setup_hooks.py
@@ -55,25 +55,25 @@ def add_pre_push_hooks(hooks_dir):
 while read local_ref local_sha remote_ref remote_sha
 do
     # We should pass only added or modified C/C++ source files to cppcheck.
-    changed_files=$(git diff --name-only HEAD~1 HEAD | grep -E "*\.(c|cpp|cc|cxx)" | cut -f 2)
-    if [ -n "$changed_files" ]; then
-        clang-format -style=file --dry-run --Werror $changed_files
+    changed_files=$(git diff --name-only HEAD~1 HEAD | grep -iE "\.(c|cpp|cxx|cc|h|hpp|hxx)$" | cut -f 2)
+    if [ -n "$changed_files" ]; then
+        clang-format -style=file --dry-run --Werror $changed_files
 
-        exitcode1=$?
-        if [ $exitcode1 -ne 0 ]; then
-            echo "Formatting errors found in file: $changed_files.
-            \nPlease run:\n\    \"clang-format -style=file -i $changed_files\"
-            \nto correct these errors"
-            exit $exitcode1
-        fi
+        exitcode1=$?
+        if [ $exitcode1 -ne 0 ]; then
+            echo "Formatting errors found in file: $changed_files.
+            \nPlease run:\n\    \"clang-format -style=file -i $changed_files\"
+            \nto correct these errors"
+            exit $exitcode1
+        fi
 
-        cppcheck --enable=performance,portability --error-exitcode=1 $changed_files
-        exitcode2=$?
-        if [ $exitcode2 -ne 0 ]; then
-            exit $exitcode2
-        fi
-    fi
-    exit 0
+        cppcheck --enable=performance,portability --error-exitcode=1 $changed_files
+        exitcode2=$?
+        if [ $exitcode2 -ne 0 ]; then
+            exit $exitcode2
+        fi
+    fi
+    exit 0
 done
 
 exit 0'''
diff --git a/source/application/api/common/source/Model.cc b/source/application/api/common/source/Model.cc
index 8467d71..7de6fde 100644
--- a/source/application/api/common/source/Model.cc
+++ b/source/application/api/common/source/Model.cc
@@ -1,6 +1,6 @@
 /*
- * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2021-2022 Arm Limited and/or its affiliates
+ * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -22,16 +22,13 @@
 /* Initialise the model */
 arm::app::Model::~Model()
 {
-    delete this->m_pInterpreter;
+    delete this->m_pInterpreter;
     /**
      * No clean-up function available for allocator in TensorFlow Lite Micro yet.
      **/
 }
 
-arm::app::Model::Model() :
-    m_inited (false),
-    m_type(kTfLiteNoType)
-{}
+arm::app::Model::Model() : m_inited(false), m_type(kTfLiteNoType) {}
 
 bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
                            uint32_t tensorArenaSize,
@@ -48,10 +45,10 @@ bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
 
     this->m_pModel = ::tflite::GetModel(nnModelAddr);
 
     if (this->m_pModel->version() != TFLITE_SCHEMA_VERSION) {
-        printf_err(
-            "Model's schema version %d is not equal "
-            "to supported version %d.",
-            this->m_pModel->version(), TFLITE_SCHEMA_VERSION);
+        printf_err("Model's schema version %" PRIu32 " is not equal "
+                   "to supported version %d.",
+                   this->m_pModel->version(),
+                   TFLITE_SCHEMA_VERSION);
         return false;
     }
@@ -75,9 +72,7 @@ bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
 
         /* Create an allocator instance */
         info("Creating allocator using tensor arena at 0x%p\n", tensorArenaAddr);
 
-        this->m_pAllocator = tflite::MicroAllocator::Create(
-            tensorArenaAddr,
-            tensorArenaSize);
+        this->m_pAllocator = tflite::MicroAllocator::Create(tensorArenaAddr, tensorArenaSize);
         if (!this->m_pAllocator) {
             printf_err("Failed to create allocator\n");
@@ -88,9 +83,8 @@ bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
         debug("Using existing allocator @ 0x%p\n", this->m_pAllocator);
     }
 
-    this->m_pInterpreter = new ::tflite::MicroInterpreter(
-        this->m_pModel, this->GetOpResolver(),
-        this->m_pAllocator);
+    this->m_pInterpreter =
+        new ::tflite::MicroInterpreter(this->m_pModel, this->GetOpResolver(), this->m_pAllocator);
 
     if (!this->m_pInterpreter) {
         printf_err("Failed to allocate interpreter\n");
@@ -120,7 +114,7 @@ bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
         printf_err("failed to get tensors\n");
         return false;
     } else {
-        this->m_type = this->m_input[0]->type;  /* Input 0 should be the main input */
+        this->m_type = this->m_input[0]->type; /* Input 0 should be the main input */
 
         /* Clear the input & output tensors */
         for (size_t inIndex = 0; inIndex < this->GetNumInputs(); inIndex++) {
@@ -155,10 +149,9 @@ void arm::app::Model::LogTensorInfo(TfLiteTensor* tensor)
     debug("\ttensor is assigned to 0x%p\n", tensor);
     info("\ttensor type is %s\n", TfLiteTypeGetName(tensor->type));
 
-    info("\ttensor occupies %zu bytes with dimensions\n",
-         tensor->bytes);
-    for (int i = 0 ; i < tensor->dims->size; ++i) {
-        info ("\t\t%d: %3d\n", i, tensor->dims->data[i]);
+    info("\ttensor occupies %zu bytes with dimensions\n", tensor->bytes);
+    for (int i = 0; i < tensor->dims->size; ++i) {
+        info("\t\t%d: %3d\n", i, tensor->dims->data[i]);
     }
 
     TfLiteQuantization quant = tensor->quantization;
@@ -192,7 +185,7 @@ void arm::app::Model::LogInterpreterInfo()
     }
 
     info("Activation buffer (a.k.a tensor arena) size used: %zu\n",
-        this->m_pInterpreter->arena_used_bytes());
+         this->m_pInterpreter->arena_used_bytes());
 
     /* We expect there to be only one subgraph. */
     const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
@@ -203,10 +196,10 @@ void arm::app::Model::LogInterpreterInfo()
     auto* opcodes = this->m_pModel->operator_codes();
 
     /* For each operator, display registration information. */
-    for (size_t i = 0 ; i < nOperators; ++i) {
-        const tflite::Operator* op = subgraph->operators()->Get(i);
+    for (size_t i = 0; i < nOperators; ++i) {
+        const tflite::Operator* op         = subgraph->operators()->Get(i);
         const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
-        const TfLiteRegistration* reg = nullptr;
+        const TfLiteRegistration* reg      = nullptr;
 
         tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(), &reg);
         std::string opName;
@@ -215,8 +208,8 @@ void arm::app::Model::LogInterpreterInfo()
             if (tflite::BuiltinOperator_CUSTOM == reg->builtin_code) {
                 opName = std::string(reg->custom_name);
             } else {
-                opName = std::string(EnumNameBuiltinOperator(
-                    tflite::BuiltinOperator(reg->builtin_code)));
+                opName = std::string(
+                    EnumNameBuiltinOperator(tflite::BuiltinOperator(reg->builtin_code)));
             }
         }
         info("\tOperator %zu: %s\n", i, opName.c_str());
@@ -236,21 +229,19 @@ bool arm::app::Model::IsDataSigned() const
 bool arm::app::Model::ContainsEthosUOperator() const
 {
     /* We expect there to be only one subgraph. */
-    const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
+    const uint32_t nOperators        = tflite::NumSubgraphOperators(this->m_pModel, 0);
     const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0);
-    const auto* opcodes = this->m_pModel->operator_codes();
+    const auto* opcodes              = this->m_pModel->operator_codes();
 
     /* check for custom operators */
-    for (size_t i = 0; (i < nOperators); ++i)
-    {
-        const tflite::Operator* op = subgraph->operators()->Get(i);
+    for (size_t i = 0; (i < nOperators); ++i) {
+        const tflite::Operator* op         = subgraph->operators()->Get(i);
         const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
 
         auto builtin_code = tflite::GetBuiltinCode(opcode);
         if ((builtin_code == tflite::BuiltinOperator_CUSTOM) &&
-            ( nullptr != opcode->custom_code()) &&
-            ( "ethos-u" == std::string(opcode->custom_code()->c_str())))
-        {
+            (nullptr != opcode->custom_code()) &&
+            ("ethos-u" == std::string(opcode->custom_code()->c_str()))) {
             return true;
         }
     }
@@ -304,7 +295,6 @@ size_t arm::app::Model::GetNumOutputs() const
     return 0;
 }
 
-
 TfLiteType arm::app::Model::GetType() const
 {
     return this->m_type;
@@ -339,7 +329,8 @@ bool arm::app::Model::ShowModelInfoHandler()
     info("Model info:\n");
    this->LogInterpreterInfo();
 
-    info("The model is optimised for Ethos-U NPU: %s.\n", this->ContainsEthosUOperator()? "yes": "no");
+    info("The model is optimised for Ethos-U NPU: %s.\n",
+         this->ContainsEthosUOperator() ? "yes" : "no");
 
     return true;
 }
-- 
cgit v1.2.1
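
Notes for users applying this change:

With the refresh moved to configure time, picking up a freshly updated
TensorFlow Lite Micro submodule becomes a plain configure-then-build
sequence. A minimal sketch (the build directory name and -j level are
illustrative assumptions, not something this patch mandates):

    # Configure with the clean-downloads option: the clean and refresh of
    # third party components now run here, serially, at configure time.
    cmake -DTENSORFLOW_LITE_MICRO_CLEAN_DOWNLOADS=ON -S . -B cmake-build
    # The build step no longer races the download targets, so a parallel
    # build is safe again.
    cmake --build cmake-build -j 4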
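The configuration stage shells out to the TensorFlow Lite Micro makefile
with the two targets visible in the tensorflow.cmake hunk above, so the
refresh can also be reproduced by hand. A sketch, where TFLM_PATH stands
in for whatever ${TENSORFLOW_LITE_MICRO_PATH} resolves to in a given
checkout, run from the TensorFlow source root as the patch does via
WORKING_DIRECTORY:

    # Hypothetical manual equivalent of the new execute_process() call.
    make -f ${TFLM_PATH}/tools/make/Makefile clean_downloads third_party_downloads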
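The pre-push hook change deserves a note of its own: the old filter
`grep -E "*\.(c|cpp|cc|cxx)"` starts with an unanchored `*` (undefined in
POSIX extended regular expressions), is not anchored to the end of the
file name, and did not cover header files. The replacement is anchored
and case-insensitive. A quick check of its behaviour against made-up
file names:

    # Only C/C++ sources and headers survive; the trailing $ prevents
    # accidental matches such as ".config", and -i admits ".CPP" too.
    printf '%s\n' src/Model.cc include/Model.hpp main.CPP README.md \
        | grep -iE "\.(c|cpp|cxx|cc|h|hpp|hxx)$"
    # Prints: src/Model.cc, include/Model.hpp and main.CPP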