/*
 * Copyright (c) 2021 Arm Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "Labels.hpp"              /* For label strings. */
#include "UseCaseHandler.hpp"      /* Handlers for different user options. */
#include "Wav2LetterModel.hpp"     /* Model class for running inference. */
#include "UseCaseCommonUtils.hpp"  /* Utils functions. */
#include "AsrClassifier.hpp"       /* Classifier. */
#include "InputFiles.hpp"          /* Generated audio clip header. */
#include "log_macros.h"            /* Logging functions */
#include "BufAttributes.hpp"       /* Buffer attributes to be applied */

namespace arm {
namespace app {
    /* Working memory (tensor arena) handed to TFLite Micro at init time.
     * ACTIVATION_BUF_ATTRIBUTE places it in the platform-selected region. */
    static uint8_t tensorArena[ACTIVATION_BUF_SZ] ACTIVATION_BUF_ATTRIBUTE;

    namespace asr {
        /* Accessors for the (generated) model data blob. */
        extern uint8_t* GetModelPointer();
        extern size_t GetModelLen();
    } /* namespace asr */
} /* namespace app */
} /* namespace arm */

/* Options presented by the interactive menu. */
enum opcodes {
    MENU_OPT_RUN_INF_NEXT = 1,  /* Run on next vector. */
    MENU_OPT_RUN_INF_CHOSEN,    /* Run on a user provided vector index. */
    MENU_OPT_RUN_INF_ALL,       /* Run inference on all. */
    MENU_OPT_SHOW_MODEL_INFO,   /* Show model info. */
    MENU_OPT_LIST_AUDIO_CLIPS   /* List the current baked audio clips. */
};

/** @brief Prints the interactive user menu to stdout. */
static void DisplayMenu()
{
    printf("\n\n");
    printf("User input required\n");
    printf("Enter option number from:\n\n");
    printf("  %u. Classify next audio clip\n", MENU_OPT_RUN_INF_NEXT);
    printf("  %u. Classify audio clip at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
    printf("  %u. Run classification on all audio clips\n", MENU_OPT_RUN_INF_ALL);
    printf("  %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
    printf("  %u. List audio clips\n\n", MENU_OPT_LIST_AUDIO_CLIPS);
    printf("  Choice: ");
    fflush(stdout);
}

/** @brief Verify input and output tensor are of certain min dimensions. */
static bool VerifyTensorDimensions(const arm::app::Model& model);

/**
 * @brief  Application entry loop for the ASR use case: initialises the
 *         Wav2Letter model, populates the application context, then
 *         dispatches on (optionally menu-driven) user choices until a
 *         handler fails or a single-clip run completes.
 */
void main_loop()
{
    arm::app::Wav2LetterModel model; /* Model wrapper object. */

    /* Load the model. */
    if (!model.Init(arm::app::tensorArena,
                    sizeof(arm::app::tensorArena),
                    arm::app::asr::GetModelPointer(),
                    arm::app::asr::GetModelLen())) {
        printf_err("Failed to initialise model\n");
        return;
    } else if (!VerifyTensorDimensions(model)) {
        printf_err("Model's input or output dimension verification failed\n");
        return;
    }

    /* Instantiate application context. */
    arm::app::ApplicationContext caseContext;

    /* NOTE(review): template arguments below were stripped in the corrupted
     * original ("std::vector labels;", "caseContext.Set(...)"); restored to
     * the explicit forms required by ApplicationContext::Set/Get. */
    std::vector<std::string> labels;
    GetLabelsVector(labels);
    arm::app::AsrClassifier classifier; /* Classifier wrapper object. */

    arm::app::Profiler profiler{"asr"};
    caseContext.Set<arm::app::Profiler&>("profiler", profiler);
    caseContext.Set<arm::app::Model&>("model", model);
    caseContext.Set<uint32_t>("clipIndex", 0);
    caseContext.Set<uint32_t>("frameLength", arm::app::asr::g_FrameLength);
    caseContext.Set<uint32_t>("frameStride", arm::app::asr::g_FrameStride);
    caseContext.Set<float>("scoreThreshold", arm::app::asr::g_ScoreThreshold); /* Score threshold. */
    caseContext.Set<uint32_t>("ctxLen", arm::app::asr::g_ctxLen); /* Left and right context length (MFCC feat vectors). */
    caseContext.Set<const std::vector<std::string>&>("labels", labels);
    caseContext.Set<arm::app::AsrClassifier&>("classifier", classifier);

    bool executionSuccessful = true;

    /* Only present the menu when more than one clip is baked in; with a
     * single clip the loop body runs exactly once. */
    constexpr bool bUseMenu = NUMBER_OF_FILES > 1;

    /* Loop. */
    do {
        int menuOption = MENU_OPT_RUN_INF_NEXT; /* Default: classify next clip. */
        if (bUseMenu) {
            DisplayMenu();
            menuOption = arm::app::ReadUserInputAsInt();
            printf("\n");
        }
        switch (menuOption) {
            case MENU_OPT_RUN_INF_NEXT:
                executionSuccessful = ClassifyAudioHandler(
                                        caseContext,
                                        caseContext.Get<uint32_t>("clipIndex"),
                                        false);
                break;
            case MENU_OPT_RUN_INF_CHOSEN: {
                printf("    Enter the audio clip index [0, %d]: ", NUMBER_OF_FILES-1);
                fflush(stdout);
                auto clipIndex = static_cast<uint32_t>(
                                        arm::app::ReadUserInputAsInt());
                executionSuccessful = ClassifyAudioHandler(caseContext,
                                                           clipIndex,
                                                           false);
                break;
            }
            case MENU_OPT_RUN_INF_ALL:
                /* Third argument 'true' => iterate over every baked clip. */
                executionSuccessful = ClassifyAudioHandler(
                                        caseContext,
                                        caseContext.Get<uint32_t>("clipIndex"),
                                        true);
                break;
            case MENU_OPT_SHOW_MODEL_INFO:
                executionSuccessful = model.ShowModelInfoHandler();
                break;
            case MENU_OPT_LIST_AUDIO_CLIPS:
                executionSuccessful = ListFilesHandler(caseContext);
                break;
            default:
                printf("Incorrect choice, try again.");
                break;
        }
    } while (executionSuccessful && bUseMenu);
    info("Main loop terminated.\n");
}

/**
 * @brief   Verify input and output tensor are of certain min dimensions.
 * @param   model   Initialised model whose 0th input/output tensors are checked.
 * @return  true if both tensors have valid dims with rank >= 3, false otherwise.
 */
static bool VerifyTensorDimensions(const arm::app::Model& model)
{
    /* Populate tensor related parameters. */
    TfLiteTensor* inputTensor = model.GetInputTensor(0);
    if (!inputTensor->dims) {
        printf_err("Invalid input tensor dims\n");
        return false;
    } else if (inputTensor->dims->size < 3) {
        printf_err("Input tensor dimension should be >= 3\n");
        return false;
    }

    TfLiteTensor* outputTensor = model.GetOutputTensor(0);
    if (!outputTensor->dims) {
        printf_err("Invalid output tensor dims\n");
        return false;
    } else if (outputTensor->dims->size < 3) {
        printf_err("Output tensor dimension should be >= 3\n");
        return false;
    }

    return true;
}