author     Richard Burton <richard.burton@arm.com>    2022-10-05 11:00:37 +0100
committer  Richard Burton <richard.burton@arm.com>    2022-10-06 14:08:13 +0100
commit     ec5e99be3ae6dd0d3811950f155b01e144431452 (patch)
tree       a5d6c4dd9267db2465063b8d0e1a5cb6d19dac8d /source
parent     890b2b89cacc6f2291596a001d555d374c8c9edd (diff)
download   ml-embedded-evaluation-kit-ec5e99be3ae6dd0d3811950f155b01e144431452.tar.gz
MLECO-3164: Additional refactoring of KWS API
Part 1:
* Add KwsClassifier
* KwsPostProcess can now be told to average results
* Averaging is handled by KwsClassifier
* Current sliding window index is now an argument of DoPreProcess

Change-Id: I07626da595ad1cbd982e8366f0d1bb56d1040459
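To make the new flow concrete, here is a minimal sketch of how a use-case handler is expected to drive the refactored API after this patch. Only the calls shown in the diff below are taken from the patch itself; the model, tensor and label setup (outputTensor, labels, preProcess, model) is assumed for illustration.

    arm::app::KwsClassifier classifier;                        /* Replaces the generic Classifier. */
    std::vector<arm::app::ClassificationResult> singleInfResult;

    /* Post-processing can now average scores over the last N inferences (N = 3 assumed here). */
    arm::app::KwsPostProcess postProcess(outputTensor, classifier, labels, singleInfResult, 3);

    while (audioDataSlider.HasNext()) {
        const int16_t* inferenceWindow = audioDataSlider.Next();

        /* The current sliding-window index is now an argument of DoPreProcess. */
        if (!preProcess.DoPreProcess(inferenceWindow, audioDataSlider.Index()) ||
            !model.RunInference() ||       /* RunInference assumed from the existing Model API. */
            !postProcess.DoPostProcess()) {
            return false;
        }
    }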
Diffstat (limited to 'source')
-rw-r--r--  source/application/api/common/include/Classifier.hpp            |  10
-rw-r--r--  source/application/api/common/include/Model.hpp                 |  12
-rw-r--r--  source/application/api/common/source/Classifier.cc              |  22
-rw-r--r--  source/application/api/use_case/ad/include/AdMelSpectrogram.hpp |   4
-rw-r--r--  source/application/api/use_case/kws/CMakeLists.txt              |   3
-rw-r--r--  source/application/api/use_case/kws/include/KwsClassifier.hpp   |  66
-rw-r--r--  source/application/api/use_case/kws/include/KwsProcessing.hpp   |  19
-rw-r--r--  source/application/api/use_case/kws/include/KwsResult.hpp       |   4
-rw-r--r--  source/application/api/use_case/kws/src/KwsClassifier.cc        | 142
-rw-r--r--  source/application/api/use_case/kws/src/KwsProcessing.cc        |  19
-rw-r--r--  source/application/main/include/UseCaseCommonUtils.hpp          |   1
-rw-r--r--  source/use_case/kws/src/MainLoop.cc                              |   9
-rw-r--r--  source/use_case/kws/src/UseCaseHandler.cc                        |   9
-rw-r--r--  source/use_case/kws_asr/src/MainLoop.cc                          |   8
-rw-r--r--  source/use_case/kws_asr/src/UseCaseHandler.cc                    |   5
15 files changed, 263 insertions, 70 deletions
diff --git a/source/application/api/common/include/Classifier.hpp b/source/application/api/common/include/Classifier.hpp
index d641c22..e4eab01 100644
--- a/source/application/api/common/include/Classifier.hpp
+++ b/source/application/api/common/include/Classifier.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -34,6 +34,8 @@ namespace app {
/** @brief Constructor. */
Classifier() = default;
+ virtual ~Classifier() = default;
+
/**
* @brief Gets the top N classification results from the
* output vector.
@@ -41,8 +43,8 @@ namespace app {
* @param[out] vecResults A vector of classification results.
* populated by this function.
* @param[in] labels Labels vector to match classified classes.
- * @param[in] topNCount Number of top classifications to pick. Default is 1.
- * @param[in] useSoftmax Whether Softmax normalisation should be applied to output. Default is false.
+ * @param[in] topNCount Number of top classifications to pick.
+ * @param[in] useSoftmax Whether Softmax normalisation should be applied to output.
* @return true if successful, false otherwise.
**/
@@ -65,7 +67,7 @@ namespace app {
std::vector<ClassificationResult>& vecResults,
const std::vector <std::string>& labels);
- private:
+ protected:
/**
* @brief Utility function that gets the top N classification results from the
* output vector.
diff --git a/source/application/api/common/include/Model.hpp b/source/application/api/common/include/Model.hpp
index 70c6245..4892757 100644
--- a/source/application/api/common/include/Model.hpp
+++ b/source/application/api/common/include/Model.hpp
@@ -137,13 +137,13 @@ namespace app {
const tflite::Model* m_pModel{nullptr}; /* Tflite model pointer. */
tflite::MicroInterpreter* m_pInterpreter{nullptr}; /* Tflite interpreter. */
tflite::MicroAllocator* m_pAllocator{nullptr}; /* Tflite micro allocator. */
- bool m_inited{false}; /* Indicates whether this object has been initialised. */
- const uint8_t* m_modelAddr{nullptr}; /* Model address */
- uint32_t m_modelSize{0}; /* Model size */
+ bool m_inited{false}; /* Indicates whether this object has been initialised. */
+ const uint8_t* m_modelAddr{nullptr}; /* Model address */
+ uint32_t m_modelSize{0}; /* Model size */
- std::vector<TfLiteTensor*> m_input{}; /* Model's input tensor pointers. */
- std::vector<TfLiteTensor*> m_output{}; /* Model's output tensor pointers. */
- TfLiteType m_type{kTfLiteNoType}; /* Model's data type. */
+ std::vector<TfLiteTensor*> m_input{}; /* Model's input tensor pointers. */
+ std::vector<TfLiteTensor*> m_output{}; /* Model's output tensor pointers. */
+ TfLiteType m_type{kTfLiteNoType}; /* Model's data type. */
};
} /* namespace app */
diff --git a/source/application/api/common/source/Classifier.cc b/source/application/api/common/source/Classifier.cc
index 6fabebe..1b5fc64 100644
--- a/source/application/api/common/source/Classifier.cc
+++ b/source/application/api/common/source/Classifier.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -31,10 +31,9 @@ namespace arm {
namespace app {
void Classifier::SetVectorResults(std::set<std::pair<float, uint32_t>>& topNSet,
- std::vector<ClassificationResult>& vecResults,
- const std::vector <std::string>& labels)
+ std::vector<ClassificationResult>& vecResults,
+ const std::vector <std::string>& labels)
{
-
/* Reset the iterator to the largest element - use reverse iterator. */
auto topNIter = topNSet.rbegin();
@@ -46,11 +45,9 @@ namespace app {
}
bool Classifier::GetTopNResults(const std::vector<float>& tensor,
- std::vector<ClassificationResult>& vecResults,
- uint32_t topNCount,
- const std::vector <std::string>& labels)
+ std::vector<ClassificationResult>& vecResults,
+ uint32_t topNCount, const std::vector <std::string>& labels)
{
-
std::set<std::pair<float , uint32_t>> sortedSet;
/* NOTE: inputVec's size verification against labels should be
@@ -80,12 +77,9 @@ namespace app {
return true;
}
- bool Classifier::GetClassificationResults(
- TfLiteTensor* outputTensor,
- std::vector<ClassificationResult>& vecResults,
- const std::vector <std::string>& labels,
- uint32_t topNCount,
- bool useSoftmax)
+ bool Classifier::GetClassificationResults(TfLiteTensor* outputTensor,
+ std::vector<ClassificationResult>& vecResults, const std::vector <std::string>& labels,
+ uint32_t topNCount, bool useSoftmax)
{
if (outputTensor == nullptr) {
printf_err("Output vector is null pointer.\n");
diff --git a/source/application/api/use_case/ad/include/AdMelSpectrogram.hpp b/source/application/api/use_case/ad/include/AdMelSpectrogram.hpp
index 05c5bfc..b8a9dfc 100644
--- a/source/application/api/use_case/ad/include/AdMelSpectrogram.hpp
+++ b/source/application/api/use_case/ad/include/AdMelSpectrogram.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -41,7 +41,7 @@ namespace audio {
{}
AdMelSpectrogram() = delete;
- ~AdMelSpectrogram() = default;
+ virtual ~AdMelSpectrogram() = default;
protected:
diff --git a/source/application/api/use_case/kws/CMakeLists.txt b/source/application/api/use_case/kws/CMakeLists.txt
index 517a35a..ea0761c 100644
--- a/source/application/api/use_case/kws/CMakeLists.txt
+++ b/source/application/api/use_case/kws/CMakeLists.txt
@@ -27,7 +27,8 @@ project(${KWS_API_TARGET}
# Create static library
add_library(${KWS_API_TARGET} STATIC
src/KwsProcessing.cc
- src/MicroNetKwsModel.cc)
+ src/MicroNetKwsModel.cc
+ src/KwsClassifier.cc)
target_include_directories(${KWS_API_TARGET} PUBLIC include)
diff --git a/source/application/api/use_case/kws/include/KwsClassifier.hpp b/source/application/api/use_case/kws/include/KwsClassifier.hpp
new file mode 100644
index 0000000..d050e85
--- /dev/null
+++ b/source/application/api/use_case/kws/include/KwsClassifier.hpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef KWS_CLASSIFIER_HPP
+#define KWS_CLASSIFIER_HPP
+
+#include "ClassificationResult.hpp"
+#include "TensorFlowLiteMicro.hpp"
+#include "Classifier.hpp"
+
+#include <vector>
+
+namespace arm {
+namespace app {
+
+ /**
+ * @brief KWS Classifier - a helper class to get certain number of top
+ * results from the output vector from a classification NN.
+ * Allows for averaging of previous results.
+ **/
+ class KwsClassifier : public Classifier {
+ public:
+
+ /**
+ * @brief Gets the top N classification results from the
+ * output vector.
+ * @param[in] outputTensor Inference output tensor from an NN model.
+ * @param[out] vecResults A vector of classification results.
+ * populated by this function.
+ * @param[in] labels Labels vector to match classified classes.
+ * @param[in] topNCount Number of top classifications to pick. Default is 1.
+ * @param[in] useSoftmax Whether Softmax normalisation should be applied to output. Default is false.
+ * @param[in/out] resultHistory History of previous classification results to be updated.
+ * @return true if successful, false otherwise.
+ **/
+ using Classifier::GetClassificationResults; /* We are overloading not overriding. */
+ bool GetClassificationResults(TfLiteTensor* outputTensor, std::vector<ClassificationResult>& vecResults,
+ const std::vector <std::string>& labels, uint32_t topNCount,
+ bool use_softmax, std::vector<std::vector<float>>& resultHistory);
+
+ /**
+ * @brief Average the given history of results.
+ * @param[in] resultHistory The history of results to take on average of.
+ * @param[out] averageResult The calculated average.
+ **/
+ static void AveragResults(const std::vector<std::vector<float>>& resultHistory,
+ std::vector<float>& averageResult);
+ };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* KWS_CLASSIFIER_HPP */
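A hedged usage sketch for the new overload, outside the KwsPostProcess wrapper; outputTensor and labels are assumed to come from the model and label files. A history longer than one entry is what enables the averaging.

    arm::app::KwsClassifier classifier;
    std::vector<arm::app::ClassificationResult> results;

    /* Keep the last three de-quantised output vectors, one float per label. */
    std::vector<std::vector<float>> history(3, std::vector<float>(labels.size()));

    /* Top-1 result with softmax applied; scores are averaged over `history` before ranking. */
    if (!classifier.GetClassificationResults(outputTensor, results, labels, 1, true, history)) {
        printf_err("Classification failed.\n");
    }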
diff --git a/source/application/api/use_case/kws/include/KwsProcessing.hpp b/source/application/api/use_case/kws/include/KwsProcessing.hpp
index 0ede425..e2d3ff9 100644
--- a/source/application/api/use_case/kws/include/KwsProcessing.hpp
+++ b/source/application/api/use_case/kws/include/KwsProcessing.hpp
@@ -19,7 +19,7 @@
#include "AudioUtils.hpp"
#include "BaseProcessing.hpp"
-#include "Classifier.hpp"
+#include "KwsClassifier.hpp"
#include "MicroNetKwsMfcc.hpp"
#include <functional>
@@ -55,9 +55,8 @@ namespace app {
* @param[in] inputSize Size of the input data.
* @return true if successful, false otherwise.
**/
- bool DoPreProcess(const void* input, size_t inputSize) override;
+ bool DoPreProcess(const void* input, size_t inferenceIndex = 0) override;
- size_t m_audioWindowIndex = 0; /* Index of audio slider, used when caching features in longer clips. */
size_t m_audioDataWindowSize; /* Amount of audio needed for 1 inference. */
size_t m_audioDataStride; /* Amount of audio to stride across if doing >1 inference in longer clips. */
@@ -106,11 +105,11 @@ namespace app {
class KwsPostProcess : public BasePostProcess {
private:
- TfLiteTensor* m_outputTensor; /* Model output tensor. */
- Classifier& m_kwsClassifier; /* KWS Classifier object. */
- const std::vector<std::string>& m_labels; /* KWS Labels. */
- std::vector<ClassificationResult>& m_results; /* Results vector for a single inference. */
-
+ TfLiteTensor* m_outputTensor; /* Model output tensor. */
+ KwsClassifier& m_kwsClassifier; /* KWS Classifier object. */
+ const std::vector<std::string>& m_labels; /* KWS Labels. */
+ std::vector<ClassificationResult>& m_results; /* Results vector for a single inference. */
+ std::vector<std::vector<float>> m_resultHistory; /* Store previous results so they can be averaged. */
public:
/**
* @brief Constructor
@@ -119,9 +118,9 @@ namespace app {
* @param[in] labels Vector of string labels to identify each output of the model.
* @param[in/out] results Vector of classification results to store decoded outputs.
**/
- KwsPostProcess(TfLiteTensor* outputTensor, Classifier& classifier,
+ KwsPostProcess(TfLiteTensor* outputTensor, KwsClassifier& classifier,
const std::vector<std::string>& labels,
- std::vector<ClassificationResult>& results);
+ std::vector<ClassificationResult>& results, size_t averagingWindowLen = 1);
/**
* @brief Should perform post-processing of the result of inference then
diff --git a/source/application/api/use_case/kws/include/KwsResult.hpp b/source/application/api/use_case/kws/include/KwsResult.hpp
index 38f32b4..e0bb868 100644
--- a/source/application/api/use_case/kws/include/KwsResult.hpp
+++ b/source/application/api/use_case/kws/include/KwsResult.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -47,7 +47,7 @@ namespace kws {
this->m_inferenceNumber = inferenceIdx;
this->m_resultVec = ResultVec();
- for (auto & i : resultVec) {
+ for (auto& i : resultVec) {
if (i.m_normalisedVal >= this->m_threshold) {
this->m_resultVec.emplace_back(i);
}
diff --git a/source/application/api/use_case/kws/src/KwsClassifier.cc b/source/application/api/use_case/kws/src/KwsClassifier.cc
new file mode 100644
index 0000000..fe409b1
--- /dev/null
+++ b/source/application/api/use_case/kws/src/KwsClassifier.cc
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "KwsClassifier.hpp"
+
+#include "TensorFlowLiteMicro.hpp"
+#include "PlatformMath.hpp"
+#include "log_macros.h"
+#include "../include/KwsClassifier.hpp"
+
+
+#include <vector>
+#include <algorithm>
+#include <string>
+#include <set>
+#include <cstdint>
+#include <cinttypes>
+
+
+namespace arm {
+namespace app {
+
+ bool KwsClassifier::GetClassificationResults(TfLiteTensor* outputTensor,
+ std::vector<ClassificationResult>& vecResults, const std::vector <std::string>& labels,
+ uint32_t topNCount, bool useSoftmax, std::vector<std::vector<float>>& resultHistory)
+ {
+ if (outputTensor == nullptr) {
+ printf_err("Output vector is null pointer.\n");
+ return false;
+ }
+
+ uint32_t totalOutputSize = 1;
+ for (int inputDim = 0; inputDim < outputTensor->dims->size; inputDim++) {
+ totalOutputSize *= outputTensor->dims->data[inputDim];
+ }
+
+ /* Sanity checks. */
+ if (totalOutputSize < topNCount) {
+ printf_err("Output vector is smaller than %" PRIu32 "\n", topNCount);
+ return false;
+ } else if (totalOutputSize != labels.size()) {
+ printf_err("Output size doesn't match the labels' size\n");
+ return false;
+ } else if (topNCount == 0) {
+ printf_err("Top N results cannot be zero\n");
+ return false;
+ }
+
+ bool resultState;
+ vecResults.clear();
+
+ /* De-Quantize Output Tensor */
+ QuantParams quantParams = GetTensorQuantParams(outputTensor);
+
+ /* Floating point tensor data to be populated
+ * NOTE: The assumption here is that the output tensor size isn't too
+ * big and therefore, there's neglibible impact on heap usage. */
+ std::vector<float> resultData(totalOutputSize);
+ resultData.resize(totalOutputSize);
+
+ /* Populate the floating point buffer */
+ switch (outputTensor->type) {
+ case kTfLiteUInt8: {
+ uint8_t* tensor_buffer = tflite::GetTensorData<uint8_t>(outputTensor);
+ for (size_t i = 0; i < totalOutputSize; ++i) {
+ resultData[i] = quantParams.scale *
+ (static_cast<float>(tensor_buffer[i]) - quantParams.offset);
+ }
+ break;
+ }
+ case kTfLiteInt8: {
+ int8_t* tensor_buffer = tflite::GetTensorData<int8_t>(outputTensor);
+ for (size_t i = 0; i < totalOutputSize; ++i) {
+ resultData[i] = quantParams.scale *
+ (static_cast<float>(tensor_buffer[i]) - quantParams.offset);
+ }
+ break;
+ }
+ case kTfLiteFloat32: {
+ float* tensor_buffer = tflite::GetTensorData<float>(outputTensor);
+ for (size_t i = 0; i < totalOutputSize; ++i) {
+ resultData[i] = tensor_buffer[i];
+ }
+ break;
+ }
+ default:
+ printf_err("Tensor type %s not supported by classifier\n",
+ TfLiteTypeGetName(outputTensor->type));
+ return false;
+ }
+
+ if (useSoftmax) {
+ math::MathUtils::SoftmaxF32(resultData);
+ }
+
+ /* If keeping track of recent results, update and take an average. */
+ if (resultHistory.size() > 1) {
+ std::rotate(resultHistory.begin(), resultHistory.begin() + 1, resultHistory.end());
+ resultHistory.back() = resultData;
+ AveragResults(resultHistory, resultData);
+ }
+
+ /* Get the top N results. */
+ resultState = GetTopNResults(resultData, vecResults, topNCount, labels);
+
+ if (!resultState) {
+ printf_err("Failed to get top N results set\n");
+ return false;
+ }
+
+ return true;
+ }
+
+ void app::KwsClassifier::AveragResults(const std::vector<std::vector<float>>& resultHistory,
+ std::vector<float>& averageResult)
+ {
+ /* Compute averages of each class across the window length. */
+ float sum;
+ for (size_t j = 0; j < averageResult.size(); j++) {
+ sum = 0;
+ for (size_t i = 0; i < resultHistory.size(); i++) {
+ sum += resultHistory[i][j];
+ }
+ averageResult[j] = (sum / resultHistory.size());
+ }
+ }
+
+} /* namespace app */
+} /* namespace arm */ \ No newline at end of file
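The averaging is a plain per-class mean over the history window; a worked example with invented values:

    /* Three past results for a two-label model. */
    std::vector<std::vector<float>> history = {{0.2f, 0.8f}, {0.4f, 0.6f}, {0.9f, 0.1f}};
    std::vector<float> average(2);

    arm::app::KwsClassifier::AveragResults(history, average);
    /* average == {0.5f, 0.5f}: (0.2 + 0.4 + 0.9) / 3 and (0.8 + 0.6 + 0.1) / 3. */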
diff --git a/source/application/api/use_case/kws/src/KwsProcessing.cc b/source/application/api/use_case/kws/src/KwsProcessing.cc
index 2d5c085..843ac58 100644
--- a/source/application/api/use_case/kws/src/KwsProcessing.cc
+++ b/source/application/api/use_case/kws/src/KwsProcessing.cc
@@ -66,9 +66,8 @@ namespace app {
}
}
- bool KwsPreProcess::DoPreProcess(const void* data, size_t inputSize)
+ bool KwsPreProcess::DoPreProcess(const void* data, size_t inferenceIndex)
{
- UNUSED(inputSize);
if (data == nullptr) {
printf_err("Data pointer is null");
}
@@ -77,8 +76,8 @@ namespace app {
auto input = static_cast<const int16_t*>(data);
this->m_mfccSlidingWindow.Reset(input);
- /* Cache is only usable if we have more than 1 inference in an audio clip. */
- bool useCache = this->m_audioWindowIndex > 0 && this->m_numReusedMfccVectors > 0;
+ /* Cache is only usable if we have more than 1 inference to do and it's not the first inference. */
+ bool useCache = inferenceIndex > 0 && this->m_numReusedMfccVectors > 0;
/* Use a sliding window to calculate MFCC features frame by frame. */
while (this->m_mfccSlidingWindow.HasNext()) {
@@ -163,7 +162,7 @@ namespace app {
TfLiteQuantization quant = inputTensor->quantization;
if (kTfLiteAffineQuantization == quant.type) {
- auto *quantParams = (TfLiteAffineQuantization *) quant.params;
+ auto* quantParams = (TfLiteAffineQuantization*) quant.params;
const float quantScale = quantParams->scale->data[0];
const int quantOffset = quantParams->zero_point->data[0];
@@ -191,20 +190,22 @@ namespace app {
return mfccFeatureCalc;
}
- KwsPostProcess::KwsPostProcess(TfLiteTensor* outputTensor, Classifier& classifier,
+ KwsPostProcess::KwsPostProcess(TfLiteTensor* outputTensor, KwsClassifier& classifier,
const std::vector<std::string>& labels,
- std::vector<ClassificationResult>& results)
+ std::vector<ClassificationResult>& results, size_t averagingWindowLen)
:m_outputTensor{outputTensor},
m_kwsClassifier{classifier},
m_labels{labels},
m_results{results}
- {}
+ {
+ this->m_resultHistory = {averagingWindowLen, std::vector<float>(labels.size())};
+ }
bool KwsPostProcess::DoPostProcess()
{
return this->m_kwsClassifier.GetClassificationResults(
this->m_outputTensor, this->m_results,
- this->m_labels, 1, true);
+ this->m_labels, 1, true, this->m_resultHistory);
}
} /* namespace app */
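Note on the constructor above: the braced initialiser resolves to the (count, value) vector constructor, so the history starts as averagingWindowLen zero-filled rows of labels.size() scores. Roughly equivalent to the following sketch:

    this->m_resultHistory = std::vector<std::vector<float>>(
        averagingWindowLen, std::vector<float>(labels.size(), 0.0f));

    /* With the default averagingWindowLen of 1, GetClassificationResults skips the
     * rotate-and-average step, so existing callers keep the previous behaviour. */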
diff --git a/source/application/main/include/UseCaseCommonUtils.hpp b/source/application/main/include/UseCaseCommonUtils.hpp
index b0f2e7a..9b6d550 100644
--- a/source/application/main/include/UseCaseCommonUtils.hpp
+++ b/source/application/main/include/UseCaseCommonUtils.hpp
@@ -24,7 +24,6 @@
#include "UseCaseHandler.hpp" /* Handlers for different user options. */
#include "Classifier.hpp" /* Classifier. */
#include "InputFiles.hpp"
-#include "BufAttributes.hpp" /* Buffer attributes */
void DisplayCommonMenu();
diff --git a/source/use_case/kws/src/MainLoop.cc b/source/use_case/kws/src/MainLoop.cc
index e0518f2..2489df8 100644
--- a/source/use_case/kws/src/MainLoop.cc
+++ b/source/use_case/kws/src/MainLoop.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * Copyright (c) 2021-2022 Arm Limited. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,7 @@
* limitations under the License.
*/
#include "InputFiles.hpp" /* For input audio clips. */
-#include "Classifier.hpp" /* Classifier. */
+#include "KwsClassifier.hpp" /* Classifier. */
#include "MicroNetKwsModel.hpp" /* Model class for running inference. */
#include "hal.h" /* Brings in platform definitions. */
#include "Labels.hpp" /* For label strings. */
@@ -34,7 +34,6 @@ namespace app {
} /* namespace app */
} /* namespace arm */
-using KwsClassifier = arm::app::Classifier;
enum opcodes
{
@@ -83,8 +82,8 @@ void main_loop()
caseContext.Set<int>("frameStride", arm::app::kws::g_FrameStride);
caseContext.Set<float>("scoreThreshold", arm::app::kws::g_ScoreThreshold); /* Normalised score threshold. */
- KwsClassifier classifier; /* classifier wrapper object. */
- caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+ arm::app::KwsClassifier classifier; /* classifier wrapper object. */
+ caseContext.Set<arm::app::KwsClassifier&>("classifier", classifier);
std::vector <std::string> labels;
GetLabelsVector(labels);
diff --git a/source/use_case/kws/src/UseCaseHandler.cc b/source/use_case/kws/src/UseCaseHandler.cc
index 61c6eb6..d61ba9d 100644
--- a/source/use_case/kws/src/UseCaseHandler.cc
+++ b/source/use_case/kws/src/UseCaseHandler.cc
@@ -17,7 +17,7 @@
#include "UseCaseHandler.hpp"
#include "InputFiles.hpp"
-#include "Classifier.hpp"
+#include "KwsClassifier.hpp"
#include "MicroNetKwsModel.hpp"
#include "hal.h"
#include "AudioUtils.hpp"
@@ -29,8 +29,6 @@
#include <vector>
-using KwsClassifier = arm::app::Classifier;
-
namespace arm {
namespace app {
@@ -124,14 +122,11 @@ namespace app {
while (audioDataSlider.HasNext()) {
const int16_t* inferenceWindow = audioDataSlider.Next();
- /* The first window does not have cache ready. */
- preProcess.m_audioWindowIndex = audioDataSlider.Index();
-
info("Inference %zu/%zu\n", audioDataSlider.Index() + 1,
audioDataSlider.TotalStrides() + 1);
/* Run the pre-processing, inference and post-processing. */
- if (!preProcess.DoPreProcess(inferenceWindow, audio::MicroNetKwsMFCC::ms_defaultSamplingFreq)) {
+ if (!preProcess.DoPreProcess(inferenceWindow, audioDataSlider.Index())) {
printf_err("Pre-processing failed.");
return false;
}
diff --git a/source/use_case/kws_asr/src/MainLoop.cc b/source/use_case/kws_asr/src/MainLoop.cc
index 0638ecd..a4f7db9 100644
--- a/source/use_case/kws_asr/src/MainLoop.cc
+++ b/source/use_case/kws_asr/src/MainLoop.cc
@@ -17,7 +17,7 @@
#include "InputFiles.hpp" /* For input images. */
#include "Labels_micronetkws.hpp" /* For MicroNetKws label strings. */
#include "Labels_wav2letter.hpp" /* For Wav2Letter label strings. */
-#include "Classifier.hpp" /* KWS classifier. */
+#include "KwsClassifier.hpp" /* KWS classifier. */
#include "AsrClassifier.hpp" /* ASR classifier. */
#include "MicroNetKwsModel.hpp" /* KWS model class for running inference. */
#include "Wav2LetterModel.hpp" /* ASR model class for running inference. */
@@ -42,8 +42,6 @@ namespace app {
} /* namespace app */
} /* namespace arm */
-using KwsClassifier = arm::app::Classifier;
-
enum opcodes
{
MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector. */
@@ -118,9 +116,9 @@ void main_loop()
caseContext.Set<int>("asrFrameStride", arm::app::asr::g_FrameStride);
caseContext.Set<float>("asrScoreThreshold", arm::app::asr::g_ScoreThreshold); /* Normalised score threshold. */
- KwsClassifier kwsClassifier; /* Classifier wrapper object. */
+ arm::app::KwsClassifier kwsClassifier; /* Classifier wrapper object. */
arm::app::AsrClassifier asrClassifier; /* Classifier wrapper object. */
- caseContext.Set<arm::app::Classifier&>("kwsClassifier", kwsClassifier);
+ caseContext.Set<arm::app::KwsClassifier&>("kwsClassifier", kwsClassifier);
caseContext.Set<arm::app::AsrClassifier&>("asrClassifier", asrClassifier);
std::vector<std::string> asrLabels;
diff --git a/source/use_case/kws_asr/src/UseCaseHandler.cc b/source/use_case/kws_asr/src/UseCaseHandler.cc
index 9427ae0..c5e6ad3 100644
--- a/source/use_case/kws_asr/src/UseCaseHandler.cc
+++ b/source/use_case/kws_asr/src/UseCaseHandler.cc
@@ -143,11 +143,8 @@ namespace app {
while (audioDataSlider.HasNext()) {
const int16_t* inferenceWindow = audioDataSlider.Next();
- /* The first window does not have cache ready. */
- preProcess.m_audioWindowIndex = audioDataSlider.Index();
-
/* Run the pre-processing, inference and post-processing. */
- if (!preProcess.DoPreProcess(inferenceWindow, audio::MicroNetKwsMFCC::ms_defaultSamplingFreq)) {
+ if (!preProcess.DoPreProcess(inferenceWindow, audioDataSlider.Index())) {
printf_err("KWS Pre-processing failed.");
return output;
}