aboutsummaryrefslogtreecommitdiff
path: root/samples/SpeechRecognition
diff options
context:
space:
mode:
Diffstat (limited to 'samples/SpeechRecognition')
-rw-r--r--samples/SpeechRecognition/CMakeLists.txt6
-rw-r--r--samples/SpeechRecognition/cmake/unit_tests.cmake5
-rw-r--r--samples/SpeechRecognition/include/AudioCapture.hpp62
-rw-r--r--samples/SpeechRecognition/include/DataStructures.hpp102
-rw-r--r--samples/SpeechRecognition/include/Decoder.hpp4
-rw-r--r--samples/SpeechRecognition/include/MFCC.hpp244
-rw-r--r--samples/SpeechRecognition/include/MathUtils.hpp85
-rw-r--r--samples/SpeechRecognition/include/SlidingWindow.hpp161
-rw-r--r--samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp59
-rw-r--r--samples/SpeechRecognition/include/Wav2LetterMFCC.hpp78
-rw-r--r--samples/SpeechRecognition/include/Wav2LetterPreprocessor.hpp (renamed from samples/SpeechRecognition/include/Preprocess.hpp)123
-rw-r--r--samples/SpeechRecognition/src/AudioCapture.cpp104
-rw-r--r--samples/SpeechRecognition/src/Decoder.cpp45
-rw-r--r--samples/SpeechRecognition/src/MFCC.cpp397
-rw-r--r--samples/SpeechRecognition/src/Main.cpp137
-rw-r--r--samples/SpeechRecognition/src/MathUtils.cpp112
-rw-r--r--samples/SpeechRecognition/src/Preprocess.cpp192
-rw-r--r--samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp81
-rw-r--r--samples/SpeechRecognition/src/Wav2LetterMFCC.cpp126
-rw-r--r--samples/SpeechRecognition/src/Wav2LetterPreprocessor.cpp187
-rw-r--r--samples/SpeechRecognition/test/AudioCaptureTest.cpp61
-rw-r--r--samples/SpeechRecognition/test/MFCCTest.cpp20
-rw-r--r--samples/SpeechRecognition/test/PreprocessTest.cpp9
23 files changed, 648 insertions, 1752 deletions
diff --git a/samples/SpeechRecognition/CMakeLists.txt b/samples/SpeechRecognition/CMakeLists.txt
index 6c6b0b6dfc..296a2511dd 100644
--- a/samples/SpeechRecognition/CMakeLists.txt
+++ b/samples/SpeechRecognition/CMakeLists.txt
@@ -1,4 +1,4 @@
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
cmake_minimum_required(VERSION 3.0.2)
@@ -43,9 +43,11 @@ include(../common/cmake/find_armnn.cmake)
include_directories(include)
include_directories(../common/include/ArmnnUtils)
include_directories(../common/include/Utils)
+include_directories(../common/include/Audio)
file(GLOB SOURCES "src/*.cpp")
file(GLOB COMMON_UTILS_SOURCES "../common/src/Utils/*.cpp")
+file(GLOB COMMON_AUDIO_SOURCES "../common/src/Audio/*.cpp")
list(REMOVE_ITEM SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/Main.cpp)
file(GLOB TEST_SOURCES "test/*.cpp")
file(GLOB APP_MAIN "src/Main.cpp")
@@ -56,7 +58,7 @@ endif()
set(APP_TARGET_NAME "${CMAKE_PROJECT_NAME}")
-add_executable("${APP_TARGET_NAME}" ${COMMON_UTILS_SOURCES} ${SOURCES} ${APP_MAIN})
+add_executable("${APP_TARGET_NAME}" ${COMMON_UTILS_SOURCES} ${COMMON_AUDIO_SOURCES} ${SOURCES} ${APP_MAIN})
target_link_libraries("${APP_TARGET_NAME}" PUBLIC ${ARMNN_LIBS} -lsndfile -lsamplerate)
target_include_directories("${APP_TARGET_NAME}" PUBLIC ${ARMNN_INCLUDE_DIR} )
diff --git a/samples/SpeechRecognition/cmake/unit_tests.cmake b/samples/SpeechRecognition/cmake/unit_tests.cmake
index 47c4f4b579..955eed4510 100644
--- a/samples/SpeechRecognition/cmake/unit_tests.cmake
+++ b/samples/SpeechRecognition/cmake/unit_tests.cmake
@@ -1,4 +1,4 @@
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
set(TEST_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/test/resources)
@@ -7,11 +7,12 @@ add_definitions (-DTEST_RESOURCE_DIR="${TEST_RESOURCES_DIR}")
set(TEST_TARGET_NAME "${CMAKE_PROJECT_NAME}-tests")
file(GLOB TEST_SOURCES "test/*")
+file(GLOB TESTS_AUDIO_COMMON "../common/test/Audio/*")
file(MAKE_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/test/resources)
include(../common/cmake/find_catch.cmake)
-add_executable("${TEST_TARGET_NAME}" ${COMMON_UTILS_SOURCES} ${SOURCES} ${TEST_SOURCES} )
+add_executable("${TEST_TARGET_NAME}" ${COMMON_UTILS_SOURCES} ${COMMON_AUDIO_SOURCES} ${SOURCES} ${TEST_SOURCES} ${TESTS_AUDIO_COMMON})
ExternalProject_Add(passport
URL https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav
diff --git a/samples/SpeechRecognition/include/AudioCapture.hpp b/samples/SpeechRecognition/include/AudioCapture.hpp
deleted file mode 100644
index 90c2eccacf..0000000000
--- a/samples/SpeechRecognition/include/AudioCapture.hpp
+++ /dev/null
@@ -1,62 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <string>
-#include <iostream>
-
-#include <math.h>
-
-#include <vector>
-
-#include <exception>
-
-#include "SlidingWindow.hpp"
-
-namespace asr
-{
-
-/**
-* @brief Class used to capture the audio data loaded from file, and to provide a method of
- * extracting correctly positioned and appropriately sized audio windows
-*
-*/
- class AudioCapture
- {
- public:
-
- SlidingWindow<const float> m_window;
- int lastReadIdx= 0;
-
- /**
- * @brief Default constructor
- */
- AudioCapture()
- {};
-
- /**
- * @brief Function to load the audio data captured from the
- * input file to memory.
- */
- std::vector<float> LoadAudioFile(std::string filePath);
-
- /**
- * @brief Function to initialize the sliding window. This will set its position in memory, its
- * window size and its stride.
- */
- void InitSlidingWindow(float* data, size_t dataSize, int minSamples, size_t stride);
-
- /**
- * Checks whether there is another block of audio in memory to read
- */
- bool HasNext();
-
- /**
- * Retrieves the next block of audio if its available
- */
- std::vector<float> Next();
- };
-} // namespace asr \ No newline at end of file
diff --git a/samples/SpeechRecognition/include/DataStructures.hpp b/samples/SpeechRecognition/include/DataStructures.hpp
deleted file mode 100644
index 9922265299..0000000000
--- a/samples/SpeechRecognition/include/DataStructures.hpp
+++ /dev/null
@@ -1,102 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-#include <stdio.h>
-#include <iterator>
-
-/**
- * Class Array2d is a data structure that represents a two dimensional array.
- * The data is allocated in contiguous memory, arranged row-wise
- * and individual elements can be accessed with the () operator.
- * For example a two dimensional array D of size (M, N) can be accessed:
- *
- * _|<------------- col size = N -------->|
- * | D(r=0, c=0) D(r=0, c=1)... D(r=0, c=N)
- * | D(r=1, c=0) D(r=1, c=1)... D(r=1, c=N)
- * | ...
- * row size = M ...
- * | ...
- * _ D(r=M, c=0) D(r=M, c=1)... D(r=M, c=N)
- *
- */
-template<typename T>
-class Array2d
-{
-private:
- size_t m_rows;
- size_t m_cols;
- T* m_data;
-
-public:
- /**
- * Creates the array2d with the given sizes.
- *
- * @param rows number of rows.
- * @param cols number of columns.
- */
- Array2d(unsigned rows, unsigned cols)
- {
- if (rows == 0 || cols == 0) {
- printf("Array2d constructor has 0 size.\n");
- m_data = nullptr;
- return;
- }
- m_rows = rows;
- m_cols = cols;
- m_data = new T[rows * cols];
- }
-
- ~Array2d()
- {
- delete[] m_data;
- }
-
- T& operator() (unsigned int row, unsigned int col)
- {
- return m_data[m_cols * row + col];
- }
-
- T operator() (unsigned int row, unsigned int col) const
- {
- return m_data[m_cols * row + col];
- }
-
- /**
- * Gets rows number of the current array2d.
- * @return number of rows.
- */
- size_t size(size_t dim)
- {
- switch (dim)
- {
- case 0:
- return m_rows;
- case 1:
- return m_cols;
- default:
- return 0;
- }
- }
-
- /**
- * Gets the array2d total size.
- */
- size_t totalSize()
- {
- return m_rows * m_cols;
- }
-
- /**
- * array2d iterator.
- */
- using iterator=T*;
- using const_iterator=T const*;
-
- iterator begin() { return m_data; }
- iterator end() { return m_data + totalSize(); }
- const_iterator begin() const { return m_data; }
- const_iterator end() const { return m_data + totalSize(); };
-};
diff --git a/samples/SpeechRecognition/include/Decoder.hpp b/samples/SpeechRecognition/include/Decoder.hpp
index 69d97ccf64..9dd484a5d1 100644
--- a/samples/SpeechRecognition/include/Decoder.hpp
+++ b/samples/SpeechRecognition/include/Decoder.hpp
@@ -46,8 +46,8 @@ namespace asr
rowVector.emplace_back(static_cast<int16_t>(contextToProcess[row * rowLength + j]));
}
- int max_index = std::distance(rowVector.begin(),std::max_element(rowVector.begin(), rowVector.end()));
- unfilteredText.emplace_back(this->m_labels.at(max_index)[0]);
+ int maxIndex = std::distance(rowVector.begin(), std::max_element(rowVector.begin(), rowVector.end()));
+ unfilteredText.emplace_back(this->m_labels.at(maxIndex)[0]);
}
std::string filteredText = FilterCharacters(unfilteredText);
diff --git a/samples/SpeechRecognition/include/MFCC.hpp b/samples/SpeechRecognition/include/MFCC.hpp
deleted file mode 100644
index 14b6d9fe79..0000000000
--- a/samples/SpeechRecognition/include/MFCC.hpp
+++ /dev/null
@@ -1,244 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <vector>
-#include <cstdint>
-#include <cmath>
-#include <limits>
-#include <string>
-
-/* MFCC's consolidated parameters */
-class MfccParams
-{
-public:
- float m_samplingFreq;
- int m_numFbankBins;
- float m_melLoFreq;
- float m_melHiFreq;
- int m_numMfccFeatures;
- int m_frameLen;
- int m_frameLenPadded;
- bool m_useHtkMethod;
- int m_numMfccVectors;
-
- /** @brief Constructor */
- MfccParams(const float samplingFreq, const int numFbankBins,
- const float melLoFreq, const float melHiFreq,
- const int numMfccFeats, const int frameLen,
- const bool useHtkMethod, const int numMfccVectors);
-
- /* Delete the default constructor */
- MfccParams() = delete;
-
- /* Default destructor */
- ~MfccParams() = default;
-
- /** @brief String representation of parameters */
- std::string Str();
-};
-
-/**
- * @brief Class for MFCC feature extraction.
- * Based on https://github.com/ARM-software/ML-KWS-for-MCU/blob/master/Deployment/Source/MFCC/mfcc.cpp
- * This class is designed to be generic and self-sufficient but
- * certain calculation routines can be overridden to accommodate
- * use-case specific requirements.
- */
-class MFCC
-{
-
-public:
-
- /**
- * @brief Extract MFCC features for one single small frame of
- * audio data e.g. 640 samples.
- * @param[in] audioData - Vector of audio samples to calculate
- * features for.
- * @return Vector of extracted MFCC features.
- **/
- std::vector<float> MfccCompute(const std::vector<float>& audioData);
-
- MfccParams _m_params;
-
- /**
- * @brief Constructor
- * @param[in] params - MFCC parameters
- */
- MFCC(const MfccParams& params);
-
- /* Delete the default constructor */
- MFCC() = delete;
-
- /** @brief Default destructor */
- ~MFCC() = default;
-
- /** @brief Initialise */
- void Init();
-
- /**
- * @brief Extract MFCC features and quantise for one single small
- * frame of audio data e.g. 640 samples.
- * @param[in] audioData - Vector of audio samples to calculate
- * features for.
- * @param[in] quantScale - quantisation scale.
- * @param[in] quantOffset - quantisation offset
- * @return Vector of extracted quantised MFCC features.
- **/
- template<typename T>
- std::vector<T> MfccComputeQuant(const std::vector<float>& audioData,
- const float quantScale,
- const int quantOffset)
- {
- this->_MfccComputePreFeature(audioData);
- float minVal = std::numeric_limits<T>::min();
- float maxVal = std::numeric_limits<T>::max();
-
- std::vector<T> mfccOut(this->_m_params.m_numMfccFeatures);
- const size_t numFbankBins = this->_m_params.m_numFbankBins;
-
- /* Take DCT. Uses matrix mul. */
- for (size_t i = 0, j = 0; i < mfccOut.size(); ++i, j += numFbankBins)
- {
- float sum = 0;
- for (size_t k = 0; k < numFbankBins; ++k)
- {
- sum += this->_m_dctMatrix[j + k] * this->_m_melEnergies[k];
- }
- /* Quantize to T. */
- sum = std::round((sum / quantScale) + quantOffset);
- mfccOut[i] = static_cast<T>(std::min<float>(std::max<float>(sum, minVal), maxVal));
- }
-
- return mfccOut;
- }
-
- /* Constants */
- static constexpr float logStep = 1.8562979903656 / 27.0;
- static constexpr float freqStep = 200.0 / 3;
- static constexpr float minLogHz = 1000.0;
- static constexpr float minLogMel = minLogHz / freqStep;
-
-protected:
- /**
- * @brief Project input frequency to Mel Scale.
- * @param[in] freq - input frequency in floating point
- * @param[in] useHTKmethod - bool to signal if HTK method is to be
- * used for calculation
- * @return Mel transformed frequency in floating point
- **/
- static float MelScale(const float freq,
- const bool useHTKMethod = true);
-
- /**
- * @brief Inverse Mel transform - convert MEL warped frequency
- * back to normal frequency
- * @param[in] freq - Mel frequency in floating point
- * @param[in] useHTKmethod - bool to signal if HTK method is to be
- * used for calculation
- * @return Real world frequency in floating point
- **/
- static float InverseMelScale(const float melFreq,
- const bool useHTKMethod = true);
-
- /**
- * @brief Populates MEL energies after applying the MEL filter
- * bank weights and adding them up to be placed into
- * bins, according to the filter bank's first and last
- * indices (pre-computed for each filter bank element
- * by _CreateMelFilterBank function).
- * @param[in] fftVec Vector populated with FFT magnitudes
- * @param[in] melFilterBank 2D Vector with filter bank weights
- * @param[in] filterBankFilterFirst Vector containing the first indices of filter bank
- * to be used for each bin.
- * @param[in] filterBankFilterLast Vector containing the last indices of filter bank
- * to be used for each bin.
- * @param[out] melEnergies Pre-allocated vector of MEL energies to be
- * populated.
- * @return true if successful, false otherwise
- */
- virtual bool ApplyMelFilterBank(
- std::vector<float>& fftVec,
- std::vector<std::vector<float>>& melFilterBank,
- std::vector<int32_t>& filterBankFilterFirst,
- std::vector<int32_t>& filterBankFilterLast,
- std::vector<float>& melEnergies);
-
- /**
- * @brief Converts the Mel energies for logarithmic scale
- * @param[in/out] melEnergies - 1D vector of Mel energies
- **/
- virtual void ConvertToLogarithmicScale(std::vector<float>& melEnergies);
-
- /**
- * @brief Create a matrix used to calculate Discrete Cosine
- * Transform.
- * @param[in] inputLength - input length of the buffer on which
- * DCT will be performed
- * @param[in] coefficientCount - Total coefficients per input
- * length
- * @return 1D vector with inputLength x coefficientCount elements
- * populated with DCT coefficients.
- */
- virtual std::vector<float> CreateDCTMatrix(
- const int32_t inputLength,
- const int32_t coefficientCount);
-
- /**
- * @brief Given the low and high Mel values, get the normaliser
- * for weights to be applied when populating the filter
- * bank.
- * @param[in] leftMel - low Mel frequency value
- * @param[in] rightMel - high Mel frequency value
- * @param[in] useHTKMethod - bool to signal if HTK method is to be
- * used for calculation
- */
- virtual float GetMelFilterBankNormaliser(
- const float& leftMel,
- const float& rightMel,
- const bool useHTKMethod);
-
-private:
-
- std::vector<float> _m_frame;
- std::vector<float> _m_buffer;
- std::vector<float> _m_melEnergies;
- std::vector<float> _m_windowFunc;
- std::vector<std::vector<float>> _m_melFilterBank;
- std::vector<float> _m_dctMatrix;
- std::vector<int32_t> _m_filterBankFilterFirst;
- std::vector<int32_t> _m_filterBankFilterLast;
- bool _m_filterBankInitialised;
-
- /**
- * @brief Initialises the filter banks and the DCT matrix **/
- void _InitMelFilterBank();
-
- /**
- * @brief Signals whether the instance of MFCC has had its
- * required buffers initialised
- * @return True if initialised, false otherwise
- **/
- bool _IsMelFilterBankInited();
-
- /**
- * @brief Create mel filter banks for MFCC calculation.
- * @return 2D vector of floats
- **/
- std::vector<std::vector<float>> _CreateMelFilterBank();
-
- /**
- * @brief Computes and populates internal memeber buffers used
- * in MFCC feature calculation
- * @param[in] audioData - 1D vector of 16-bit audio data
- */
- void _MfccComputePreFeature(const std::vector<float>& audioData);
-
- /** @brief Computes the magnitude from an interleaved complex array */
- void _ConvertToPowerSpectrum();
-
-};
-
diff --git a/samples/SpeechRecognition/include/MathUtils.hpp b/samples/SpeechRecognition/include/MathUtils.hpp
deleted file mode 100644
index 5f81fb6507..0000000000
--- a/samples/SpeechRecognition/include/MathUtils.hpp
+++ /dev/null
@@ -1,85 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <vector>
-#include <cmath>
-#include <cstdint>
-#include <numeric>
-
-class MathUtils
-{
-
-public:
-
- /**
- * @brief Computes the FFT for the input vector
- * @param[in] input Floating point vector of input elements
- * @param[out] fftOutput Output buffer to be populated by computed
- * FFTs
- * @return none
- */
- static void FftF32(std::vector<float>& input,
- std::vector<float>& fftOutput);
-
-
- /**
- * @brief Computes the dot product of two 1D floating point
- * vectors.
- * result = sum(srcA[0]*srcB[0] + srcA[1]*srcB[1] + ..)
- * @param[in] srcPtrA pointer to the first element of first
- * array
- * @param[in] srcPtrB pointer to the first element of second
- * array
- * @param[in] srcLen Number of elements in the array/vector
- * @return dot product
- */
- static float DotProductF32(float* srcPtrA, float* srcPtrB,
- const int srcLen);
-
- /**
- * @brief Computes the squared magnitude of floating point
- * complex number array.
- * @param[in] ptrSrc pointer to the first element of input
- * array
- * @param[in] srcLen Number of elements in the array/vector
- * @param[out] ptrDst Output buffer to be populated
- * @param[in] dstLen output buffer len (for sanity check only)
- * @return true if successful, false otherwise
- */
- static bool ComplexMagnitudeSquaredF32(float* ptrSrc,
- const int srcLen,
- float* ptrDst,
- const int dstLen);
-
- /**
- * @brief Computes the natural logarithms of input floating point
- * vector
- * @param[in] input Floating point input vector
- * @param[out] output Pre-allocated buffer to be populated with
- * natural log values of each input element
- * @return none
- */
- static void VecLogarithmF32(std::vector <float>& input,
- std::vector <float>& output);
-
- /**
- * @brief Gets the mean of a floating point array of elements
- * @param[in] ptrSrc pointer to the first element
- * @param[in] srcLen Number of elements in the array/vector
- * @return average value
- */
- static float MeanF32(float* ptrSrc, const uint32_t srcLen);
-
- /**
- * @brief Gets the standard deviation of a floating point array
- * of elements
- * @param[in] ptrSrc pointer to the first element
- * @param[in] srcLen Number of elements in the array/vector
- * @param[in] mean pre-computed mean value
- * @return standard deviation value
- */
- static float StdDevF32(float* ptrSrc, const uint32_t srcLen,
- const float mean);
-};
diff --git a/samples/SpeechRecognition/include/SlidingWindow.hpp b/samples/SpeechRecognition/include/SlidingWindow.hpp
deleted file mode 100644
index 791a0b7fc0..0000000000
--- a/samples/SpeechRecognition/include/SlidingWindow.hpp
+++ /dev/null
@@ -1,161 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-template<class T>
-class SlidingWindow
-{
-protected:
- T* m_start = nullptr;
- size_t m_dataSize = 0;
- size_t m_size = 0;
- size_t m_stride = 0;
- size_t m_count = 0;
-public:
-
- /**
- * Creates the window slider through the given data.
- *
- * @param data pointer to the data to slide through.
- * @param dataSize size in T type elements wise.
- * @param windowSize sliding window size in T type wise elements.
- * @param stride stride size in T type wise elements.
- */
- SlidingWindow(T* data, size_t dataSize,
- size_t windowSize, size_t stride)
- {
- m_start = data;
- m_dataSize = dataSize;
- m_size = windowSize;
- m_stride = stride;
- }
-
- SlidingWindow() = default;
-
- ~SlidingWindow() = default;
-
- /**
- * Get the next data window.
- * @return pointer to the next window, if next window is not available nullptr is returned.
- */
- virtual T* Next()
- {
- if (HasNext())
- {
- m_count++;
- return m_start + Index() * m_stride;
- }
- else
- {
- return nullptr;
- }
- }
-
- /**
- * Checks if the next data portion is available.
- * @return true if next data portion is available
- */
- bool HasNext()
- {
- return this->m_count < 1 + this->FractionalTotalStrides() && (this->NextWindowStartIndex() < this->m_dataSize);
- }
-
- /**
- * Resest the slider to the initial position.
- */
- virtual void Reset()
- {
- m_count = 0;
- }
-
- /**
- * Resest the slider to the initial position.
- */
- virtual size_t GetWindowSize()
- {
- return m_size;
- }
-
- /**
- * Resets the slider to the start of the new data.
- * New data size MUST be the same as the old one.
- * @param newStart pointer to the new data to slide through.
- */
- virtual void Reset(T* newStart)
- {
- m_start = newStart;
- Reset();
- }
-
- /**
- * Gets current index of the sliding window.
- * @return current position of the sliding window in number of strides
- */
- size_t Index()
- {
- return m_count == 0? 0: m_count - 1;
- }
-
- /**
- * Gets the index from the start of the data where the next window will begin.
- * While Index() returns the index of sliding window itself this function returns the index of the data
- * element itself.
- * @return Index from the start of the data where the next sliding window will begin.
- */
- virtual size_t NextWindowStartIndex()
- {
- return m_count == 0? 0: ((m_count) * m_stride);
- }
-
- /**
- * Go to given sliding window index.
- * @param index new position of the sliding window. if index is invalid (greater than possible range of strides)
- * then next call to Next() will return nullptr.
- */
- void FastForward(size_t index)
- {
- m_count = index;
- }
-
- /**
- * Calculates whole number of times the window can stride through the given data.
- * @return maximum number of strides.
- */
- size_t TotalStrides()
- {
- if (m_size > m_dataSize)
- {
- return 0;
- }
- return ((m_dataSize - m_size)/m_stride);
- }
-
- /**
- * Calculates number of times the window can stride through the given data. May not be a whole number.
- * @return Number of strides to cover all data.
- */
- float FractionalTotalStrides()
- {
- if(this->m_size > this->m_dataSize)
- {
- return this->m_dataSize / this->m_size;
- }
- else
- {
- return ((this->m_dataSize - this->m_size)/ static_cast<float>(this->m_stride));
- }
-
- }
-
- /**
- * Calculates the remaining data left to be processed
- * @return The remaining unprocessed data
- */
- int RemainingData()
- {
- return this->m_dataSize - this->NextWindowStartIndex();
- }
-}; \ No newline at end of file
diff --git a/samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp b/samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp
index 47ce30416f..bc3fbfe151 100644
--- a/samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp
+++ b/samples/SpeechRecognition/include/SpeechRecognitionPipeline.hpp
@@ -8,16 +8,16 @@
#include "ArmnnNetworkExecutor.hpp"
#include "Decoder.hpp"
#include "MFCC.hpp"
-#include "Preprocess.hpp"
+#include "Wav2LetterPreprocessor.hpp"
-namespace asr
+namespace asr
{
/**
* Generic Speech Recognition pipeline with 3 steps: data pre-processing, inference execution and inference
* result post-processing.
*
*/
-class ASRPipeline
+class ASRPipeline
{
public:
@@ -27,7 +27,7 @@ public:
* @param decoder - unique pointer to inference results decoder
*/
ASRPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> executor,
- std::unique_ptr<Decoder> decoder);
+ std::unique_ptr<Decoder> decoder, std::unique_ptr<Wav2LetterPreprocessor> preprocessor);
/**
* @brief Standard audio pre-processing implementation.
@@ -36,20 +36,16 @@ public:
* extracting the MFCC features.
* @param[in] audio - the raw audio data
- * @param[out] preprocessor - the preprocessor object, which handles the data prepreration
+ * @param[out] preprocessor - the preprocessor object, which handles the data preparation
*/
- template<typename Tin,typename Tout>
- std::vector<Tout> PreProcessing(std::vector<Tin>& audio, Preprocess& preprocessor)
- {
- int audioDataToPreProcess = preprocessor._m_windowLen +
- ((preprocessor._m_mfcc._m_params.m_numMfccVectors -1) *preprocessor._m_windowStride);
- int outputBufferSize = preprocessor._m_mfcc._m_params.m_numMfccVectors
- * preprocessor._m_mfcc._m_params.m_numMfccFeatures * 3;
- std::vector<Tout> outputBuffer(outputBufferSize);
- preprocessor.Invoke(audio.data(), audioDataToPreProcess, outputBuffer, m_executor->GetQuantizationOffset(),
- m_executor->GetQuantizationScale());
- return outputBuffer;
- }
+ std::vector<int8_t> PreProcessing(std::vector<float>& audio);
+
+ int getInputSamplesSize();
+ int getSlidingWindowOffset();
+
+ // Exposing hardcoded constant as it can only be derived from model knowledge and not from model itself
+ // Will need to be refactored so that hard coded values are not defined outside of model settings
+ int SLIDING_WINDOW_OFFSET;
/**
* @brief Executes inference
@@ -60,9 +56,9 @@ public:
* @param[out] result - raw inference results.
*/
template<typename T>
- void Inference(const std::vector<T>& preprocessedData, common::InferenceResults<int8_t>& result)
+ void Inference(const std::vector<T>& preprocessedData, common::InferenceResults<int8_t>& result)
{
- size_t data_bytes = sizeof(std::vector<T>) + (sizeof(T) * preprocessedData.size());
+ size_t data_bytes = sizeof(T) * preprocessedData.size();
m_executor->Run(preprocessedData.data(), data_bytes, result);
}
@@ -78,9 +74,9 @@ public:
*/
template<typename T>
void PostProcessing(common::InferenceResults<int8_t>& inferenceResult,
- bool& isFirstWindow,
- bool isLastWindow,
- std::string currentRContext)
+ bool& isFirstWindow,
+ bool isLastWindow,
+ std::string currentRContext)
{
int rowLength = 29;
int middleContextStart = 49;
@@ -92,17 +88,17 @@ public:
std::vector<T> contextToProcess;
// If isFirstWindow we keep the left context of the output
- if(isFirstWindow)
+ if (isFirstWindow)
{
std::vector<T> chunk(&inferenceResult[0][leftContextStart],
- &inferenceResult[0][middleContextEnd * rowLength]);
+ &inferenceResult[0][middleContextEnd * rowLength]);
contextToProcess = chunk;
}
- // Else we only keep the middle context of the output
- else
+ else
{
+ // Else we only keep the middle context of the output
std::vector<T> chunk(&inferenceResult[0][middleContextStart * rowLength],
- &inferenceResult[0][middleContextEnd * rowLength]);
+ &inferenceResult[0][middleContextEnd * rowLength]);
contextToProcess = chunk;
}
std::string output = this->m_decoder->DecodeOutput<T>(contextToProcess);
@@ -110,10 +106,10 @@ public:
std::cout << output << std::flush;
// If this is the last window, we print the right context of the output
- if(isLastWindow)
+ if (isLastWindow)
{
- std::vector<T> rContext(&inferenceResult[0][rightContextStart*rowLength],
- &inferenceResult[0][rightContextEnd * rowLength]);
+ std::vector<T> rContext(&inferenceResult[0][rightContextStart * rowLength],
+ &inferenceResult[0][rightContextEnd * rowLength]);
currentRContext = this->m_decoder->DecodeOutput(rContext);
std::cout << currentRContext << std::endl;
}
@@ -122,6 +118,7 @@ public:
protected:
std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> m_executor;
std::unique_ptr<Decoder> m_decoder;
+ std::unique_ptr<Wav2LetterPreprocessor> m_preProcessor;
};
using IPipelinePtr = std::unique_ptr<asr::ASRPipeline>;
@@ -136,4 +133,4 @@ using IPipelinePtr = std::unique_ptr<asr::ASRPipeline>;
*/
IPipelinePtr CreatePipeline(common::PipelineOptions& config, std::map<int, std::string>& labels);
-}// namespace asr \ No newline at end of file
+} // namespace asr \ No newline at end of file
diff --git a/samples/SpeechRecognition/include/Wav2LetterMFCC.hpp b/samples/SpeechRecognition/include/Wav2LetterMFCC.hpp
new file mode 100644
index 0000000000..aa88aafb3b
--- /dev/null
+++ b/samples/SpeechRecognition/include/Wav2LetterMFCC.hpp
@@ -0,0 +1,78 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "MFCC.hpp"
+
+/* Class to provide Wav2Letter specific MFCC calculation requirements. */
+class Wav2LetterMFCC : public MFCC
+{
+
+public:
+ explicit Wav2LetterMFCC(const MfccParams& params)
+ : MFCC(params)
+ {}
+
+ Wav2LetterMFCC() = delete;
+ ~Wav2LetterMFCC() = default;
+
+protected:
+
+ /**
+ * @brief Overrides base class implementation of this function.
+ * @param[in] fftVec Vector populated with FFT magnitudes
+ * @param[in] melFilterBank 2D Vector with filter bank weights
+ * @param[in] filterBankFilterFirst Vector containing the first indices of filter bank
+ * to be used for each bin.
+ * @param[in] filterBankFilterLast Vector containing the last indices of filter bank
+ * to be used for each bin.
+ * @param[out] melEnergies Pre-allocated vector of MEL energies to be
+ * populated.
+ * @return true if successful, false otherwise
+ */
+ bool ApplyMelFilterBank(
+ std::vector<float>& fftVec,
+ std::vector<std::vector<float>>& melFilterBank,
+ std::vector<uint32_t>& filterBankFilterFirst,
+ std::vector<uint32_t>& filterBankFilterLast,
+ std::vector<float>& melEnergies) override;
+
+ /**
+ * @brief Override for the base class implementation convert mel
+ * energies to logarithmic scale. The difference from
+ * default behaviour is that the power is converted to dB
+ * and subsequently clamped.
+ * @param[in,out] melEnergies 1D vector of Mel energies
+ **/
+ void ConvertToLogarithmicScale(std::vector<float>& melEnergies) override;
+
+ /**
+ * @brief Create a matrix used to calculate Discrete Cosine
+ * Transform. Override for the base class' default
+ * implementation as the first and last elements
+ * use a different normaliser.
+ * @param[in] inputLength input length of the buffer on which
+ * DCT will be performed
+ * @param[in] coefficientCount Total coefficients per input length.
+ * @return 1D vector with inputLength x coefficientCount elements
+ * populated with DCT coefficients.
+ */
+ std::vector<float> CreateDCTMatrix(int32_t inputLength,
+ int32_t coefficientCount) override;
+
+ /**
+ * @brief Given the low and high Mel values, get the normaliser
+ * for weights to be applied when populating the filter
+ * bank. Override for the base class implementation.
+ * @param[in] leftMel Low Mel frequency value.
+ * @param[in] rightMel High Mel frequency value.
+ * @param[in] useHTKMethod bool to signal if HTK method is to be
+ * used for calculation.
+ * @return Value to use for normalising.
+ */
+ float GetMelFilterBankNormaliser(const float& leftMel,
+ const float& rightMel,
+ bool useHTKMethod) override;
+}; \ No newline at end of file
diff --git a/samples/SpeechRecognition/include/Preprocess.hpp b/samples/SpeechRecognition/include/Wav2LetterPreprocessor.hpp
index 80c568439b..ebc9e864e3 100644
--- a/samples/SpeechRecognition/include/Preprocess.hpp
+++ b/samples/SpeechRecognition/include/Wav2LetterPreprocessor.hpp
@@ -1,48 +1,23 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
+#ifndef SPEECH_RECOGNITION_EXAMPLE_WAV2LETTERPREPROCESSOR_HPP
+#define SPEECH_RECOGNITION_EXAMPLE_WAV2LETTERPREPROCESSOR_HPP
-#pragma once
-
+#include <numeric>
#include "DataStructures.hpp"
#include "SlidingWindow.hpp"
-#include <numeric>
#include "MFCC.hpp"
+#include "Wav2LetterMFCC.hpp"
+// Class to facilitate pre-processing calculation for Wav2Letter model for ASR
+using AudioWindow = SlidingWindow<const float>;
-/* Class to facilitate pre-processing calculation for Wav2Letter model
- * for ASR */
-using AudioWindow = SlidingWindow <const float>;
-
-class Preprocess
+class Wav2LetterPreprocessor
{
public:
-
- MFCC _m_mfcc; /* MFCC instance */
-
- /* Actual buffers to be populated */
- Array2d<float> _m_mfccBuf; /* Contiguous buffer 1D: MFCC */
- Array2d<float> _m_delta1Buf; /* Contiguous buffer 1D: Delta 1 */
- Array2d<float> _m_delta2Buf; /* Contiguous buffer 1D: Delta 2 */
-
- uint32_t _m_windowLen; /* Window length for MFCC */
- uint32_t _m_windowStride; /* Window stride len for MFCC */
- AudioWindow _m_window; /* Sliding window */
-
- /**
- * @brief Constructor
- * @param[in] numMfccFeatures number of MFCC features per window
- * @param[in] windowLen number of elements in a window
- * @param[in] windowStride stride (in number of elements) for
- * moving the window
- * @param[in] numMfccVectors number of MFCC vectors per window
- */
- Preprocess(
- const uint32_t windowLen,
- const uint32_t windowStride,
- const MFCC mfccInst);
- Preprocess() = delete;
- ~Preprocess();
+ Wav2LetterPreprocessor(uint32_t windowLen, uint32_t windowStride,
+ std::unique_ptr<Wav2LetterMFCC> mfccInst);
/**
* @brief Calculates the features required from audio data. This
@@ -55,12 +30,19 @@ public:
* @param[in] tensor tensor to be populated
* @return true if successful, false in case of error.
*/
- bool Invoke(const float* audioData,
- const uint32_t audioDataLen,
- std::vector<int8_t>& output,
- int quantOffset,
+ bool Invoke(const float* audioData, uint32_t audioDataLen, std::vector<int8_t>& output, int quantOffset,
float quantScale);
+ std::unique_ptr<MFCC> m_mfcc;
+
+ // Actual buffers to be populated
+ Array2d<float> m_mfccBuf; // Contiguous buffer 1D: MFCC
+ Array2d<float> m_delta1Buf; // Contiguous buffer 1D: Delta 1
+ Array2d<float> m_delta2Buf; // Contiguous buffer 1D: Delta 2
+
+ uint32_t m_windowLen; // Window length for MFCC
+ uint32_t m_windowStride; // Window stride len for MFCC
+ AudioWindow m_window; // Sliding window
protected:
/**
@@ -73,16 +55,18 @@ protected:
*
* @return true if successful, false otherwise
*/
- static bool _ComputeDeltas(Array2d<float>& mfcc,
- Array2d<float>& delta1,
- Array2d<float>& delta2);
+ static bool ComputeDeltas(Array2d<float>& mfcc,
+ Array2d<float>& delta1,
+ Array2d<float>& delta2);
+
+protected:
/**
* @brief Given a 2D vector of floats, computes the mean
* @param[in] vec vector of vector of floats
* @return mean value
*/
- static float _GetMean(Array2d<float>& vec);
+ static float GetMean(Array2d<float>& vec);
/**
* @brief Given a 2D vector of floats, computes the stddev
@@ -90,8 +74,7 @@ protected:
* @param[in] mean mean value of the vector passed in
* @return stddev value
*/
- static float _GetStdDev(Array2d<float>& vec,
- const float mean);
+ static float GetStdDev(Array2d<float>& vec, float mean);
/**
* @brief Given a 2D vector of floats, normalises it using
@@ -99,13 +82,13 @@ protected:
* @param[in/out] vec vector of vector of floats
* @return
*/
- static void _NormaliseVec(Array2d<float>& vec);
+ static void NormaliseVec(Array2d<float>& vec);
/**
* @brief Normalises the MFCC and delta buffers
* @return
*/
- void _Normalise();
+ void Normalise();
/**
* @brief Given the quantisation and data type limits, computes
@@ -117,12 +100,12 @@ protected:
* @param[in] maxVal Numerical limit - maximum
* @return floating point quantised value
*/
- static float _GetQuantElem(
- const float elem,
- const float quantScale,
- const int quantOffset,
- const float minVal,
- const float maxVal);
+ static float GetQuantElem(
+ float elem,
+ float quantScale,
+ int quantOffset,
+ float minVal,
+ float maxVal);
/**
* @brief Quantises the MFCC and delta buffers, and places them
@@ -137,39 +120,39 @@ protected:
* @param[in] quantScale quantisation scale
* @param[in] quantOffset quantisation offset
*/
- template <typename T>
- bool _Quantise(T* outputBuf, int quantOffset, float quantScale)
+ template<typename T>
+ bool Quantise(T*outputBuf, int quantOffset, float quantScale)
{
- /* Populate */
+ // Populate
T* outputBufMfcc = outputBuf;
- T* outputBufD1 = outputBuf + this->_m_mfcc._m_params.m_numMfccFeatures;
- T* outputBufD2 = outputBufD1 + this->_m_mfcc._m_params.m_numMfccFeatures;
- const uint32_t ptrIncr = this->_m_mfcc._m_params.m_numMfccFeatures * 2; /* (3 vectors - 1 vector) */
+ T* outputBufD1 = outputBuf + this->m_mfcc->m_params.m_numMfccFeatures;
+ T* outputBufD2 = outputBufD1 + this->m_mfcc->m_params.m_numMfccFeatures;
+ const uint32_t ptrIncr = this->m_mfcc->m_params.m_numMfccFeatures * 2; // (3 vectors - 1 vector)
const float minVal = std::numeric_limits<T>::min();
const float maxVal = std::numeric_limits<T>::max();
- /* We need to do a transpose while copying and concatenating
- * the tensor*/
- for (uint32_t j = 0; j < this->_m_mfcc._m_params.m_numMfccVectors; ++j) {
- for (uint32_t i = 0; i < this->_m_mfcc._m_params.m_numMfccFeatures; ++i)
+ // We need to do a transpose while copying and concatenating the tensor
+ for (uint32_t j = 0; j < this->m_mfcc->m_params.m_numMfccVectors; ++j)
+ {
+ for (uint32_t i = 0; i < this->m_mfcc->m_params.m_numMfccFeatures; ++i)
{
- *outputBufMfcc++ = static_cast<T>(this->_GetQuantElem(
- this->_m_mfccBuf(i, j), quantScale,
+ *outputBufMfcc++ = static_cast<T>(Wav2LetterPreprocessor::GetQuantElem(
+ this->m_mfccBuf(i, j), quantScale,
quantOffset, minVal, maxVal));
- *outputBufD1++ = static_cast<T>(this->_GetQuantElem(
- this->_m_delta1Buf(i, j), quantScale,
+ *outputBufD1++ = static_cast<T>(Wav2LetterPreprocessor::GetQuantElem(
+ this->m_delta1Buf(i, j), quantScale,
quantOffset, minVal, maxVal));
- *outputBufD2++ = static_cast<T>(this->_GetQuantElem(
- this->_m_delta2Buf(i, j), quantScale,
+ *outputBufD2++ = static_cast<T>(Wav2LetterPreprocessor::GetQuantElem(
+ this->m_delta2Buf(i, j), quantScale,
quantOffset, minVal, maxVal));
}
outputBufMfcc += ptrIncr;
outputBufD1 += ptrIncr;
outputBufD2 += ptrIncr;
}
-
return true;
}
};
+#endif //SPEECH_RECOGNITION_EXAMPLE_WAV2LETTERPREPROCESSOR_HPP
diff --git a/samples/SpeechRecognition/src/AudioCapture.cpp b/samples/SpeechRecognition/src/AudioCapture.cpp
deleted file mode 100644
index f3b9092218..0000000000
--- a/samples/SpeechRecognition/src/AudioCapture.cpp
+++ /dev/null
@@ -1,104 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "AudioCapture.hpp"
-#include <alsa/asoundlib.h>
-#include <sndfile.h>
-#include <samplerate.h>
-
-namespace asr
-{
- std::vector<float> AudioCapture::LoadAudioFile(std::string filePath)
- {
- SF_INFO inputSoundFileInfo;
- SNDFILE* infile = NULL;
- infile = sf_open(filePath.c_str(), SFM_READ, &inputSoundFileInfo);
-
- float audioIn[inputSoundFileInfo.channels * inputSoundFileInfo.frames];
- sf_read_float(infile, audioIn, inputSoundFileInfo.channels * inputSoundFileInfo.frames);
-
- float sampleRate = 16000.0f;
- float srcRatio = sampleRate / (float)inputSoundFileInfo.samplerate;
- int outputFrames = ceil(inputSoundFileInfo.frames * srcRatio);
- float dataOut[outputFrames];
-
- // Convert to mono
- float monoData[inputSoundFileInfo.frames];
- for(int i = 0; i < inputSoundFileInfo.frames; i++)
- {
- float val = 0.0f;
- for(int j = 0; j < inputSoundFileInfo.channels; j++)
- monoData[i] += audioIn[i * inputSoundFileInfo.channels + j];
- monoData[i] /= inputSoundFileInfo.channels;
- }
-
- // Resample
- SRC_DATA srcData;
- srcData.data_in = monoData;
- srcData.input_frames = inputSoundFileInfo.frames;
- srcData.data_out = dataOut;
- srcData.output_frames = outputFrames;
- srcData.src_ratio = srcRatio;
-
- src_simple(&srcData, SRC_SINC_BEST_QUALITY, 1);
-
- // Convert to Vector
- std::vector<float> processedInput;
-
- for(int i = 0; i < srcData.output_frames_gen; ++i)
- {
- processedInput.push_back(srcData.data_out[i]);
- }
-
- sf_close(infile);
-
- return processedInput;
- }
-
- void AudioCapture::InitSlidingWindow(float* data, size_t dataSize, int minSamples, size_t stride)
- {
- this->m_window = SlidingWindow<const float>(data, dataSize, minSamples, stride);
- }
-
- bool AudioCapture::HasNext()
- {
- return m_window.HasNext();
- }
-
- std::vector<float> AudioCapture::Next()
- {
- if (this->m_window.HasNext())
- {
- int remainingData = this->m_window.RemainingData();
- const float* windowData = this->m_window.Next();
-
- size_t windowSize = this->m_window.GetWindowSize();
-
- if(remainingData < windowSize)
- {
- std::vector<float> mfccAudioData(windowSize, 0.0f);
- for(int i = 0; i < remainingData; ++i)
- {
- mfccAudioData[i] = *windowData;
- if(i < remainingData - 1)
- {
- ++windowData;
- }
- }
- return mfccAudioData;
- }
- else
- {
- std::vector<float> mfccAudioData(windowData, windowData + windowSize);
- return mfccAudioData;
- }
- }
- else
- {
- throw std::out_of_range("Error, end of audio data reached.");
- }
- }
-} //namespace asr
-
diff --git a/samples/SpeechRecognition/src/Decoder.cpp b/samples/SpeechRecognition/src/Decoder.cpp
index 663d4db5b5..b95288e95c 100644
--- a/samples/SpeechRecognition/src/Decoder.cpp
+++ b/samples/SpeechRecognition/src/Decoder.cpp
@@ -5,33 +5,32 @@
#include "Decoder.hpp"
-namespace asr {
+namespace asr
+{
- Decoder::Decoder(std::map<int, std::string>& labels):
- m_labels(labels)
- {}
+Decoder::Decoder(std::map<int, std::string>& labels) :
+ m_labels(labels) {}
- std::string Decoder::FilterCharacters(std::vector<char>& unfiltered)
- {
- std::string filtered = "";
+std::string Decoder::FilterCharacters(std::vector<char>& unfiltered)
+{
+ std::string filtered;
- for(int i = 0; i < unfiltered.size(); ++i)
+ for (int i = 0; i < unfiltered.size(); ++i)
+ {
+ if (unfiltered.at(i) == '$')
{
- if (unfiltered.at(i) == '$')
- {
- continue;
- }
-
- else if (i + 1 < unfiltered.size() && unfiltered.at(i) == unfiltered.at(i + 1))
- {
- continue;
- }
- else
- {
- filtered += unfiltered.at(i);
- }
+ continue;
+ }
+ else if (i + 1 < unfiltered.size() && unfiltered.at(i) == unfiltered.at(i + 1))
+ {
+ continue;
+ }
+ else
+ {
+ filtered += unfiltered.at(i);
}
- return filtered;
}
-}// namespace
+ return filtered;
+}
+} // namespace asr
diff --git a/samples/SpeechRecognition/src/MFCC.cpp b/samples/SpeechRecognition/src/MFCC.cpp
deleted file mode 100644
index 234b14d3be..0000000000
--- a/samples/SpeechRecognition/src/MFCC.cpp
+++ /dev/null
@@ -1,397 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <cstdio>
-#include <float.h>
-
-#include "MFCC.hpp"
-#include "MathUtils.hpp"
-
-
-MfccParams::MfccParams(
- const float samplingFreq,
- const int numFbankBins,
- const float melLoFreq,
- const float melHiFreq,
- const int numMfccFeats,
- const int frameLen,
- const bool useHtkMethod,
- const int numMfccVectors):
- m_samplingFreq(samplingFreq),
- m_numFbankBins(numFbankBins),
- m_melLoFreq(melLoFreq),
- m_melHiFreq(melHiFreq),
- m_numMfccFeatures(numMfccFeats),
- m_frameLen(frameLen),
- m_numMfccVectors(numMfccVectors),
-
- /* Smallest power of 2 >= frame length. */
- m_frameLenPadded(pow(2, ceil((log(frameLen)/log(2))))),
- m_useHtkMethod(useHtkMethod)
-{}
-
-std::string MfccParams::Str()
-{
- char strC[1024];
- snprintf(strC, sizeof(strC) - 1, "\n \
- \n\t Sampling frequency: %f\
- \n\t Number of filter banks: %u\
- \n\t Mel frequency limit (low): %f\
- \n\t Mel frequency limit (high): %f\
- \n\t Number of MFCC features: %u\
- \n\t Frame length: %u\
- \n\t Padded frame length: %u\
- \n\t Using HTK for Mel scale: %s\n",
- this->m_samplingFreq, this->m_numFbankBins, this->m_melLoFreq,
- this->m_melHiFreq, this->m_numMfccFeatures, this->m_frameLen,
- this->m_frameLenPadded, this->m_useHtkMethod ? "yes" : "no");
- return std::string{strC};
-}
-
-MFCC::MFCC(const MfccParams& params):
- _m_params(params),
- _m_filterBankInitialised(false)
-{
- this->_m_buffer = std::vector<float>(
- this->_m_params.m_frameLenPadded, 0.0);
- this->_m_frame = std::vector<float>(
- this->_m_params.m_frameLenPadded, 0.0);
- this->_m_melEnergies = std::vector<float>(
- this->_m_params.m_numFbankBins, 0.0);
-
- this->_m_windowFunc = std::vector<float>(this->_m_params.m_frameLen);
- const float multiplier = 2 * M_PI / this->_m_params.m_frameLen;
-
- /* Create window function. */
- for (size_t i = 0; i < this->_m_params.m_frameLen; i++)
- {
- this->_m_windowFunc[i] = (0.5 - (0.5 * cos(static_cast<float>(i) * multiplier)));
- }
-}
-
-void MFCC::Init()
-{
- this->_InitMelFilterBank();
-}
-
-float MFCC::MelScale(const float freq, const bool useHTKMethod)
-{
- if (useHTKMethod)
- {
- return 1127.0f * logf (1.0f + freq / 700.0f);
- }
- else
- {
- /* Slaney formula for mel scale. */
- float mel = freq / freqStep;
-
- if (freq >= minLogHz)
- {
- mel = minLogMel + logf(freq / minLogHz) / logStep;
- }
- return mel;
- }
-}
-
-float MFCC::InverseMelScale(const float melFreq, const bool useHTKMethod)
-{
- if (useHTKMethod)
- {
- return 700.0f * (expf (melFreq / 1127.0f) - 1.0f);
- }
- else
- {
- /* Slaney formula for mel scale. */
- float freq = freqStep * melFreq;
-
- if (melFreq >= minLogMel)
- {
- freq = minLogHz * expf(logStep * (melFreq - minLogMel));
- }
- return freq;
- }
-}
-
-
-bool MFCC::ApplyMelFilterBank(
- std::vector<float>& fftVec,
- std::vector<std::vector<float>>& melFilterBank,
- std::vector<int32_t>& filterBankFilterFirst,
- std::vector<int32_t>& filterBankFilterLast,
- std::vector<float>& melEnergies)
-{
- const size_t numBanks = melEnergies.size();
-
- if (numBanks != filterBankFilterFirst.size() ||
- numBanks != filterBankFilterLast.size())
- {
- printf("unexpected filter bank lengths\n");
- return false;
- }
-
- for (size_t bin = 0; bin < numBanks; ++bin)
- {
- auto filterBankIter = melFilterBank[bin].begin();
- float melEnergy = 1e-10; /* Avoid log of zero at later stages */
- const int32_t firstIndex = filterBankFilterFirst[bin];
- const int32_t lastIndex = filterBankFilterLast[bin];
-
- for (int32_t i = firstIndex; i <= lastIndex; ++i)
- {
- melEnergy += (*filterBankIter++ * fftVec[i]);
- }
-
- melEnergies[bin] = melEnergy;
- }
-
- return true;
-}
-
-void MFCC::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
-{
- float maxMelEnergy = -FLT_MAX;
-
- /* Container for natural logarithms of mel energies */
- std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
-
- /* Because we are taking natural logs, we need to multiply by log10(e).
- * Also, for wav2letter model, we scale our log10 values by 10 */
- constexpr float multiplier = 10.0 * /* default scalar */
- 0.4342944819032518; /* log10f(std::exp(1.0))*/
-
- /* Take log of the whole vector */
- MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
-
- /* Scale the log values and get the max */
- for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
- iterM != melEnergies.end(); ++iterM, ++iterL)
- {
- *iterM = *iterL * multiplier;
-
- /* Save the max mel energy. */
- if (*iterM > maxMelEnergy)
- {
- maxMelEnergy = *iterM;
- }
- }
-
- /* Clamp the mel energies */
- constexpr float maxDb = 80.0;
- const float clampLevelLowdB = maxMelEnergy - maxDb;
- for (auto iter = melEnergies.begin(); iter != melEnergies.end(); ++iter)
- {
- *iter = std::max(*iter, clampLevelLowdB);
- }
-}
-
-void MFCC::_ConvertToPowerSpectrum()
-{
- const uint32_t halfDim = this->_m_params.m_frameLenPadded / 2;
-
- /* Handle this special case. */
- float firstEnergy = this->_m_buffer[0] * this->_m_buffer[0];
- float lastEnergy = this->_m_buffer[1] * this->_m_buffer[1];
-
- MathUtils::ComplexMagnitudeSquaredF32(
- this->_m_buffer.data(),
- this->_m_buffer.size(),
- this->_m_buffer.data(),
- this->_m_buffer.size()/2);
-
- this->_m_buffer[0] = firstEnergy;
- this->_m_buffer[halfDim] = lastEnergy;
-}
-
-std::vector<float> MFCC::CreateDCTMatrix(
- const int32_t inputLength,
- const int32_t coefficientCount)
-{
- std::vector<float> dctMatix(inputLength * coefficientCount);
-
- /* Orthonormal normalization. */
- const float normalizerK0 = 2 * sqrt(1.0 / static_cast<float>(4*inputLength));
- const float normalizer = 2 * sqrt(1.0 / static_cast<float>(2*inputLength));
-
- const float angleIncr = M_PI/inputLength;
- float angle = angleIncr; /* we start using it at k = 1 loop */
-
- /* First row of DCT will use normalizer K0 */
- for (int32_t n = 0; n < inputLength; ++n)
- {
- dctMatix[n] = normalizerK0;
- }
-
- /* Second row (index = 1) onwards, we use standard normalizer */
- for (int32_t k = 1, m = inputLength; k < coefficientCount; ++k, m += inputLength)
- {
- for (int32_t n = 0; n < inputLength; ++n)
- {
- dctMatix[m+n] = normalizer *
- cos((n + 0.5) * angle);
- }
- angle += angleIncr;
- }
- return dctMatix;
-}
-
-float MFCC::GetMelFilterBankNormaliser(
- const float& leftMel,
- const float& rightMel,
- const bool useHTKMethod)
-{
-/* Slaney normalization for mel weights. */
- return (2.0f / (MFCC::InverseMelScale(rightMel, useHTKMethod) -
- MFCC::InverseMelScale(leftMel, useHTKMethod)));
-}
-
-void MFCC::_InitMelFilterBank()
-{
- if (!this->_IsMelFilterBankInited())
- {
- this->_m_melFilterBank = this->_CreateMelFilterBank();
- this->_m_dctMatrix = this->CreateDCTMatrix(
- this->_m_params.m_numFbankBins,
- this->_m_params.m_numMfccFeatures);
- this->_m_filterBankInitialised = true;
- }
-}
-
-bool MFCC::_IsMelFilterBankInited()
-{
- return this->_m_filterBankInitialised;
-}
-
-void MFCC::_MfccComputePreFeature(const std::vector<float>& audioData)
-{
- this->_InitMelFilterBank();
-
- /* TensorFlow way of normalizing .wav data to (-1, 1). */
- constexpr float normaliser = 1.0;
- for (size_t i = 0; i < this->_m_params.m_frameLen; i++)
- {
- this->_m_frame[i] = static_cast<float>(audioData[i]) * normaliser;
- }
-
- /* Apply window function to input frame. */
- for(size_t i = 0; i < this->_m_params.m_frameLen; i++)
- {
- this->_m_frame[i] *= this->_m_windowFunc[i];
- }
-
- /* Set remaining frame values to 0. */
- std::fill(this->_m_frame.begin() + this->_m_params.m_frameLen,this->_m_frame.end(), 0);
-
- /* Compute FFT. */
- MathUtils::FftF32(this->_m_frame, this->_m_buffer);
-
- /* Convert to power spectrum. */
- this->_ConvertToPowerSpectrum();
-
- /* Apply mel filterbanks. */
- if (!this->ApplyMelFilterBank(this->_m_buffer,
- this->_m_melFilterBank,
- this->_m_filterBankFilterFirst,
- this->_m_filterBankFilterLast,
- this->_m_melEnergies))
- {
- printf("Failed to apply MEL filter banks\n");
- }
-
- /* Convert to logarithmic scale */
- this->ConvertToLogarithmicScale(this->_m_melEnergies);
-}
-
-std::vector<float> MFCC::MfccCompute(const std::vector<float>& audioData)
-{
- this->_MfccComputePreFeature(audioData);
-
- std::vector<float> mfccOut(this->_m_params.m_numMfccFeatures);
-
- float * ptrMel = this->_m_melEnergies.data();
- float * ptrDct = this->_m_dctMatrix.data();
- float * ptrMfcc = mfccOut.data();
-
- /* Take DCT. Uses matrix mul. */
- for (size_t i = 0, j = 0; i < mfccOut.size();
- ++i, j += this->_m_params.m_numFbankBins)
- {
- *ptrMfcc++ = MathUtils::DotProductF32(
- ptrDct + j,
- ptrMel,
- this->_m_params.m_numFbankBins);
- }
-
- return mfccOut;
-}
-
-std::vector<std::vector<float>> MFCC::_CreateMelFilterBank()
-{
- size_t numFftBins = this->_m_params.m_frameLenPadded / 2;
- float fftBinWidth = static_cast<float>(this->_m_params.m_samplingFreq) / this->_m_params.m_frameLenPadded;
-
- float melLowFreq = MFCC::MelScale(this->_m_params.m_melLoFreq,
- this->_m_params.m_useHtkMethod);
- float melHighFreq = MFCC::MelScale(this->_m_params.m_melHiFreq,
- this->_m_params.m_useHtkMethod);
- float melFreqDelta = (melHighFreq - melLowFreq) / (this->_m_params.m_numFbankBins + 1);
-
- std::vector<float> thisBin = std::vector<float>(numFftBins);
- std::vector<std::vector<float>> melFilterBank(
- this->_m_params.m_numFbankBins);
- this->_m_filterBankFilterFirst =
- std::vector<int32_t>(this->_m_params.m_numFbankBins);
- this->_m_filterBankFilterLast =
- std::vector<int32_t>(this->_m_params.m_numFbankBins);
-
- for (size_t bin = 0; bin < this->_m_params.m_numFbankBins; bin++)
- {
- float leftMel = melLowFreq + bin * melFreqDelta;
- float centerMel = melLowFreq + (bin + 1) * melFreqDelta;
- float rightMel = melLowFreq + (bin + 2) * melFreqDelta;
-
- int32_t firstIndex = -1;
- int32_t lastIndex = -1;
- const float normaliser = this->GetMelFilterBankNormaliser(leftMel, rightMel, this->_m_params.m_useHtkMethod);
-
- for (size_t i = 0; i < numFftBins; i++)
- {
- float freq = (fftBinWidth * i); /* Center freq of this fft bin. */
- float mel = MFCC::MelScale(freq, this->_m_params.m_useHtkMethod);
- thisBin[i] = 0.0;
-
- if (mel > leftMel && mel < rightMel)
- {
- float weight;
- if (mel <= centerMel)
- {
- weight = (mel - leftMel) / (centerMel - leftMel);
- }
- else
- {
- weight = (rightMel - mel) / (rightMel - centerMel);
- }
-
- thisBin[i] = weight * normaliser;
- if (firstIndex == -1)
- {
- firstIndex = i;
- }
- lastIndex = i;
- }
- }
-
- this->_m_filterBankFilterFirst[bin] = firstIndex;
- this->_m_filterBankFilterLast[bin] = lastIndex;
-
- /* Copy the part we care about. */
- for (int32_t i = firstIndex; i <= lastIndex; i++)
- {
- melFilterBank[bin].push_back(thisBin[i]);
- }
- }
-
- return melFilterBank;
-}
-
diff --git a/samples/SpeechRecognition/src/Main.cpp b/samples/SpeechRecognition/src/Main.cpp
index de37e23b40..e2d293001f 100644
--- a/samples/SpeechRecognition/src/Main.cpp
+++ b/samples/SpeechRecognition/src/Main.cpp
@@ -1,5 +1,5 @@
//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <iostream>
@@ -11,10 +11,8 @@
#include "CmdArgsParser.hpp"
#include "ArmnnNetworkExecutor.hpp"
#include "AudioCapture.hpp"
-#include "Preprocess.hpp"
-#include "Decoder.hpp"
#include "SpeechRecognitionPipeline.hpp"
-
+#include "Wav2LetterMFCC.hpp"
using InferenceResult = std::vector<int8_t>;
using InferenceResults = std::vector<InferenceResult>;
@@ -25,101 +23,77 @@ const std::string LABEL_PATH = "--label-path";
const std::string PREFERRED_BACKENDS = "--preferred-backends";
const std::string HELP = "--help";
-std::map<int, std::string> labels = {
- {0, "a" },
- {1, "b" },
- {2, "c" },
- {3, "d" },
- {4, "e" },
- {5, "f" },
- {6, "g" },
- {7, "h" },
- {8, "i" },
- {9, "j" },
- {10,"k" },
- {11,"l" },
- {12,"m" },
- {13,"n" },
- {14,"o" },
- {15,"p" },
- {16,"q" },
- {17,"r" },
- {18,"s" },
- {19,"t" },
- {20,"u" },
- {21,"v" },
- {22,"w" },
- {23,"x" },
- {24,"y" },
- {25,"z" },
- {26, "\'" },
+std::map<int, std::string> labels =
+{
+ {0, "a"},
+ {1, "b"},
+ {2, "c"},
+ {3, "d"},
+ {4, "e"},
+ {5, "f"},
+ {6, "g"},
+ {7, "h"},
+ {8, "i"},
+ {9, "j"},
+ {10, "k"},
+ {11, "l"},
+ {12, "m"},
+ {13, "n"},
+ {14, "o"},
+ {15, "p"},
+ {16, "q"},
+ {17, "r"},
+ {18, "s"},
+ {19, "t"},
+ {20, "u"},
+ {21, "v"},
+ {22, "w"},
+ {23, "x"},
+ {24, "y"},
+ {25, "z"},
+ {26, "\'"},
{27, " "},
- {28,"$" }
+ {28, "$"}
};
/*
* The accepted options for this Speech Recognition executable
*/
-static std::map<std::string, std::string> CMD_OPTIONS = {
- {AUDIO_FILE_PATH, "[REQUIRED] Path to the Audio file to run speech recognition on"},
- {MODEL_FILE_PATH, "[REQUIRED] Path to the Speech Recognition model to use"},
- {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
- " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
- " Defaults to CpuAcc,CpuRef"}
+static std::map<std::string, std::string> CMD_OPTIONS =
+{
+ {AUDIO_FILE_PATH, "[REQUIRED] Path to the Audio file to run speech recognition on"},
+ {MODEL_FILE_PATH, "[REQUIRED] Path to the Speech Recognition model to use"},
+ {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
+ " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
+ " Defaults to CpuAcc,CpuRef"}
};
/*
* Reads the user supplied backend preference, splits it by comma, and returns an ordered vector
*/
-std::vector<armnn::BackendId> GetPreferredBackendList(const std::string& preferredBackends)
+std::vector<armnn::BackendId> GetPreferredBackendList(const std::string& preferredBackends)
{
std::vector<armnn::BackendId> backends;
std::stringstream ss(preferredBackends);
- while(ss.good())
+ while (ss.good())
{
std::string backend;
- std::getline( ss, backend, ',' );
+ std::getline(ss, backend, ',');
backends.emplace_back(backend);
}
return backends;
}
-int main(int argc, char *argv[])
+int main(int argc, char* argv[])
{
- // Wav2Letter ASR SETTINGS
- int SAMP_FREQ = 16000;
- int FRAME_LEN_MS = 32;
- int FRAME_LEN_SAMPLES = SAMP_FREQ * FRAME_LEN_MS * 0.001;
- int NUM_MFCC_FEATS = 13;
- int MFCC_WINDOW_LEN = 512;
- int MFCC_WINDOW_STRIDE = 160;
- const int NUM_MFCC_VECTORS = 296;
- int SAMPLES_PER_INFERENCE = MFCC_WINDOW_LEN + ((NUM_MFCC_VECTORS -1) * MFCC_WINDOW_STRIDE);
- int MEL_LO_FREQ = 0;
- int MEL_HI_FREQ = 8000;
- int NUM_FBANK_BIN = 128;
- int INPUT_WINDOW_LEFT_CONTEXT = 98;
- int INPUT_WINDOW_RIGHT_CONTEXT = 98;
- int INPUT_WINDOW_INNER_CONTEXT = NUM_MFCC_VECTORS -
- (INPUT_WINDOW_LEFT_CONTEXT + INPUT_WINDOW_RIGHT_CONTEXT);
- int SLIDING_WINDOW_OFFSET = INPUT_WINDOW_INNER_CONTEXT * MFCC_WINDOW_STRIDE;
-
-
- MfccParams mfccParams(SAMP_FREQ, NUM_FBANK_BIN,
- MEL_LO_FREQ, MEL_HI_FREQ, NUM_MFCC_FEATS, FRAME_LEN_SAMPLES, false, NUM_MFCC_VECTORS);
-
- MFCC mfccInst = MFCC(mfccParams);
-
- Preprocess preprocessor(MFCC_WINDOW_LEN, MFCC_WINDOW_STRIDE, mfccInst);
-
bool isFirstWindow = true;
- std::string currentRContext = "";
+ std::string currentRContext = "";
- std::map <std::string, std::string> options;
+ std::map<std::string, std::string> options;
int result = ParseOptions(options, CMD_OPTIONS, argv, argc);
- if (result != 0)
+ if (result != 0)
{
return result;
}
@@ -127,28 +101,29 @@ int main(int argc, char *argv[])
// Create the network options
common::PipelineOptions pipelineOptions;
pipelineOptions.m_ModelFilePath = GetSpecifiedOption(options, MODEL_FILE_PATH);
-
- if (CheckOptionSpecified(options, PREFERRED_BACKENDS))
+ pipelineOptions.m_ModelName = "Wav2Letter";
+ if (CheckOptionSpecified(options, PREFERRED_BACKENDS))
{
pipelineOptions.m_backends = GetPreferredBackendList((GetSpecifiedOption(options, PREFERRED_BACKENDS)));
- }
- else
+ }
+ else
{
pipelineOptions.m_backends = {"CpuAcc", "CpuRef"};
}
asr::IPipelinePtr asrPipeline = asr::CreatePipeline(pipelineOptions, labels);
- asr::AudioCapture capture;
- std::vector<float> audioData = capture.LoadAudioFile(GetSpecifiedOption(options, AUDIO_FILE_PATH));
- capture.InitSlidingWindow(audioData.data(), audioData.size(), SAMPLES_PER_INFERENCE, SLIDING_WINDOW_OFFSET);
+ audio::AudioCapture capture;
+ std::vector<float> audioData = audio::AudioCapture::LoadAudioFile(GetSpecifiedOption(options, AUDIO_FILE_PATH));
+ capture.InitSlidingWindow(audioData.data(), audioData.size(), asrPipeline->getInputSamplesSize(),
+ asrPipeline->getSlidingWindowOffset());
- while (capture.HasNext())
+ while (capture.HasNext())
{
std::vector<float> audioBlock = capture.Next();
InferenceResults results;
- std::vector<int8_t> preprocessedData = asrPipeline->PreProcessing<float, int8_t>(audioBlock, preprocessor);
+ std::vector<int8_t> preprocessedData = asrPipeline->PreProcessing(audioBlock);
asrPipeline->Inference<int8_t>(preprocessedData, results);
asrPipeline->PostProcessing<int8_t>(results, isFirstWindow, !capture.HasNext(), currentRContext);
}
diff --git a/samples/SpeechRecognition/src/MathUtils.cpp b/samples/SpeechRecognition/src/MathUtils.cpp
deleted file mode 100644
index bf9908343a..0000000000
--- a/samples/SpeechRecognition/src/MathUtils.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "MathUtils.hpp"
-#include <vector>
-#include <cmath>
-#include <cstdio>
-
-void MathUtils::FftF32(std::vector<float>& input,
- std::vector<float>& fftOutput)
-{
- const int inputLength = input.size();
-
- for (int k = 0; k <= inputLength / 2; k++)
- {
- float sumReal = 0, sumImag = 0;
-
- for (int t = 0; t < inputLength; t++)
- {
- float angle = 2 * M_PI * t * k / inputLength;
- sumReal += input[t] * cosf(angle);
- sumImag += -input[t] * sinf(angle);
- }
-
- /* Arrange output to [real0, realN/2, real1, im1, real2, im2, ...] */
- if (k == 0)
- {
- fftOutput[0] = sumReal;
- }
- else if (k == inputLength / 2)
- {
- fftOutput[1] = sumReal;
- }
- else
- {
- fftOutput[k*2] = sumReal;
- fftOutput[k*2 + 1] = sumImag;
- };
- }
-}
-
-float MathUtils::DotProductF32(float* srcPtrA, float* srcPtrB,
- const int srcLen)
-{
- float output = 0.f;
-
- for (int i = 0; i < srcLen; ++i)
- {
- output += *srcPtrA++ * *srcPtrB++;
- }
- return output;
-}
-
-bool MathUtils::ComplexMagnitudeSquaredF32(float* ptrSrc,
- const int srcLen,
- float* ptrDst,
- const int dstLen)
-{
- if (dstLen < srcLen/2)
- {
- printf("dstLen must be greater than srcLen/2");
- return false;
- }
-
- for (int j = 0; j < srcLen; ++j)
- {
- const float real = *ptrSrc++;
- const float im = *ptrSrc++;
- *ptrDst++ = real*real + im*im;
- }
- return true;
-}
-
-void MathUtils::VecLogarithmF32(std::vector <float>& input,
- std::vector <float>& output)
-{
- for (auto in = input.begin(), out = output.begin();
- in != input.end(); ++in, ++out)
- {
- *out = logf(*in);
- }
-}
-
-float MathUtils::MeanF32(float* ptrSrc, const uint32_t srcLen)
-{
- if (!srcLen)
- {
- return 0.f;
- }
-
- float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0);
- return acc/srcLen;
-}
-
-float MathUtils::StdDevF32(float* ptrSrc, const uint32_t srcLen,
- const float mean)
-{
- if (!srcLen)
- {
- return 0.f;
- }
- auto VarianceFunction = [=](float acc, const float value) {
- return acc + (((value - mean) * (value - mean))/ srcLen);
- };
-
- float acc = std::accumulate(ptrSrc, ptrSrc + srcLen, 0.0,
- VarianceFunction);
- return sqrtf(acc);
-}
-
diff --git a/samples/SpeechRecognition/src/Preprocess.cpp b/samples/SpeechRecognition/src/Preprocess.cpp
deleted file mode 100644
index 86279619d7..0000000000
--- a/samples/SpeechRecognition/src/Preprocess.cpp
+++ /dev/null
@@ -1,192 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include <algorithm>
-#include <numeric>
-#include <math.h>
-#include <string.h>
-
-#include "MathUtils.hpp"
-#include "Preprocess.hpp"
-
-Preprocess::Preprocess(
- const uint32_t windowLen,
- const uint32_t windowStride,
- const MFCC mfccInst):
- _m_mfcc(mfccInst),
- _m_mfccBuf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
- _m_delta1Buf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
- _m_delta2Buf(mfccInst._m_params.m_numMfccFeatures, mfccInst._m_params.m_numMfccVectors),
- _m_windowLen(windowLen),
- _m_windowStride(windowStride)
-{
- if (mfccInst._m_params.m_numMfccFeatures > 0 && windowLen > 0)
- {
- this->_m_mfcc.Init();
- }
-}
-
-Preprocess::~Preprocess()
-{
-}
-
-bool Preprocess::Invoke( const float* audioData, const uint32_t audioDataLen, std::vector<int8_t>& output,
- int quantOffset, float quantScale)
-{
- this->_m_window = SlidingWindow<const float>(
- audioData, audioDataLen,
- this->_m_windowLen, this->_m_windowStride);
-
- uint32_t mfccBufIdx = 0;
-
- // Init buffers with 0
- std::fill(_m_mfccBuf.begin(), _m_mfccBuf.end(), 0.f);
- std::fill(_m_delta1Buf.begin(), _m_delta1Buf.end(), 0.f);
- std::fill(_m_delta2Buf.begin(), _m_delta2Buf.end(), 0.f);
-
- /* While we can slide over the window */
- while (this->_m_window.HasNext())
- {
- const float* mfccWindow = this->_m_window.Next();
- auto mfccAudioData = std::vector<float>(
- mfccWindow,
- mfccWindow + this->_m_windowLen);
-
- auto mfcc = this->_m_mfcc.MfccCompute(mfccAudioData);
- for (size_t i = 0; i < this->_m_mfccBuf.size(0); ++i)
- {
- this->_m_mfccBuf(i, mfccBufIdx) = mfcc[i];
- }
- ++mfccBufIdx;
- }
-
- /* Pad MFCC if needed by repeating last feature vector */
- while (mfccBufIdx != this->_m_mfcc._m_params.m_numMfccVectors)
- {
- memcpy(&this->_m_mfccBuf(0, mfccBufIdx),
- &this->_m_mfccBuf(0, mfccBufIdx-1), sizeof(float)*this->_m_mfcc._m_params.m_numMfccFeatures);
- ++mfccBufIdx;
- }
-
- /* Compute first and second order deltas from MFCCs */
- this->_ComputeDeltas(this->_m_mfccBuf,
- this->_m_delta1Buf,
- this->_m_delta2Buf);
-
- /* Normalise */
- this->_Normalise();
-
- return this->_Quantise<int8_t>(output.data(), quantOffset, quantScale);
-}
-
-bool Preprocess::_ComputeDeltas(Array2d<float>& mfcc,
- Array2d<float>& delta1,
- Array2d<float>& delta2)
-{
- const std::vector <float> delta1Coeffs =
- {6.66666667e-02, 5.00000000e-02, 3.33333333e-02,
- 1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
- -3.33333333e-02, -5.00000000e-02, -6.66666667e-02};
-
- const std::vector <float> delta2Coeffs =
- {0.06060606, 0.01515152, -0.01731602,
- -0.03679654, -0.04329004, -0.03679654,
- -0.01731602, 0.01515152, 0.06060606};
-
- if (delta1.size(0) == 0 || delta2.size(0) != delta1.size(0) ||
- mfcc.size(0) == 0 || mfcc.size(1) == 0)
- {
- return false;
- }
-
- /* Get the middle index; coeff vec len should always be odd */
- const size_t coeffLen = delta1Coeffs.size();
- const size_t fMidIdx = (coeffLen - 1)/2;
- const size_t numFeatures = mfcc.size(0);
- const size_t numFeatVectors = mfcc.size(1);
-
- /* iterate through features in MFCC vector*/
- for (size_t i = 0; i < numFeatures; ++i)
- {
- /* for each feature, iterate through time (t) samples representing feature evolution and
- * calculate d/dt and d^2/dt^2, using 1d convolution with differential kernels.
- * Convolution padding = valid, result size is `time length - kernel length + 1`.
- * The result is padded with 0 from both sides to match the size of initial time samples data.
- *
- * For the small filter, conv1d implementation as a simple loop is efficient enough.
- * Filters of a greater size would need CMSIS-DSP functions to be used, like arm_fir_f32.
- */
-
- for (size_t j = fMidIdx; j < numFeatVectors - fMidIdx; ++j)
- {
- float d1 = 0;
- float d2 = 0;
- const size_t mfccStIdx = j - fMidIdx;
-
- for (size_t k = 0, m = coeffLen - 1; k < coeffLen; ++k, --m)
- {
-
- d1 += mfcc(i,mfccStIdx + k) * delta1Coeffs[m];
- d2 += mfcc(i,mfccStIdx + k) * delta2Coeffs[m];
- }
-
- delta1(i,j) = d1;
- delta2(i,j) = d2;
- }
- }
-
- return true;
-}
-
-float Preprocess::_GetMean(Array2d<float>& vec)
-{
- return MathUtils::MeanF32(vec.begin(), vec.totalSize());
-}
-
-float Preprocess::_GetStdDev(Array2d<float>& vec, const float mean)
-{
- return MathUtils::StdDevF32(vec.begin(), vec.totalSize(), mean);
-}
-
-void Preprocess::_NormaliseVec(Array2d<float>& vec)
-{
- auto mean = Preprocess::_GetMean(vec);
- auto stddev = Preprocess::_GetStdDev(vec, mean);
-
- if (stddev == 0)
- {
- std::fill(vec.begin(), vec.end(), 0);
- }
- else
- {
- const float stddevInv = 1.f/stddev;
- const float normalisedMean = mean/stddev;
-
- auto NormalisingFunction = [=](float &value) {
- value = value * stddevInv - normalisedMean;
- };
- std::for_each(vec.begin(), vec.end(), NormalisingFunction);
- }
-}
-
-void Preprocess::_Normalise()
-{
- Preprocess::_NormaliseVec(this->_m_mfccBuf);
- Preprocess::_NormaliseVec(this->_m_delta1Buf);
- Preprocess::_NormaliseVec(this->_m_delta2Buf);
-}
-
-float Preprocess::_GetQuantElem(
- const float elem,
- const float quantScale,
- const int quantOffset,
- const float minVal,
- const float maxVal)
-{
- float val = std::round((elem/quantScale) + quantOffset);
- float maxim = std::max<float>(val, minVal);
- float returnVal = std::min<float>(std::max<float>(val, minVal), maxVal);
- return returnVal;
-} \ No newline at end of file
diff --git a/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp b/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp
index 1b822d6a88..8b7dd11cb4 100644
--- a/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp
+++ b/samples/SpeechRecognition/src/SpeechRecognitionPipeline.cpp
@@ -6,21 +6,86 @@
#include "SpeechRecognitionPipeline.hpp"
#include "ArmnnNetworkExecutor.hpp"
-namespace asr
+namespace asr
{
+
// Constructs the ASR pipeline from its three collaborating stages.
// Takes ownership of the network executor, the CTC decoder and the
// Wav2Letter preprocessor (all moved in).
ASRPipeline::ASRPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<int8_t>> executor,
                         std::unique_ptr<Decoder> decoder,
                         std::unique_ptr<Wav2LetterPreprocessor> preProcessor) :
        m_executor(std::move(executor)),
        m_decoder(std::move(decoder)),
        m_preProcessor(std::move(preProcessor))
{}
-IPipelinePtr CreatePipeline(common::PipelineOptions& config, std::map<int, std::string>& labels)
+int ASRPipeline::getInputSamplesSize()
{
- auto executor = std::make_unique<common::ArmnnNetworkExecutor<int8_t>>(config.m_ModelFilePath, config.m_backends);
+ return this->m_preProcessor->m_windowLen +
+ ((this->m_preProcessor->m_mfcc->m_params.m_numMfccVectors - 1) * this->m_preProcessor->m_windowStride);
+}
+
// Returns the stride (in samples) between successive inference windows.
// The value is set on the public member by CreatePipeline() after
// construction. NOTE(review): hardcoded for now until refactor.
int ASRPipeline::getSlidingWindowOffset()
{
    // Hardcoded for now until refactor
    return ASRPipeline::SLIDING_WINDOW_OFFSET;
}
+
+std::vector<int8_t> ASRPipeline::PreProcessing(std::vector<float>& audio)
+{
+ int audioDataToPreProcess = m_preProcessor->m_windowLen +
+ ((m_preProcessor->m_mfcc->m_params.m_numMfccVectors - 1) *
+ m_preProcessor->m_windowStride);
+ int outputBufferSize = m_preProcessor->m_mfcc->m_params.m_numMfccVectors
+ * m_preProcessor->m_mfcc->m_params.m_numMfccFeatures * 3;
+ std::vector<int8_t> outputBuffer(outputBufferSize);
+ m_preProcessor->Invoke(audio.data(), audioDataToPreProcess, outputBuffer, m_executor->GetQuantizationOffset(),
+ m_executor->GetQuantizationScale());
+ return outputBuffer;
+}
+
+IPipelinePtr CreatePipeline(common::PipelineOptions& config, std::map<int, std::string>& labels)
+{
+ if (config.m_ModelName == "Wav2Letter")
+ {
+ // Wav2Letter ASR SETTINGS
+ int SAMP_FREQ = 16000;
+ int FRAME_LEN_MS = 32;
+ int FRAME_LEN_SAMPLES = SAMP_FREQ * FRAME_LEN_MS * 0.001;
+ int NUM_MFCC_FEATS = 13;
+ int MFCC_WINDOW_LEN = 512;
+ int MFCC_WINDOW_STRIDE = 160;
+ const int NUM_MFCC_VECTORS = 296;
+ int SAMPLES_PER_INFERENCE = MFCC_WINDOW_LEN + ((NUM_MFCC_VECTORS - 1) * MFCC_WINDOW_STRIDE);
+ int MEL_LO_FREQ = 0;
+ int MEL_HI_FREQ = 8000;
+ int NUM_FBANK_BIN = 128;
+ int INPUT_WINDOW_LEFT_CONTEXT = 98;
+ int INPUT_WINDOW_RIGHT_CONTEXT = 98;
+ int INPUT_WINDOW_INNER_CONTEXT = NUM_MFCC_VECTORS -
+ (INPUT_WINDOW_LEFT_CONTEXT + INPUT_WINDOW_RIGHT_CONTEXT);
+ int SLIDING_WINDOW_OFFSET = INPUT_WINDOW_INNER_CONTEXT * MFCC_WINDOW_STRIDE;
+
+
+ MfccParams mfccParams(SAMP_FREQ, NUM_FBANK_BIN,
+ MEL_LO_FREQ, MEL_HI_FREQ, NUM_MFCC_FEATS, FRAME_LEN_SAMPLES, false, NUM_MFCC_VECTORS);
+
+ std::unique_ptr<Wav2LetterMFCC> mfccInst = std::make_unique<Wav2LetterMFCC>(mfccParams);
+
+ auto executor = std::make_unique<common::ArmnnNetworkExecutor<int8_t>>(config.m_ModelFilePath,
+ config.m_backends);
+
+ auto decoder = std::make_unique<asr::Decoder>(labels);
+
+ auto preprocessor = std::make_unique<Wav2LetterPreprocessor>(MFCC_WINDOW_LEN, MFCC_WINDOW_STRIDE,
+ std::move(mfccInst));
+
+ auto ptr = std::make_unique<asr::ASRPipeline>(
+ std::move(executor), std::move(decoder), std::move(preprocessor));
- auto decoder = std::make_unique<asr::Decoder>(labels);
+ ptr->SLIDING_WINDOW_OFFSET = SLIDING_WINDOW_OFFSET;
- return std::make_unique<asr::ASRPipeline>(std::move(executor), std::move(decoder));
+ return ptr;
+ }
+ else
+ {
+ throw std::invalid_argument("Unknown Model name: " + config.m_ModelName + " .");
+ }
}
}// namespace asr \ No newline at end of file
diff --git a/samples/SpeechRecognition/src/Wav2LetterMFCC.cpp b/samples/SpeechRecognition/src/Wav2LetterMFCC.cpp
new file mode 100644
index 0000000000..959bd9022e
--- /dev/null
+++ b/samples/SpeechRecognition/src/Wav2LetterMFCC.cpp
@@ -0,0 +1,126 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "Wav2LetterMFCC.hpp"
+#include "MathUtils.hpp"
+
+#include <cfloat>
+
+bool Wav2LetterMFCC::ApplyMelFilterBank(
+ std::vector<float>& fftVec,
+ std::vector<std::vector<float>>& melFilterBank,
+ std::vector<uint32_t>& filterBankFilterFirst,
+ std::vector<uint32_t>& filterBankFilterLast,
+ std::vector<float>& melEnergies)
+{
+ const size_t numBanks = melEnergies.size();
+
+ if (numBanks != filterBankFilterFirst.size() ||
+ numBanks != filterBankFilterLast.size())
+ {
+ printf("Unexpected filter bank lengths\n");
+ return false;
+ }
+
+ for (size_t bin = 0; bin < numBanks; ++bin)
+ {
+ auto filterBankIter = melFilterBank[bin].begin();
+ auto end = melFilterBank[bin].end();
+ // Avoid log of zero at later stages, same value used in librosa.
+ // The number was used during our default wav2letter model training.
+ float melEnergy = 1e-10;
+ const uint32_t firstIndex = filterBankFilterFirst[bin];
+ const uint32_t lastIndex = std::min<uint32_t>(filterBankFilterLast[bin], fftVec.size() - 1);
+
+ for (uint32_t i = firstIndex; i <= lastIndex && filterBankIter != end; ++i)
+ {
+ melEnergy += (*filterBankIter++ * fftVec[i]);
+ }
+
+ melEnergies[bin] = melEnergy;
+ }
+
+ return true;
+}
+
+void Wav2LetterMFCC::ConvertToLogarithmicScale(std::vector<float>& melEnergies)
+{
+ float maxMelEnergy = -FLT_MAX;
+
+ // Container for natural logarithms of mel energies.
+ std::vector <float> vecLogEnergies(melEnergies.size(), 0.f);
+
+ // Because we are taking natural logs, we need to multiply by log10(e).
+ // Also, for wav2letter model, we scale our log10 values by 10.
+ constexpr float multiplier = 10.0 * // Default scalar.
+ 0.4342944819032518; // log10f(std::exp(1.0))
+
+ // Take log of the whole vector.
+ MathUtils::VecLogarithmF32(melEnergies, vecLogEnergies);
+
+ // Scale the log values and get the max.
+ for (auto iterM = melEnergies.begin(), iterL = vecLogEnergies.begin();
+ iterM != melEnergies.end() && iterL != vecLogEnergies.end(); ++iterM, ++iterL)
+ {
+
+ *iterM = *iterL * multiplier;
+
+ // Save the max mel energy.
+ if (*iterM > maxMelEnergy)
+ {
+ maxMelEnergy = *iterM;
+ }
+ }
+
+ // Clamp the mel energies.
+ constexpr float maxDb = 80.0;
+ const float clampLevelLowdB = maxMelEnergy - maxDb;
+ for (float& melEnergy : melEnergies)
+ {
+ melEnergy = std::max(melEnergy, clampLevelLowdB);
+ }
+}
+
+std::vector<float> Wav2LetterMFCC::CreateDCTMatrix(
+ const int32_t inputLength,
+ const int32_t coefficientCount)
+{
+ std::vector<float> dctMatix(inputLength * coefficientCount);
+
+ // Orthonormal normalization.
+ const float normalizerK0 = 2 * sqrtf(1.0f /
+ static_cast<float>(4 * inputLength));
+ const float normalizer = 2 * sqrtf(1.0f /
+ static_cast<float>(2 * inputLength));
+
+ const float angleIncr = M_PI / inputLength;
+ float angle = angleIncr; // We start using it at k = 1 loop.
+
+ // First row of DCT will use normalizer K0.
+ for (int32_t n = 0; n < inputLength; ++n)
+ {
+ dctMatix[n] = normalizerK0; // cos(0) = 1
+ }
+
+ // Second row (index = 1) onwards, we use standard normalizer.
+ for (int32_t k = 1, m = inputLength; k < coefficientCount; ++k, m += inputLength)
+ {
+ for (int32_t n = 0; n < inputLength; ++n)
+ {
+ dctMatix[m+n] = normalizer * cosf((n + 0.5f) * angle);
+ }
+ angle += angleIncr;
+ }
+ return dctMatix;
+}
+
+float Wav2LetterMFCC::GetMelFilterBankNormaliser(
+ const float& leftMel,
+ const float& rightMel,
+ const bool useHTKMethod)
+{
+ // Slaney normalization for mel weights.
+ return (2.0f / (MFCC::InverseMelScale(rightMel, useHTKMethod) -
+ MFCC::InverseMelScale(leftMel, useHTKMethod)));
+}
diff --git a/samples/SpeechRecognition/src/Wav2LetterPreprocessor.cpp b/samples/SpeechRecognition/src/Wav2LetterPreprocessor.cpp
new file mode 100644
index 0000000000..9329d5e4d5
--- /dev/null
+++ b/samples/SpeechRecognition/src/Wav2LetterPreprocessor.cpp
@@ -0,0 +1,187 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "MathUtils.hpp"
+#include <cstring>
+#include <cmath>
+#include <numeric>
+#include <algorithm>
+#include <memory>
+#include "Wav2LetterPreprocessor.hpp"
+#include "Wav2LetterMFCC.hpp"
+
// Returns the arithmetic mean over every element of the 2-D buffer.
float Wav2LetterPreprocessor::GetMean(Array2d<float>& vec)
{
    return MathUtils::MeanF32(vec.begin(), vec.totalSize());
}
+
// Returns the population standard deviation over every element of the
// 2-D buffer, given its precomputed mean.
float Wav2LetterPreprocessor::GetStdDev(Array2d<float>& vec, const float mean)
{
    return MathUtils::StdDevF32(vec.begin(), vec.totalSize(), mean);
}
+
+void Wav2LetterPreprocessor::NormaliseVec(Array2d<float>& vec)
+{
+ auto mean = Wav2LetterPreprocessor::GetMean(vec);
+ auto stddev = Wav2LetterPreprocessor::GetStdDev(vec, mean);
+
+ if (stddev == 0)
+ {
+ std::fill(vec.begin(), vec.end(), 0);
+ }
+ else
+ {
+ const float stddevInv = 1.f/stddev;
+ const float normalisedMean = mean/stddev;
+
+ auto NormalisingFunction = [=](float &value) {
+ value = value * stddevInv - normalisedMean;
+ };
+ std::for_each(vec.begin(), vec.end(), NormalisingFunction);
+ }
+}
+
+void Wav2LetterPreprocessor::Normalise()
+{
+ Wav2LetterPreprocessor::NormaliseVec(this->m_mfccBuf);
+ Wav2LetterPreprocessor::NormaliseVec(this->m_delta1Buf);
+ Wav2LetterPreprocessor::NormaliseVec(this->m_delta2Buf);
+}
+
+float Wav2LetterPreprocessor::GetQuantElem(
+ const float elem,
+ const float quantScale,
+ const int quantOffset,
+ const float minVal,
+ const float maxVal)
+{
+ float val = std::round((elem/quantScale) + quantOffset);
+ float returnVal = std::min<float>(std::max<float>(val, minVal), maxVal);
+ return returnVal;
+}
+
+bool Wav2LetterPreprocessor::Invoke(const float* audioData, const uint32_t audioDataLen, std::vector<int8_t>& output,
+ int quantOffset, float quantScale)
+{
+ this->m_window = SlidingWindow<const float>(
+ audioData, audioDataLen,
+ this->m_windowLen, this->m_windowStride);
+
+ uint32_t mfccBufIdx = 0;
+
+ // Init buffers with 0
+ std::fill(m_mfccBuf.begin(), m_mfccBuf.end(), 0.f);
+ std::fill(m_delta1Buf.begin(), m_delta1Buf.end(), 0.f);
+ std::fill(m_delta2Buf.begin(), m_delta2Buf.end(), 0.f);
+
+ // While we can slide over the window
+ while (this->m_window.HasNext())
+ {
+ const float* mfccWindow = this->m_window.Next();
+ auto mfccAudioData = std::vector<float>(
+ mfccWindow,
+ mfccWindow + this->m_windowLen);
+
+ auto mfcc = this->m_mfcc->MfccCompute(mfccAudioData);
+ for (size_t i = 0; i < this->m_mfccBuf.size(0); ++i)
+ {
+ this->m_mfccBuf(i, mfccBufIdx) = mfcc[i];
+ }
+ ++mfccBufIdx;
+ }
+
+ // Pad MFCC if needed by repeating last feature vector
+ while (mfccBufIdx != this->m_mfcc->m_params.m_numMfccVectors)
+ {
+ memcpy(&this->m_mfccBuf(0, mfccBufIdx),
+ &this->m_mfccBuf(0, mfccBufIdx - 1), sizeof(float) * this->m_mfcc->m_params.m_numMfccFeatures);
+ ++mfccBufIdx;
+ }
+
+ // Compute first and second order deltas from MFCCs
+ Wav2LetterPreprocessor::ComputeDeltas(this->m_mfccBuf,
+ this->m_delta1Buf,
+ this->m_delta2Buf);
+
+ // Normalise
+ this->Normalise();
+
+ return this->Quantise<int8_t>(output.data(), quantOffset, quantScale);
+}
+
+bool Wav2LetterPreprocessor::ComputeDeltas(Array2d<float>& mfcc,
+ Array2d<float>& delta1,
+ Array2d<float>& delta2)
+{
+ const std::vector <float> delta1Coeffs =
+ {6.66666667e-02, 5.00000000e-02, 3.33333333e-02,
+ 1.66666667e-02, -3.46944695e-18, -1.66666667e-02,
+ -3.33333333e-02, -5.00000000e-02, -6.66666667e-02};
+
+ const std::vector <float> delta2Coeffs =
+ {0.06060606, 0.01515152, -0.01731602,
+ -0.03679654, -0.04329004, -0.03679654,
+ -0.01731602, 0.01515152, 0.06060606};
+
+ if (delta1.size(0) == 0 || delta2.size(0) != delta1.size(0) ||
+ mfcc.size(0) == 0 || mfcc.size(1) == 0)
+ {
+ return false;
+ }
+
+ // Get the middle index; coeff vec len should always be odd
+ const size_t coeffLen = delta1Coeffs.size();
+ const size_t fMidIdx = (coeffLen - 1)/2;
+ const size_t numFeatures = mfcc.size(0);
+ const size_t numFeatVectors = mfcc.size(1);
+
+ // iterate through features in MFCC vector
+ for (size_t i = 0; i < numFeatures; ++i)
+ {
+ /* for each feature, iterate through time (t) samples representing feature evolution and
+ * calculate d/dt and d^2/dt^2, using 1d convolution with differential kernels.
+ * Convolution padding = valid, result size is `time length - kernel length + 1`.
+ * The result is padded with 0 from both sides to match the size of initial time samples data.
+ *
+ * For the small filter, conv1d implementation as a simple loop is efficient enough.
+ * Filters of a greater size would need CMSIS-DSP functions to be used, like arm_fir_f32.
+ */
+
+ for (size_t j = fMidIdx; j < numFeatVectors - fMidIdx; ++j)
+ {
+ float d1 = 0;
+ float d2 = 0;
+ const size_t mfccStIdx = j - fMidIdx;
+
+ for (size_t k = 0, m = coeffLen - 1; k < coeffLen; ++k, --m)
+ {
+
+ d1 += mfcc(i,mfccStIdx + k) * delta1Coeffs[m];
+ d2 += mfcc(i,mfccStIdx + k) * delta2Coeffs[m];
+ }
+
+ delta1(i,j) = d1;
+ delta2(i,j) = d2;
+ }
+ }
+
+ return true;
+}
+
// Constructs the preprocessor, taking ownership of the MFCC engine and
// sizing the three feature buffers (MFCC, delta1, delta2) from its params.
// NOTE: m_mfcc is initialised first in the member-init list; the buffer
// initialisers below read m_mfcc->m_params, so this ordering (which must
// match the member declaration order in the class) is load-bearing.
// @param windowLen    MFCC analysis window length, in samples.
// @param windowStride Hop between successive MFCC windows, in samples.
// @param mfccInst     MFCC engine; Init() is called here when the
//                     configuration is usable.
Wav2LetterPreprocessor::Wav2LetterPreprocessor(const uint32_t windowLen,
                                               const uint32_t windowStride,
                                               std::unique_ptr<Wav2LetterMFCC> mfccInst):
        m_mfcc(std::move(mfccInst)),
        m_mfccBuf(m_mfcc->m_params.m_numMfccFeatures, m_mfcc->m_params.m_numMfccVectors),
        m_delta1Buf(m_mfcc->m_params.m_numMfccFeatures, m_mfcc->m_params.m_numMfccVectors),
        m_delta2Buf(m_mfcc->m_params.m_numMfccFeatures, m_mfcc->m_params.m_numMfccVectors),
        m_windowLen(windowLen),
        m_windowStride(windowStride)
{
    // Only initialise the MFCC tables for a usable configuration.
    if (m_mfcc->m_params.m_numMfccFeatures > 0 && windowLen > 0)
    {
        this->m_mfcc->Init();
    }
    // Start from a clean, zeroed MFCC buffer.
    std::fill(m_mfccBuf.begin(), m_mfccBuf.end(), 0.f);
}
diff --git a/samples/SpeechRecognition/test/AudioCaptureTest.cpp b/samples/SpeechRecognition/test/AudioCaptureTest.cpp
deleted file mode 100644
index 94b4e7cb7a..0000000000
--- a/samples/SpeechRecognition/test/AudioCaptureTest.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#define CATCH_CONFIG_MAIN
-#include <catch.hpp>
-#include <limits>
-
-#include "AudioCapture.hpp"
-
-TEST_CASE("Test capture of audio file")
-{
- std::string testResources = TEST_RESOURCE_DIR;
- REQUIRE(testResources != "");
- std::string file = testResources + "/" + "myVoiceIsMyPassportVerifyMe04.wav";
- asr::AudioCapture capture;
- std::vector<float> audioData = capture.LoadAudioFile(file);
- capture.InitSlidingWindow(audioData.data(), audioData.size(), 47712, 16000);
-
- std::vector<float> firstAudioBlock = capture.Next();
- float actual1 = firstAudioBlock.at(0);
- float actual2 = firstAudioBlock.at(47000);
- CHECK(std::to_string(actual1) == "0.000352");
- CHECK(std::to_string(actual2) == "-0.056441");
- CHECK(firstAudioBlock.size() == 47712);
-
- CHECK(capture.HasNext() == true);
-
- std::vector<float> secondAudioBlock = capture.Next();
- float actual3 = secondAudioBlock.at(0);
- float actual4 = secondAudioBlock.at(47000);
- CHECK(std::to_string(actual3) == "0.102077");
- CHECK(std::to_string(actual4) == "0.000194");
- CHECK(capture.HasNext() == true);
-
- std::vector<float> thirdAudioBlock = capture.Next();
- float actual5 = thirdAudioBlock.at(0);
- float actual6 = thirdAudioBlock.at(33500);
- float actual7 = thirdAudioBlock.at(33600);
- CHECK(std::to_string(actual5) == "-0.076416");
- CHECK(std::to_string(actual6) == "-0.000275");
- CHECK(std::to_string(actual7) == "0.000000");
- CHECK(capture.HasNext() == false);
-}
-
-TEST_CASE("Test sliding window of audio capture")
-{
- std::string testResources = TEST_RESOURCE_DIR;
- REQUIRE(testResources != "");
- std::string file = testResources + "/" + "myVoiceIsMyPassportVerifyMe04.wav";
- asr::AudioCapture capture;
- std::vector<float> audioData = capture.LoadAudioFile(file);
- capture.InitSlidingWindow(audioData.data(), audioData.size(), 47712, 16000);
- capture.Next();
- capture.Next();
-
- CHECK(capture.HasNext() == true);
- capture.Next();
- CHECK(capture.HasNext() == false);
-}
diff --git a/samples/SpeechRecognition/test/MFCCTest.cpp b/samples/SpeechRecognition/test/MFCCTest.cpp
index 2a552643d5..62a92fd5ba 100644
--- a/samples/SpeechRecognition/test/MFCCTest.cpp
+++ b/samples/SpeechRecognition/test/MFCCTest.cpp
@@ -6,9 +6,10 @@
#include <catch.hpp>
#include <limits>
-#include "MFCC.hpp"
+#include "Wav2LetterMFCC.hpp"
-const std::vector<float> testWav = std::vector<float>{
+const std::vector<float> testWav = std::vector<float>
+{
-3.0f, 0.0f, 1.0f, -1.0f, 2.0f, 3.0f, -2.0f, 2.0f,
1.0f, -2.0f, 0.0f, 3.0f, -1.0f, 8.0f, 3.0f, 2.0f,
-1.0f, -1.0f, 2.0f, 7.0f, 3.0f, 5.0f, 6.0f, 6.0f,
@@ -84,15 +85,16 @@ TEST_CASE("Test MFCC")
std::vector<float> fullAudioData;
- for (auto f : testWav)
- {
- fullAudioData.emplace_back( f / (1<<15));
- }
-
+ for (auto f : testWav)
+ {
+ fullAudioData.emplace_back( f / (1<<15));
+ }
- MfccParams mfccParams(sampFreq, 128, 0, 8000, numMfccFeats, frameLenSamples, false, 1);
+ MfccParams mfccParams(sampFreq, 128, 0, 8000, numMfccFeats,
+ frameLenSamples, false, 1);
- MFCC mfccInst = MFCC(mfccParams);
+ Wav2LetterMFCC mfccInst = Wav2LetterMFCC(mfccParams);
+ mfccInst.Init();
auto mfccOutput = mfccInst.MfccCompute(fullAudioData);
std::vector<float> expected = { -834.96564f, 21.02699f, 18.62856f, 7.3412f, 18.90791f, -5.36034f, 6.52351f,
diff --git a/samples/SpeechRecognition/test/PreprocessTest.cpp b/samples/SpeechRecognition/test/PreprocessTest.cpp
index 2b98831fda..f1127470fd 100644
--- a/samples/SpeechRecognition/test/PreprocessTest.cpp
+++ b/samples/SpeechRecognition/test/PreprocessTest.cpp
@@ -6,8 +6,8 @@
#include <catch.hpp>
#include <limits>
-#include "Preprocess.hpp"
#include "DataStructures.hpp"
+#include "Wav2LetterPreprocessor.hpp"
void PopulateTestWavVector(std::vector<int16_t>& vec)
{
@@ -51,9 +51,10 @@ TEST_CASE("Preprocessing calculation INT8")
/* Populate with dummy input */
PopulateTestWavVector(testWav1);
- MfccParams mfccParams(sampFreq, 128, 0, 8000, numMfccFeats, frameLenSamples, false, numMfccVectors);
+ MfccParams mfccParams(sampFreq, 128, 0, 8000, numMfccFeats,
+ frameLenSamples, false, numMfccVectors);
- MFCC mfccInst = MFCC(mfccParams);
+ std::unique_ptr<Wav2LetterMFCC> mfccInst = std::make_unique<Wav2LetterMFCC>(mfccParams);
std::vector<float> fullAudioData;
@@ -65,7 +66,7 @@ TEST_CASE("Preprocessing calculation INT8")
}
}
- Preprocess prep(frameLenSamples, windowStride, mfccInst);
+ Wav2LetterPreprocessor prep(frameLenSamples, windowStride, std::move(mfccInst));
std::vector<int8_t> outputBuffer(outputBufferSize);