summaryrefslogtreecommitdiff
path: root/tests/use_case
diff options
context:
space:
mode:
authoralexander <alexander.efremov@arm.com>2021-03-26 21:42:19 +0000
committerKshitij Sisodia <kshitij.sisodia@arm.com>2021-03-29 16:29:55 +0100
commit3c79893217bc632c9b0efa815091bef3c779490c (patch)
treead06b444557eb8124652b45621d736fa1b92f65d /tests/use_case
parent6ad6d55715928de72979b04194da1bdf04a4c51b (diff)
downloadml-embedded-evaluation-kit-3c79893217bc632c9b0efa815091bef3c779490c.tar.gz
Opensource ML embedded evaluation kit21.03
Change-Id: I12e807f19f5cacad7cef82572b6dd48252fd61fd
Diffstat (limited to 'tests/use_case')
-rw-r--r--tests/use_case/ad/AdTests.cc18
-rw-r--r--tests/use_case/ad/InferenceTestAD.cc100
-rw-r--r--tests/use_case/ad/MelSpecTests.cc226
-rw-r--r--tests/use_case/ad/PostProcessTests.cc53
-rw-r--r--tests/use_case/asr/AsrClassifierTests.cc98
-rw-r--r--tests/use_case/asr/AsrFeaturesTests.cc188
-rw-r--r--tests/use_case/asr/AsrTests.cc18
-rw-r--r--tests/use_case/asr/InferenceTestWav2Letter.cc105
-rw-r--r--tests/use_case/asr/MfccTests.cc170
-rw-r--r--tests/use_case/asr/OutputDecodeTests.cc67
-rw-r--r--tests/use_case/asr/Wav2LetterPostprocessingTest.cc199
-rw-r--r--tests/use_case/asr/Wav2LetterPreprocessingTest.cc152
-rw-r--r--tests/use_case/img_class/ImgClassTests.cc18
-rw-r--r--tests/use_case/img_class/ImgClassificationUCTest.cc140
-rw-r--r--tests/use_case/img_class/InferenceTestMobilenetV2.cc90
-rw-r--r--tests/use_case/kws/InferenceTestDSCNN.cc104
-rw-r--r--tests/use_case/kws/KWSHandlerTest.cc180
-rw-r--r--tests/use_case/kws/KwsTests.cc18
-rw-r--r--tests/use_case/kws/MfccTests.cc156
-rw-r--r--tests/use_case/kws_asr/InferenceTestDSCNN.cc111
-rw-r--r--tests/use_case/kws_asr/InferenceTestWav2Letter.cc114
-rw-r--r--tests/use_case/kws_asr/InitModels.cc52
-rw-r--r--tests/use_case/kws_asr/KwsAsrTests.cc18
-rw-r--r--tests/use_case/kws_asr/MfccTests.cc156
-rw-r--r--tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc194
-rw-r--r--tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc152
26 files changed, 2897 insertions, 0 deletions
diff --git a/tests/use_case/ad/AdTests.cc b/tests/use_case/ad/AdTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/ad/AdTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/ad/InferenceTestAD.cc b/tests/use_case/ad/InferenceTestAD.cc
new file mode 100644
index 0000000..b87699d
--- /dev/null
+++ b/tests/use_case/ad/InferenceTestAD.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <catch.hpp>
+#include <random>
+
+#include "AdModel.hpp"
+#include "AdGoldenInput.hpp"
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+
+#ifndef AD_IN_FEATURE_VEC_DATA_SIZE
+#define AD_IN_FEATURE_VEC_DATA_SIZE (1024)
+#endif /* AD_IN_FEATURE_VEC_DATA_SIZE */
+
+bool RunInference(arm::app::Model& model, const int8_t vec[])
+{
+ TfLiteTensor *inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ const size_t copySz = inputTensor->bytes < AD_IN_FEATURE_VEC_DATA_SIZE ? inputTensor->bytes : AD_IN_FEATURE_VEC_DATA_SIZE;
+
+ memcpy(inputTensor->data.data, vec, copySz);
+
+ return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+ TfLiteTensor *inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ std::uniform_int_distribution<short> dist{-128, 127};
+
+ auto gen = [&dist, &mersenneGen]() {
+ return dist(mersenneGen);
+ };
+
+ std::vector<int8_t> randomInput(inputTensor->bytes);
+ std::generate(std::begin(randomInput), std::end(randomInput), gen);
+
+ REQUIRE(RunInference(model, randomInput.data()));
+ return true;
+}
+
+template <typename T>
+void TestInference(const T *input_goldenFV, const T *output_goldenFV, arm::app::Model& model)
+{
+ REQUIRE(RunInference(model, (int8_t*)input_goldenFV));
+
+ TfLiteTensor *outputTensor = model.GetOutputTensor(0);
+
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == AD_OUT_FEATURE_VEC_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ for (size_t i = 0; i < outputTensor->bytes; i++)
+ {
+ REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+ }
+}
+
+TEST_CASE("Running random inference with TensorFlow Lite Micro and AdModel Int8", "[AD][.]")
+{
+ arm::app::AdModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ REQUIRE(RunInferenceRandom(model));
+}
+
+TEST_CASE("Running golden vector inference with TensorFlow Lite Micro and AdModel Int8", "[AD][.]")
+{
+ arm::app::AdModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ TestInference(ad_golden_input, ad_golden_out, model);
+} \ No newline at end of file
diff --git a/tests/use_case/ad/MelSpecTests.cc b/tests/use_case/ad/MelSpecTests.cc
new file mode 100644
index 0000000..affc67a
--- /dev/null
+++ b/tests/use_case/ad/MelSpecTests.cc
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AdMelSpectrogram.hpp"
+#include <limits>
+#include <algorithm>
+#include <catch.hpp>
+
+/* First 1024 samples from test wav. */
+const std::vector<int16_t> testWav1 = std::vector<int16_t>{
+ 490,495,445,403,390,259,126,146,
+ 175,134,232,243,166,145,123,33,
+ -61,-4,8,-115,-281,-292,-210,-133,
+ -98,-142,-229,-356,-415,-438,-443,-396,
+ -377,-297,-85,122,172,16,-197,-351,
+ -484,-408,-378,-405,-399,-335,-180,-141,
+ -124,-108,-46,37,141,234,264,218,
+ 147,164,132,111,125,73,2,36,
+ 107,113,93,6,-40,-153,-273,-282,
+ -291,-298,-389,-446,-394,-324,-333,-385,
+ -485,-548,-690,-718,-660,-704,-690,-601,
+ -549,-641,-637,-513,-469,-366,-227,-269,
+ -348,-408,-486,-570,-638,-666,-730,-746,
+ -710,-634,-543,-461,-281,-156,-130,-126,
+ -144,-118,-23,103,132,37,-69,-86,
+ -234,-360,-366,-330,-248,-268,-282,-169,
+ -190,-152,-151,-145,-133,-205,-263,-397,
+ -558,-656,-668,-718,-779,-828,-856,-817,
+ -761,-759,-722,-772,-873,-983,-962,-897,
+ -843,-788,-750,-677,-555,-447,-373,-218,
+ -182,-230,-204,-174,-144,-127,-231,-199,
+ -127,-194,-250,-183,-189,-254,-249,-337,
+ -417,-459,-513,-505,-481,-402,-344,-284,
+ -281,-441,-450,-423,-327,-119,102,197,
+ 208,173,102,103,165,131,15,75,
+ 283,365,322,391,303,287,372,406,
+ 493,577,640,681,577,498,524,511,
+ 476,425,380,315,337,339,408,603,
+ 749,745,672,654,588,520,523,544,
+ 557,632,636,565,491,413,368,252,
+ 136,33,1,-26,-152,-258,-98,18,
+ 1,-18,-99,-117,-109,-228,-295,-349,
+ -334,-337,-441,-373,-279,-202,-204,-219,
+ -119,149,410,489,564,623,683,642,
+ 707,872,932,862,833,862,894,784,
+ 637,559,507,394,306,420,510,484,
+ 519,526,599,789,959,1052,1063,1030,
+ 860,697,603,530,475,463,468,461,
+ 609,641,534,482,435,329,239,216,
+ 185,82,88,106,60,26,-43,-127,
+ -220,-262,-317,-259,-172,-175,-271,-217,
+ -196,-164,8,144,150,134,60,13,
+ 57,-58,-115,-171,-282,-310,-298,-106,
+ 42,-101,-172,-181,-249,-326,-262,-132,
+ -56,-82,-71,-88,-196,-325,-426,-413,
+ -411,-317,-191,-172,-195,-292,-328,-191,
+ -88,-60,21,-63,-175,-135,-64,-83,
+ -163,-279,-440,-536,-403,-308,-236,-132,
+ -95,-69,-73,-21,13,133,185,251,
+ 238,88,-66,-134,-175,-231,-219,-151,
+ -213,-328,-340,-374,-459,-601,-556,-395,
+ -248,-205,-174,-227,-402,-493,-464,-483,
+ -588,-564,-463,-493,-505,-416,-378,-313,
+ -215,-192,-192,-59,18,-40,-66,-60,
+ -143,-263,-213,-224,-265,-249,-237,-227,
+ -418,-504,-573,-699,-679,-577,-500,-570,
+ -538,-416,-444,-415,-294,-300,-427,-423,
+ -299,-279,-279,-187,-137,-123,60,230,
+ 227,277,356,413,440,418,477,594,
+ 697,729,586,561,653,570,590,628,
+ 497,357,366,470,591,576,458,439,
+ 417,431,447,349,304,241,294,406,
+ 484,516,587,598,566,465,380,347,
+ 316,391,429,409,216,69,57,76,
+ 150,101,93,113,90,41,-28,-15,
+ -2,47,208,261,333,362,239,301,
+ 422,431,426,434,482,510,480,407,
+ 244,53,-108,-234,-275,-302,-304,-207,
+ -117,-181,-214,-248,-203,-52,5,-14,
+ 24,-9,-154,-186,-82,-23,-62,-165,
+ -174,-190,-368,-414,-316,-301,-180,41,
+ 116,214,319,408,416,157,-100,-40,
+ 118,248,310,301,302,387,458,414,
+ 301,261,233,111,33,39,65,56,
+ 9,-92,-87,-98,-172,-196,-186,-18,
+ -14,-57,-111,-178,-278,-304,-358,-359,
+ -362,-464,-528,-400,-355,-284,-189,-240,
+ -253,-216,-319,-490,-621,-684,-758,-860,
+ -883,-877,-847,-787,-766,-852,-727,-481,
+ -339,-282,-266,-405,-414,-286,-225,-204,
+ -330,-488,-412,-292,-254,-290,-372,-436,
+ -545,-564,-413,-360,-344,-389,-430,-340,
+ -248,-271,-343,-383,-414,-409,-272,-223,
+ -215,-123,-10,-4,-6,-27,-11,78,
+ 169,226,139,-19,16,100,54,-75,
+ -117,-103,-77,-277,-598,-644,-602,-509,
+ -396,-232,-227,-208,-153,-146,-205,-223,
+ -108,-55,-26,-8,-42,-178,-298,-320,
+ -254,-146,-135,-262,-370,-331,-337,-394,
+ -265,-53,136,309,354,312,345,303,
+ 275,338,287,269,346,329,319,327,
+ 199,118,251,296,243,111,90,150,
+ 104,163,274,278,242,135,93,138,
+ 5,-154,-206,-270,-334,-356,-251,-96,
+ -78,-123,-80,-93,-160,-217,-214,-154,
+ -42,128,228,243,307,465,492,425,
+ 381,382,425,530,518,484,560,654,
+ 659,663,723,717,672,652,542,507,
+ 471,468,579,573,459,313,262,310,
+ 284,235,331,361,275,207,104,35,
+ 35,89,136,192,218,161,89,64,
+ 116,175,159,95,96,242,350,248,
+ 170,64,-35,-136,-202,-271,-307,-290,
+ -257,-219,-206,-185,-216,-213,-184,-135,
+ -165,-141,-25,-31,-28,-98,-247,-162,
+ 10,35,-16,-113,-139,-127,-58,-100,
+ -166,-320,-406,-462,-604,-594,-650,-538,
+ -427,-365,-196,-117,-120,-102,-66,-122,
+ -211,-235,-202,-135,-40,-10,-38,-150,
+ -286,-223,-50,93,149,86,184,128,
+ 113,163,13,-53,-135,-100,-72,-75,
+ -73,-118,-150,-197,-224,-131,-59,-109,
+ -92,-129,-189,-220,-166,-173,-114,-8,
+ 26,-27,-38,50,109,143,161,209,
+ 266,289,384,397,312,203,5,-64,
+ -14,6,56,67,19,-43,-112,-46,
+ -74,-101,-83,-115,-142,-207,-274,-292,
+ -299,-236,-181,-188,-48,60,6,-76,
+ -8,115,188,260,236,143,44,-30,
+ -17,31,37,-16,-28,87,210,276,
+ 372,365,302,270,137,-8,-142,-246,
+ -279,-259,-203,-241,-278,-254,-245,-177,
+ -77,-8,-47,-159,-295,-412,-414,-414,
+ -566,-533,-255,-82,-10,222,358,336,
+ 355,360,303,237,267,224,244,434,
+ 422,372,404,464,559,538,446,294,
+ 217,60,-82,-150,-144,-162,-250,-263,
+ -222,-148,-81,-134,-134,-106,-27,-71,
+};
+
+/* Golden log mel spec output for test wav. */
+const std::vector<float> testWavMelSpec {
+ -8.601085, -10.563560, -13.791912, -12.356619, -16.892878,
+ -16.913876, -15.695299, -21.848980, -21.193371, -18.772688,
+ -21.795116, -20.008236, -22.413673, -25.162649, -24.091856,
+ -24.936411, -19.341146, -23.534576, -29.052885, -26.562546,
+ -25.046455, -29.586889, -30.115177, -32.281334, -29.806450,
+ -30.398304, -26.682615, -27.397421, -31.224312, -31.033779,
+ -36.314369, -29.530331, -28.428139, -30.097546, -34.101303,
+ -32.660480, -34.229076, -34.668293, -35.140759, -34.104649,
+ -34.141472, -36.514408, -37.655891, -33.590931, -40.532566,
+ -39.105091, -39.600319, -40.239834, -41.356224, -41.103714,
+ -39.861557, -41.827553, -41.275696, -42.203575, -42.689217,
+ -46.495552, -46.704731, -45.560322, -47.423828, -50.672031,
+ -51.387669, -53.410839, -54.899536, -55.807552,
+};
+
+
+arm::app::audio::AdMelSpectrogram GetMelSpecInstance() {
+ int frameLenSamples = 1024;
+ return arm::app::audio::AdMelSpectrogram(frameLenSamples);
+}
+
+template <class T>
+void TestQuntisedMelSpec() {
+ float quantScale = 0.1410219967365265;
+ int quantOffset = 11;
+ std::vector<T> melSpecOutput = GetMelSpecInstance().MelSpecComputeQuant<T>(testWav1, quantScale, quantOffset);
+
+ long min_val = std::numeric_limits<T>::min();
+ long max_val = std::numeric_limits<T>::max();
+
+ for (size_t i = 0; i < testWavMelSpec.size(); i++){
+ long TestWavMelSpec = (std::lround((testWavMelSpec[i] / quantScale) + quantOffset));
+ T quantizedTestWavMelSpec = static_cast<T>(std::max(min_val, std::min(TestWavMelSpec, max_val)));
+
+ REQUIRE(quantizedTestWavMelSpec == Approx(melSpecOutput[i]).margin(1));
+ }
+}
+
+template void TestQuntisedMelSpec<int8_t>();
+template void TestQuntisedMelSpec<uint8_t>();
+template void TestQuntisedMelSpec<int16_t>();
+
+TEST_CASE("Mel Spec calculation") {
+
+ hal_platform platform;
+ data_acq_module dataAcq;
+ data_psn_module dataPsn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform */
+ hal_init(&platform, &dataAcq, &dataPsn, &timer);
+ hal_platform_init(&platform);
+
+ SECTION("FP32") {
+ auto melSpecOutput = GetMelSpecInstance().ComputeMelSpec(testWav1);
+ REQUIRE_THAT( melSpecOutput, Catch::Approx( testWavMelSpec ).margin(0.1) );
+ }
+
+ SECTION("int8_t") {
+ TestQuntisedMelSpec<int8_t>();
+ }
+
+ SECTION("uint8_t") {
+ TestQuntisedMelSpec<uint8_t>();
+ }
+
+ SECTION("int16_t") {
+ TestQuntisedMelSpec<int16_t>();
+ }
+}
diff --git a/tests/use_case/ad/PostProcessTests.cc b/tests/use_case/ad/PostProcessTests.cc
new file mode 100644
index 0000000..62fa9e7
--- /dev/null
+++ b/tests/use_case/ad/PostProcessTests.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AdPostProcessing.hpp"
+#include <catch.hpp>
+
+TEST_CASE("Softmax_vector") {
+
+ std::vector<float> testVec = {1, 2, 3, 4, 1, 2, 3};
+ arm::app::Softmax(testVec);
+ CHECK((testVec[0] - 0.024) == Approx(0.0).margin(0.001));
+ CHECK((testVec[1] - 0.064) == Approx(0.0).margin(0.001));
+ CHECK((testVec[2] - 0.175) == Approx(0.0).margin(0.001));
+ CHECK((testVec[3] - 0.475) == Approx(0.0).margin(0.001));
+ CHECK((testVec[4] - 0.024) == Approx(0.0).margin(0.001));
+ CHECK((testVec[5] - 0.064) == Approx(0.0).margin(0.001));
+ CHECK((testVec[6] - 0.175) == Approx(0.0).margin(0.001));
+}
+
+TEST_CASE("Output machine index") {
+
+ auto index = arm::app::OutputIndexFromFileName("test_id_00.wav");
+ CHECK(index == 0);
+
+ auto index1 = arm::app::OutputIndexFromFileName("test_id_02.wav");
+ CHECK(index1 == 1);
+
+ auto index2 = arm::app::OutputIndexFromFileName("test_id_4.wav");
+ CHECK(index2 == 2);
+
+ auto index3 = arm::app::OutputIndexFromFileName("test_id_6.wav");
+ CHECK(index3 == 3);
+
+ auto index4 = arm::app::OutputIndexFromFileName("test_id_id_00.wav");
+ CHECK(index4 == -1);
+
+ auto index5 = arm::app::OutputIndexFromFileName("test_id_7.wav");
+ CHECK(index5 == -1);
+} \ No newline at end of file
diff --git a/tests/use_case/asr/AsrClassifierTests.cc b/tests/use_case/asr/AsrClassifierTests.cc
new file mode 100644
index 0000000..7c71912
--- /dev/null
+++ b/tests/use_case/asr/AsrClassifierTests.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "AsrClassifier.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <catch.hpp>
+
+TEST_CASE("Test invalid classifier")
+{
+ TfLiteTensor* outputTens = nullptr;
+ std::vector <arm::app::ClassificationResult> resultVec;
+ arm::app::AsrClassifier classifier;
+
+ REQUIRE(!classifier.GetClassificationResults(outputTens, resultVec, {}, 1));
+}
+
+
+TEST_CASE("Test valid classifier UINT8") {
+ const int dimArray[] = {4, 1, 1, 246, 29};
+ std::vector <std::string> labels(29);
+ std::vector <uint8_t> outputVec(7134);
+ TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
+ TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+ outputVec.data(), dims, 1, 0, "test");
+ TfLiteTensor* outputTensor = &tfTensor;
+ std::vector <arm::app::ClassificationResult> resultVec;
+ arm::app::AsrClassifier classifier;
+
+ REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 1));
+ REQUIRE(246 == resultVec.size());
+}
+
+
+TEST_CASE("Get classification results") {
+ const int dimArray[] = {4, 1, 1, 10, 15};
+ std::vector <std::string> labels(15);
+ std::vector<uint8_t> outputVec(150, static_cast<uint8_t>(1));
+ TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
+ TfLiteTensor tfTensor = tflite::testing::CreateQuantizedTensor(
+ outputVec.data(), dims, 1, 0, "test");
+ TfLiteTensor* outputTensor = &tfTensor;
+
+ std::vector <arm::app::ClassificationResult> resultVec(10);
+
+ /* set the top five results: */
+ std::vector<std::pair<uint32_t, std::pair<uint32_t, uint8_t>>> selectedResults {
+ {0, {3, 23}},
+ {0, {9, 15}},
+ {1, {5, 24}},
+ {1, {7, 4}},
+ {2, {9, 5}},
+ {3, {8, 6}},
+ {4, {13, 10}},
+ {4, {6, 18}},
+ {5, {3, 15}},
+ {5, {4, 115}},
+ {6, {6, 25}},
+ {7, {1, 7}},
+ {8, {11, 9}},
+ {9, {1, 10}}
+ };
+
+ const uint32_t nCols = outputTensor->dims->data[arm::app::Wav2LetterModel::ms_outputColsIdx];
+ for (size_t i = 0; i < selectedResults.size(); ++i) {
+ uint32_t rIndex = selectedResults[i].first;
+ uint32_t cIndex = selectedResults[i].second.first;
+ uint8_t value = selectedResults[i].second.second;
+ outputVec[rIndex * nCols + cIndex] = value;
+ }
+
+ arm::app::AsrClassifier classifier;
+
+ REQUIRE(classifier.GetClassificationResults(outputTensor, resultVec, labels, 1));
+ REQUIRE(resultVec[0].m_labelIdx == 3);
+ REQUIRE(resultVec[1].m_labelIdx == 5);
+ REQUIRE(resultVec[2].m_labelIdx == 9);
+ REQUIRE(resultVec[3].m_labelIdx == 8);
+ REQUIRE(resultVec[4].m_labelIdx == 6);
+ REQUIRE(resultVec[5].m_labelIdx == 4);
+ REQUIRE(resultVec[6].m_labelIdx == 6);
+ REQUIRE(resultVec[7].m_labelIdx == 1);
+ REQUIRE(resultVec[8].m_labelIdx == 11);
+ REQUIRE(resultVec[9].m_labelIdx == 1);
+}
diff --git a/tests/use_case/asr/AsrFeaturesTests.cc b/tests/use_case/asr/AsrFeaturesTests.cc
new file mode 100644
index 0000000..9401f40
--- /dev/null
+++ b/tests/use_case/asr/AsrFeaturesTests.cc
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DataStructures.hpp"
+#include "AsrGoldenFeatures.hpp"
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterPreprocess.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+class TestPreprocess : public arm::app::audio::asr::Preprocess {
+public:
+ TestPreprocess()
+ : arm::app::audio::asr::Preprocess(0,0,0,0)
+ {}
+
+ bool ComputeDeltas(arm::app::Array2d<float>& mfcc,
+ arm::app::Array2d<float>& delta1,
+ arm::app::Array2d<float>& delta2)
+ {
+ return this->_ComputeDeltas(mfcc, delta1, delta2);
+ }
+
+ float GetMean(arm::app::Array2d<float>& vec)
+ {
+ return this->_GetMean(vec);
+ }
+
+ float GetStdDev(arm::app::Array2d<float>& vec, const float mean)
+ {
+ return this->_GetStdDev(vec, mean);
+ }
+
+ void NormaliseVec(arm::app::Array2d<float>& vec)
+ {
+ return this->_NormaliseVec(vec);
+ }
+};
+
+template<class T>
+void CheckOutputs(const std::vector<T> goldenOutput, std::vector<T> output)
+{
+ const size_t goldenSize = goldenOutput.size();
+ const size_t realSize = output.size();
+
+ REQUIRE(realSize == goldenSize);
+ REQUIRE_THAT(output, Catch::Approx( goldenOutput ).margin(0.0001));
+}
+template void CheckOutputs<float>(const std::vector<float> goldenOutput, std::vector<float> output);
+
+void populateBuffer(const float* input, size_t size, size_t numMfccFeats, std::vector<std::vector<float>>& buf)
+{
+ size_t time = 0;
+ for (size_t i = 0; i < size; ++i) {
+ if (i > 0 && i % numMfccFeats == 0) {
+ ++time;
+ }
+ float featureValue = *(input + i);
+ buf[i % numMfccFeats][time] = featureValue;
+ }
+}
+
+void populateArray2dWithVectorOfVector(std::vector<std::vector<float>> vec, arm::app::Array2d<float>& buf)
+{
+ for (size_t i = 0; i < vec.size(); ++i) {
+ for (size_t j = 0; j < vec[i].size(); ++j) {
+ buf(i, j) = vec[i][j];
+ }
+ }
+}
+
+TEST_CASE("Floating point asr features calculation", "[ASR]")
+{
+ TestPreprocess tp;
+
+ SECTION("First and second diff")
+ {
+ constexpr uint32_t numMfccFeats = 13;
+ constexpr uint32_t numFeatVectors = 296;
+
+ arm::app::Array2d<float> mfccBuf(numMfccFeats, numFeatVectors);
+ arm::app::Array2d<float> delta1Buf(numMfccFeats, numFeatVectors);
+ arm::app::Array2d<float> delta2Buf(numMfccFeats, numFeatVectors);
+
+ std::vector<std::vector<float>> goldenMfccBuf(numMfccFeats, std::vector<float>(numFeatVectors));
+ std::vector<std::vector<float>> goldenDelta1Buf(numMfccFeats, std::vector<float>(numFeatVectors));
+ std::vector<std::vector<float>> goldenDelta2Buf(numMfccFeats, std::vector<float>(numFeatVectors));
+
+ populateBuffer(golden_asr_mfcc, golden_asr_mfcc_len, numMfccFeats, goldenMfccBuf);
+ populateBuffer(golden_diff1_features, golden_diff1_len, numMfccFeats, goldenDelta1Buf);
+ populateBuffer(golden_diff2_features, golden_diff2_len, numMfccFeats, goldenDelta2Buf);
+
+ populateArray2dWithVectorOfVector(goldenMfccBuf, mfccBuf);
+ std::fill(delta1Buf.begin(), delta1Buf.end(), 0.f);
+ std::fill(delta2Buf.begin(), delta2Buf.end(), 0.f);
+
+ tp.ComputeDeltas(mfccBuf, delta1Buf, delta2Buf);
+
+ /* First 4 and last 4 values are different because we pad AFTER diff calculated. */
+ for (size_t i = 0; i < numMfccFeats; ++i) {
+ const float* start_goldenDelta1Buf = goldenDelta1Buf[i].data() + 4;
+ const float* start_delta1 = delta1Buf.begin() + i * delta1Buf.size(1) + 4;
+ std::vector<float> goldenDataDelta1(start_goldenDelta1Buf, start_goldenDelta1Buf + numFeatVectors - 8);
+ std::vector<float> tensorDataDelta1(start_delta1, start_delta1 + numFeatVectors - 8);
+
+ CheckOutputs<float>(goldenDataDelta1,tensorDataDelta1);
+
+ const float* start_goldenDelta2Buf = goldenDelta2Buf[i].data() + 4;
+ const float* start_delta2 = delta2Buf.begin() + i * delta2Buf.size(1) + 4;
+ std::vector<float> goldenDataDelta2(start_goldenDelta2Buf, start_goldenDelta2Buf + numFeatVectors - 8);
+ std::vector<float> tensorDataDelta2(start_delta2, start_delta2 + numFeatVectors - 8);
+
+ CheckOutputs<float>(goldenDataDelta2,tensorDataDelta2);
+ }
+
+ }
+
+ SECTION("Mean")
+ {
+ std::vector<std::vector<float>> mean1vec{{1, 2},
+ {-1, -2}};
+ arm::app::Array2d<float> mean1(2,2); /* {{1, 2},{-1, -2}} */
+ populateArray2dWithVectorOfVector(mean1vec, mean1);
+ REQUIRE(0 == Approx(tp.GetMean(mean1)));
+
+ arm::app::Array2d<float> mean2(2, 2);
+ std::fill(mean2.begin(), mean2.end(), 0.f);
+ REQUIRE(0 == Approx(tp.GetMean(mean2)));
+
+ arm::app::Array2d<float> mean3(3,3);
+ std::fill(mean3.begin(), mean3.end(), 1.f);
+ REQUIRE(1 == Approx(tp.GetMean(mean3)));
+ }
+
+ SECTION("Std")
+ {
+ arm::app::Array2d<float> std1(2, 2);
+ std::fill(std1.begin(), std1.end(), 0.f); /* {{0, 0}, {0, 0}} */
+ REQUIRE(0 == Approx(tp.GetStdDev(std1, 0)));
+
+ std::vector<std::vector<float>> std2vec{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}};
+ arm::app::Array2d<float> std2(2,5);
+ populateArray2dWithVectorOfVector(std2vec, std2);
+ const float mean = tp.GetMean(std2);
+ REQUIRE(2.872281323 == Approx(tp.GetStdDev(std2, mean)));
+
+ arm::app::Array2d<float> std3(2,2);
+ std::fill(std3.begin(), std3.end(), 1.f); /* std3{{1, 1}, {1, 1}}; */
+ REQUIRE(0 == Approx(tp.GetStdDev(std3, 1)));
+ }
+
+ SECTION("Norm") {
+ auto checker = [&](arm::app::Array2d<float>& d, std::vector<float>& g) {
+ tp.NormaliseVec(d);
+ std::vector<float> d_vec(d.begin(), d.end());
+ REQUIRE_THAT(g, Catch::Approx(d_vec));
+ };
+
+ std::vector<std::vector<float>> norm0vec{{1, 1}, {1, 1}};
+ std::vector<float> goldenNorm0 {0, 0, 0, 0};
+ arm::app::Array2d<float> norm0(2, 2);
+ populateArray2dWithVectorOfVector(norm0vec, norm0);
+ checker(norm0, goldenNorm0);
+
+ std::vector<std::vector<float>> norm1vec{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 0}};
+ std::vector<float> goldenNorm1 {
+ -1.218543592, -0.87038828, -0.522232968, -0.174077656, 0.174077656,
+ 0.522232968, 0.87038828, 1.218543592, 1.566698904, -1.566698904};
+ arm::app::Array2d<float> norm1(2, 5);
+ populateArray2dWithVectorOfVector(norm1vec, norm1);
+ checker(norm1, goldenNorm1);
+ }
+}
diff --git a/tests/use_case/asr/AsrTests.cc b/tests/use_case/asr/AsrTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/asr/AsrTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
new file mode 100644
index 0000000..1fa4092
--- /dev/null
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+#include "TestData_asr.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ memcpy(inputTensor->data.data, vec, copySz);
+
+ return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ std::uniform_int_distribution<short> dist {-128, 127};
+
+ auto gen = [&dist, &mersenneGen](){
+ return dist(mersenneGen);
+ };
+
+ std::vector<int8_t> randomAudio(inputTensor->bytes);
+ std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+ REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
+ return true;
+}
+
+/* Skipped by default: a Wav2LetterModel taken straight from the ML-zoo (i.e. not Vela optimised) will fail this test. */
+TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+ arm::app::Wav2LetterModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ REQUIRE(RunInferenceRandom(model));
+}
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
+
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ for (size_t i = 0; i < outputTensor->bytes; i++) {
+ REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+ }
+}
+
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+ for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+        auto input_goldenFV = get_ifm_data_array(i);
+ auto output_goldenFV = get_ofm_data_array(i);
+
+ DYNAMIC_SECTION("Executing inference with re-init")
+ {
+ arm::app::Wav2LetterModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/use_case/asr/MfccTests.cc b/tests/use_case/asr/MfccTests.cc
new file mode 100644
index 0000000..c70e53e
--- /dev/null
+++ b/tests/use_case/asr/MfccTests.cc
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 512 samples from itellyou.wav. */
+const std::vector<int16_t> testWav1 = std::vector<int16_t> {
+ -3,0,1,-1,2,3,-2,2,
+ 1,-2,0,3,-1,8,3,2,
+ -1,-1,2,7,3,5,6,6,
+ 6,12,5,6,3,3,5,4,
+ 4,6,7,7,7,3,7,2,
+ 8,4,4,2,-4,-1,-1,-4,
+ 2,1,-1,-4,0,-7,-6,-2,
+ -5,1,-5,-1,-7,-3,-3,-7,
+ 0,-3,3,-5,0,1,-2,-2,
+ -3,-3,-7,-3,-2,-6,-5,-8,
+ -2,-8,4,-9,-4,-9,-5,-5,
+ -3,-9,-3,-9,-1,-7,-4,1,
+ -3,2,-8,-4,-4,-5,1,-3,
+ -1,0,-1,-2,-3,-2,-4,-1,
+ 1,-1,3,0,3,2,0,0,
+ 0,-3,1,1,0,8,3,4,
+ 1,5,6,4,7,3,3,0,
+ 3,6,7,6,4,5,9,9,
+ 5,5,8,1,6,9,6,6,
+ 7,1,8,1,5,0,5,5,
+ 0,3,2,7,2,-3,3,0,
+ 3,0,0,0,2,0,-1,-1,
+ -2,-3,-8,0,1,0,-3,-3,
+ -3,-2,-3,-3,-4,-6,-2,-8,
+ -9,-4,-1,-5,-3,-3,-4,-3,
+ -6,3,0,-1,-2,-9,-4,-2,
+ 2,-1,3,-5,-5,-2,0,-2,
+ 0,-1,-3,1,-2,9,4,5,
+ 2,2,1,0,-6,-2,0,0,
+ 0,-1,4,-4,3,-7,-1,5,
+ -6,-1,-5,4,3,9,-2,1,
+ 3,0,0,-2,1,2,1,1,
+ 0,3,2,-1,3,-3,7,0,
+ 0,3,2,2,-2,3,-2,2,
+ -3,4,-1,-1,-5,-1,-3,-2,
+ 1,-1,3,2,4,1,2,-2,
+ 0,2,7,0,8,-3,6,-3,
+ 6,1,2,-3,-1,-1,-1,1,
+ -2,2,1,2,0,-2,3,-2,
+ 3,-2,1,0,-3,-1,-2,-4,
+ -6,-5,-8,-1,-4,0,-3,-1,
+ -1,-1,0,-2,-3,-7,-1,0,
+ 1,5,0,5,1,1,-3,0,
+ -6,3,-8,4,-8,6,-6,1,
+ -6,-2,-5,-6,0,-5,4,-1,
+ 4,-2,1,2,1,0,-2,0,
+ 0,2,-2,2,-5,2,0,-2,
+ 1,-2,0,5,1,0,1,5,
+ 0,8,3,2,2,0,5,-2,
+ 3,1,0,1,0,-2,-1,-3,
+ 1,-1,3,0,3,0,-2,-1,
+ -4,-4,-4,-1,-4,-4,-3,-6,
+ -3,-7,-3,-1,-2,0,-5,-4,
+ -7,-3,-2,-2,1,2,2,8,
+ 5,4,2,4,3,5,0,3,
+ 3,6,4,2,2,-2,4,-2,
+ 3,3,2,1,1,4,-5,2,
+ -3,0,-1,1,-2,2,5,1,
+ 4,2,3,1,-1,1,0,6,
+ 0,-2,-1,1,-1,2,-5,-1,
+ -5,-1,-6,-3,-3,2,4,0,
+ -1,-5,3,-4,-1,-3,-4,1,
+ -4,1,-1,-1,0,-5,-4,-2,
+ -1,-1,-3,-7,-3,-3,4,4,
+};
+
+const std::vector<int16_t> testWav2 = std::vector<int16_t> (512, 0);
+
+/* Golden mfcc output for testwav1. */
+const std::vector<float> golden_mfcc_output_testWav1 {
+ -835.24603, 21.010452, 18.699404, 7.4338417, 19.028961, -5.401735, 6.4761047, -11.400679,
+ 8.392709, 12.202361, 8.403276, -13.508412, -18.307348
+};
+
+/* Golden mfcc output for the all zero wav. */
+const std::vector<float> golden_mfcc_output_testWav2 {
+ -1131.37085, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+arm::app::audio::Wav2LetterMFCC GetMFCCInstance()
+{
+ const auto sampFreq = arm::app::audio::Wav2LetterMFCC::ms_defaultSamplingFreq;
+ const auto frameLenMs = 32;
+ const auto numMfccFeats = 13;
+ const auto frameLenSamples = sampFreq * frameLenMs * 0.001;
+ return arm::app::audio::Wav2LetterMFCC(numMfccFeats, frameLenSamples);
+}
+
template <class T>
/* Computes MFCCs for testWav1 via the quantised API and checks each
 * coefficient against the quantised golden reference within a margin. */
void TestQuantisedMFCC()
{
    const auto quantScale = 0.1410219967365265;
    const auto quantOffset = 11;
    std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav1, quantScale, quantOffset);

    /* Bounds used to clamp the quantised reference into T's range. */
    long min_val = std::numeric_limits<T>::min();
    long max_val = std::numeric_limits<T>::max();

    for (size_t i = 0; i < golden_mfcc_output_testWav1.size(); i++){
        /* Quantise the float golden value: lround(value/scale + offset). */
        long TestWavMfcc = (std::lround((golden_mfcc_output_testWav1[i] / quantScale) + quantOffset));
        T quantizedTestWavMfcc = static_cast<T>(std::max(min_val, std::min(TestWavMfcc, max_val)));

        /* Allow a margin of 2 quantisation steps. */
        REQUIRE(quantizedTestWavMfcc == Approx(mfccOutput[i]).margin(2));
    }
}

/* Explicit instantiations for the integer types exercised by the sections
 * in the "MFCC calculation" test case. */
template void TestQuantisedMFCC<int8_t>();
template void TestQuantisedMFCC<uint8_t>();
template void TestQuantisedMFCC<int16_t>();
+
TEST_CASE("MFCC calculation")
{
    /* HAL handles; the platform must be brought up before the MFCC runs. */
    hal_platform platform;
    data_acq_module dataAcq;
    data_psn_module dataPsn;
    platform_timer timer;

    /* Initialise the HAL and platform. */
    hal_init(&platform, &dataAcq, &dataPsn, &timer);
    hal_platform_init(&platform);

    SECTION("FP32")
    {
        /* Float pipeline vs golden vectors, within a coarse margin. */
        auto mfccOutput = GetMFCCInstance().MfccCompute(testWav1);
        REQUIRE_THAT( mfccOutput, Catch::Approx( golden_mfcc_output_testWav1 ).margin(0.3) );

        /* All-zero input: first coefficient non-zero, the rest zero
         * (see golden_mfcc_output_testWav2). */
        auto mfccOutput2 = GetMFCCInstance().MfccCompute(testWav2);
        REQUIRE_THAT( mfccOutput2, Catch::Approx( golden_mfcc_output_testWav2 ).margin(0.001) );
    }

    SECTION("int8_t")
    {
        TestQuantisedMFCC<int8_t>();
    }

    SECTION("uint8_t")
    {
        TestQuantisedMFCC<uint8_t>();
    }

    SECTION("int16_t")
    {
        TestQuantisedMFCC<int16_t>();
    }
}
diff --git a/tests/use_case/asr/OutputDecodeTests.cc b/tests/use_case/asr/OutputDecodeTests.cc
new file mode 100644
index 0000000..22153f3
--- /dev/null
+++ b/tests/use_case/asr/OutputDecodeTests.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "OutputDecode.hpp"
+
+#include "catch.hpp"
+
TEST_CASE("Running output decode on test vector") {

    /* 20-symbol classification window fed to the decoder. */
    std::vector<arm::app::ClassificationResult> vecResult(20);
    /* Number of test inputs. */
    const size_t numStrings = 8;

    /* The test inputs. The '$' entries appear to act as separators that
     * keep legitimate repeated characters (cf. expected outputs) -
     * presumably the CTC blank token; confirm against DecodeOutput. */
    std::string testText[numStrings][20]
    {
        {"a", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "\'", "l"}, /* initial */
        {" ", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "\'", " "}, /* space start and end */
        {"\'", "b", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "l", "\'"}, /* apostrophe start and end */
        {"a", "a", "c", "d", "e", "f", "g", "g", "g", " ", "h", "h", "i", " ", " ", "j", "k", "\'", "l", "l"}, /* Double start and end */
        {"a", "b", "c", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "\'", "\'", "l"}, /* Legit double character */
        {"a", "$", "a", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "$", "l"}, /* Legit double character start and end */
        {"$", "a", "b", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "$", "$"}, /* $$ */
        {"$", "a", "b", "d", "e", "f", "g", "g", "o", "$", "o", "h", "i", " ", " ", "j", "k", "l", "l", "l"}
    };

    /* The golden outputs for the above test inputs. */
    std::string expectedOutput[numStrings] =
    {
        {"abcdefg hi jk\'l"},
        {" bcdefg hi jk\' "},
        {"\'bcdefg hi jk\'l\'"},
        {"acdefg hi jk\'l"},
        {"abcdefgoohi jk\'l"},
        {"aadefgoohi jkll"},
        {"abdefgoohi jkl"},
        {"abdefgoohi jkl"}
    };

    /* For each test input. */
    for (size_t h = 0; h < numStrings; ++h)
    {
        /* Generate fake vecResults.m_label to mimic AsrClassifier output containing the testText. */
        for (size_t i = 0; i < 20; i++)
        {
            vecResult[i].m_label = testText[h][i];
        }
        /* Call function with fake vecResults and save returned string into 'buff'. */
        std::string buff = arm::app::audio::asr::DecodeOutput(vecResult);

        /* Check that the string returned from the function matches the expected output given above. */
        REQUIRE(buff.compare(expectedOutput[h]) == 0);
    }
}
diff --git a/tests/use_case/asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
new file mode 100644
index 0000000..9ed2e1b
--- /dev/null
+++ b/tests/use_case/asr/Wav2LetterPostprocessingTest.cc
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+template <typename T>
+static TfLiteTensor GetTestTensor(
+ std::vector <int>& shape,
+ T initVal,
+ std::vector<T>& vectorBuf)
+{
+ REQUIRE(0 != shape.size());
+
+ shape.insert(shape.begin(), shape.size());
+ uint32_t sizeInBytes = sizeof(T);
+ for (size_t i = 1; i < shape.size(); ++i) {
+ sizeInBytes *= shape[i];
+ }
+
+ /* Allocate mem. */
+ vectorBuf = std::vector<T>(sizeInBytes, initVal);
+ TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(shape.data());
+ return tflite::testing::CreateQuantizedTensor(
+ vectorBuf.data(), dims,
+ 1, 0, "test-tensor");
+}
+
+TEST_CASE("Checking return value")
+{
+ SECTION("Mismatched post processing parameters and tensor size")
+ {
+ const uint32_t ctxLen = 5;
+ const uint32_t innerLen = 3;
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+ std::vector <int> tensorShape = {1, 1, 1, 13};
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(
+ tensorShape, 100, tensorVec);
+ REQUIRE(false == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+ }
+
+ SECTION("Post processing succeeds")
+ {
+ const uint32_t ctxLen = 5;
+ const uint32_t innerLen = 3;
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+ std::vector <int> tensorShape = {1, 1, 13, 1};
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(
+ tensorShape, 100, tensorVec);
+
+ /* Copy elements to compare later. */
+ std::vector <int8_t> originalVec = tensorVec;
+
+ /* This step should not erase anything. */
+ REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+ }
+}
+
+
TEST_CASE("Postprocessing - erasing required elements")
{
    /* Tensor layout: nRows time steps (left ctx | inner | right ctx) by
     * nCols classes; the last class index is the blank token. */
    constexpr uint32_t ctxLen = 5;
    constexpr uint32_t innerLen = 3;
    constexpr uint32_t nRows = 2*ctxLen + innerLen;
    constexpr uint32_t nCols = 10;
    constexpr uint32_t blankTokenIdx = nCols - 1;
    std::vector <int> tensorShape = {1, 1, nRows, nCols};

    SECTION("First and last iteration")
    {
        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};

        std::vector <int8_t> tensorVec;
        TfLiteTensor tensor = GetTestTensor<int8_t>(
            tensorShape, 100, tensorVec);

        /* Copy elements to compare later. */
        std::vector <int8_t> originalVec = tensorVec;

        /* This step should not erase anything. */
        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
        REQUIRE(originalVec == tensorVec);
    }

    SECTION("Right context erase")
    {
        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};

        std::vector <int8_t> tensorVec;
        TfLiteTensor tensor = GetTestTensor<int8_t>(
            tensorShape, 100, tensorVec);

        /* Copy elements to compare later. */
        std::vector <int8_t> originalVec = tensorVec;

        /* This step should erase the right context only. */
        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
        REQUIRE(originalVec != tensorVec);

        /* The last ctxLen * 10 elements should be gone: erased rows are
         * all-zero except a 1 at the blank-token column. */
        for (size_t i = 0; i < ctxLen; ++i) {
            for (size_t j = 0; j < nCols; ++j) {
                /* Check right context elements are zeroed. The offset
                 * (ctxLen + innerLen) * nCols skips past left ctx + inner. */
                if (j == blankTokenIdx) {
                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
                } else {
                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
                }

                /* Check left context is preserved. */
                CHECK(tensorVec[i*nCols + j] == originalVec[i*nCols + j]);
            }
        }

        /* Check inner elements are preserved. */
        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
            CHECK(tensorVec[i] == originalVec[i]);
        }
    }

    SECTION("Left and right context erase")
    {
        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};

        std::vector <int8_t> tensorVec;
        TfLiteTensor tensor = GetTestTensor<int8_t>(
            tensorShape, 100, tensorVec);

        /* Copy elements to compare later. */
        std::vector <int8_t> originalVec = tensorVec;

        /* This step should erase right context. */
        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));

        /* Calling it the second time should erase the left context. */
        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));

        REQUIRE(originalVec != tensorVec);

        /* The first and last ctxLen * 10 elements should be gone. */
        for (size_t i = 0; i < ctxLen; ++i) {
            for (size_t j = 0; j < nCols; ++j) {
                /* Check left and right context elements are zeroed
                 * (blank-token column set to 1, as above). */
                if (j == blankTokenIdx) {
                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
                    CHECK(tensorVec[i*nCols + j] == 1);
                } else {
                    CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
                    CHECK(tensorVec[i*nCols + j] == 0);
                }
            }
        }

        /* Check inner elements are preserved. */
        for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
            CHECK(tensorVec[i] == originalVec[i]);
        }
    }

    SECTION("Try left context erase")
    {
        /* Should not be able to erase the left context if it is the first iteration. */
        arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};

        std::vector <int8_t> tensorVec;
        TfLiteTensor tensor = GetTestTensor<int8_t>(
            tensorShape, 100, tensorVec);

        /* Copy elements to compare later. */
        std::vector <int8_t> originalVec = tensorVec;

        /* First iteration: nothing may be erased. */
        REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));

        REQUIRE(originalVec == tensorVec);
    }
}
diff --git a/tests/use_case/asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
new file mode 100644
index 0000000..1391011
--- /dev/null
+++ b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include <limits>
+#include <algorithm>
+#include <catch.hpp>
+
+constexpr uint32_t numMfccFeatures = 13;
+constexpr uint32_t numMfccVectors = 10;
+
+/* Test vector output: generated using test-asr-preprocessing.py. */
+int8_t expectedResult[numMfccVectors][numMfccFeatures * 3] = {
+ /* Feature vec 0. */
+ -32, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11, /* MFCCs. */
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, /* Delta 1. */
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, /* Delta 2. */
+
+ /* Feature vec 1. */
+ -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+ /* Feature vec 2. */
+ -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -12, -12, -12,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+ /* Feature vec 3. */
+ -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+ /* Feature vec 4 : this should have valid delta 1 and delta 2. */
+ -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+ -38, -29, -9, 1, -2, -7, -8, -8, -12, -16, -14, -5, 5,
+ -68, -50, -13, 5, 0, -9, -9, -8, -13, -20, -19, -3, 15,
+
+ /* Feature vec 5 : this should have valid delta 1 and delta 2. */
+ -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -11, -12, -12,
+ -62, -45, -11, 5, 0, -8, -9, -8, -12, -19, -17, -3, 13,
+ -27, -22, -13, -9, -11, -12, -12, -11, -11, -13, -13, -10, -6,
+
+ /* Feature vec 6. */
+ -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+ /* Feature vec 7. */
+ -32, 4, -9, -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+ /* Feature vec 8. */
+ -32, 4, -9, -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,
+
+ /* Feature vec 9. */
+ -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
+ -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
+ -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10
+};
+
/* Fills vec with a deterministic quadratic test signal: successive squares
 * 0, 1, 4, 9, ... that wrap back to 0 whenever the square would exceed the
 * int16_t maximum. A non-linear signal is wanted so that first- and
 * second-order differential filters both produce a non-trivial response. */
void PopulateTestWavVector(std::vector<int16_t>& vec)
{
    constexpr int maxAmplitude = std::numeric_limits<int16_t>::max();
    int counter = 0;

    for (auto& sample : vec) {
        int squared = counter * counter;

        /* Wrap before overflowing the sample range. */
        if (squared > maxAmplitude) {
            counter = 0;
            squared = 0;
        }
        sample = static_cast<int16_t>(squared);
        ++counter;
    }
}
+
TEST_CASE("Preprocessing calculation INT8")
{
    /* Initialise the HAL and platform. */
    hal_platform platform;
    data_acq_module data_acq;
    data_psn_module data_psn;
    platform_timer timer;
    hal_init(&platform, &data_acq, &data_psn, &timer);
    hal_platform_init(&platform);

    /* Constants. First dimArray entry is the dimension count, followed by
     * {1, features*3, vectors} - presumably matching IntArrayFromInts'
    * convention; confirm against the TFLM test helpers. */
    const uint32_t windowLen = 512;
    const uint32_t windowStride = 160;
    const int dimArray[] = {3, 1, numMfccFeatures * 3, numMfccVectors};
    const float quantScale = 0.1410219967365265;
    const int quantOffset = -11;

    /* Test wav memory: enough samples for numMfccVectors strides plus the
     * final (windowLen - windowStride) tail of the last window. */
    std::vector <int16_t> testWav((windowStride * numMfccVectors) +
                                  (windowLen - windowStride));

    /* Populate with dummy input. */
    PopulateTestWavVector(testWav);

    /* Allocate mem for tensor. */
    std::vector<int8_t> tensorVec(dimArray[1]*dimArray[2]*dimArray[3]);

    /* Initialise dimensions and the test tensor. */
    TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
    TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
        tensorVec.data(), dims, quantScale, quantOffset, "preprocessedInput");

    /* Initialise pre-processing module. */
    arm::app::audio::asr::Preprocess prep{
        numMfccFeatures, windowLen, windowStride, numMfccVectors};

    /* Invoke pre-processing. */
    REQUIRE(prep.Invoke(testWav.data(), testWav.size(), &tensor));

    /* Wrap the tensor with a std::vector for ease. */
    int8_t * tensorData = tflite::GetTensorData<int8_t>(&tensor);
    std::vector <int8_t> vecResults =
        std::vector<int8_t>(tensorData, tensorData + tensor.bytes);

    /* Check sizes. */
    REQUIRE(vecResults.size() == sizeof(expectedResult));

    /* Check that the elements have been calculated correctly: each feature
     * vector j carries MFCCs, delta1 and delta2 (numMfccFeatures each). */
    for (uint32_t j = 0; j < numMfccVectors; ++j) {
        for (uint32_t i = 0; i < numMfccFeatures * 3; ++i) {
            size_t tensorIdx = (j * numMfccFeatures * 3) + i;
            CHECK(vecResults[tensorIdx] == expectedResult[j][i]);
        }
    }
}
diff --git a/tests/use_case/img_class/ImgClassTests.cc b/tests/use_case/img_class/ImgClassTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/img_class/ImgClassTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/img_class/ImgClassificationUCTest.cc b/tests/use_case/img_class/ImgClassificationUCTest.cc
new file mode 100644
index 0000000..abfcc44
--- /dev/null
+++ b/tests/use_case/img_class/ImgClassificationUCTest.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ClassificationResult.hpp"
+#include "Classifier.hpp"
+#include "hal.h"
+#include "Labels.hpp"
+#include "MobileNetModel.hpp"
+#include "UseCaseHandler.hpp"
+#include "UseCaseCommonUtils.hpp"
+
+#include <catch.hpp>
+
TEST_CASE("Model info")
{
    /* Model wrapper object. */
    arm::app::MobileNetModel model;

    /* Load the model. */
    REQUIRE(model.Init());

    /* Instantiate application context. NOTE(review): the context is
     * populated but never passed on - ShowModelInfoHandler is called on
     * the model directly; confirm whether the context is needed here. */
    arm::app::ApplicationContext caseContext;

    caseContext.Set<arm::app::Model&>("model", model);

    REQUIRE(model.ShowModelInfoHandler());
}
+
+
/* Hidden by default (the '.' tag): runs a real inference end-to-end. */
TEST_CASE("Inference by index", "[.]")
{
    hal_platform platform;
    data_acq_module data_acq;
    data_psn_module data_psn;
    platform_timer timer;

    /* Initialise the HAL and platform. */
    hal_init(&platform, &data_acq, &data_psn, &timer);
    hal_platform_init(&platform);

    /* Model wrapper object. */
    arm::app::MobileNetModel model;

    /* Load the model. */
    REQUIRE(model.Init());

    /* Instantiate application context. */
    arm::app::ApplicationContext caseContext;

    caseContext.Set<hal_platform&>("platform", platform);
    caseContext.Set<arm::app::Model&>("model", model);
    caseContext.Set<uint32_t>("imgIndex", 0);
    arm::app::Classifier classifier; /* Classifier wrapper object. */
    caseContext.Set<arm::app::Classifier&>("classifier", classifier);

    std::vector <std::string> labels;
    GetLabelsVector(labels);
    caseContext.Set<const std::vector <std::string>&>("labels", labels);

    /* Classify image 0 only (runAll = false). */
    REQUIRE(arm::app::ClassifyImageHandler(caseContext, 0, false));

    auto results = caseContext.Get<std::vector<arm::app::ClassificationResult>>("results");

    /* 282: expected top-1 label index for the bundled test image. */
    REQUIRE(results[0].m_labelIdx == 282);
}
+
+
/* Hidden by default (the '.' tag): classifies every bundled image. */
TEST_CASE("Inference run all images", "[.]")
{
    hal_platform platform;
    data_acq_module data_acq;
    data_psn_module data_psn;
    platform_timer timer;

    /* Initialise the HAL and platform. */
    hal_init(&platform, &data_acq, &data_psn, &timer);
    hal_platform_init(&platform);

    /* Model wrapper object. */
    arm::app::MobileNetModel model;

    /* Load the model. */
    REQUIRE(model.Init());

    /* Instantiate application context. */
    arm::app::ApplicationContext caseContext;

    caseContext.Set<hal_platform&>("platform", platform);
    caseContext.Set<arm::app::Model&>("model", model);
    caseContext.Set<uint32_t>("imgIndex", 0);
    arm::app::Classifier classifier; /* classifier wrapper object. */
    caseContext.Set<arm::app::Classifier&>("classifier", classifier);

    std::vector <std::string> labels;
    GetLabelsVector(labels);
    caseContext.Set<const std::vector <std::string>&>("labels", labels);

    /* runAll = true: only the handler's success is checked here. */
    REQUIRE(arm::app::ClassifyImageHandler(caseContext, 0, true));
}
+
+
/* Smoke test: the file-listing handler must succeed on an initialised
 * platform and model. */
TEST_CASE("List all images")
{
    hal_platform platform;
    data_acq_module data_acq;
    data_psn_module data_psn;
    platform_timer timer;

    /* Initialise the HAL and platform. */
    hal_init(&platform, &data_acq, &data_psn, &timer);
    hal_platform_init(&platform);

    /* Model wrapper object. */
    arm::app::MobileNetModel model;

    /* Load the model. */
    REQUIRE(model.Init());

    /* Instantiate application context. */
    arm::app::ApplicationContext caseContext;

    caseContext.Set<hal_platform&>("platform", platform);
    caseContext.Set<arm::app::Model&>("model", model);

    REQUIRE(arm::app::ListFilesHandler(caseContext));
}
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
new file mode 100644
index 0000000..698382f
--- /dev/null
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "ImageUtils.hpp"
+#include "MobileNetModel.hpp"
+#include "TensorFlowLiteMicro.hpp"
+#include "TestData_img_class.hpp"
+
+#include <catch.hpp>
+
+
+bool RunInference(arm::app::Model& model, const uint8_t imageData[])
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+ inputTensor->bytes :
+ IFM_DATA_SIZE;
+ memcpy(inputTensor->data.data, imageData, copySz);
+
+ if(model.IsDataSigned()){
+ convertImgIoInt8(inputTensor->data.data, copySz);
+ }
+
+ return model.RunInference();
+}
+
+template<typename T>
+void TestInference(int imageIdx, arm::app::Model& model, T tolerance) {
+ auto image = get_ifm_data_array(imageIdx);
+ auto goldenFV = get_ofm_data_array(imageIdx);
+
+ REQUIRE(RunInference(model, image));
+
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ for (size_t i = 0; i < outputTensor->bytes; i++) {
+ REQUIRE((int)tensorData[i] == Approx((int)((T)goldenFV[i])).epsilon(tolerance));
+ }
+}
+
+
/* NOTE(review): 'MobileNeV2' in the title looks like a typo for
 * 'MobileNetV2' - fixing it would change the reported test name, so it is
 * only flagged here. */
TEST_CASE("Running inference with TensorFlow Lite Micro and MobileNeV2 Uint8", "[MobileNetV2]")
{
    SECTION("Executing inferences sequentially")
    {
        /* One model instance reused across all feature-map files. */
        arm::app::MobileNetModel model{};

        REQUIRE_FALSE(model.IsInited());
        REQUIRE(model.Init());
        REQUIRE(model.IsInited());

        for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
            TestInference<uint8_t>(i, model, 1);
        }
    }

    /* Same inferences, but with a fresh model per file. */
    for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
        DYNAMIC_SECTION("Executing inference with re-init")
        {
            arm::app::MobileNetModel model{};

            REQUIRE_FALSE(model.IsInited());
            REQUIRE(model.Init());
            REQUIRE(model.IsInited());

            TestInference<uint8_t>(i, model, 1);
        }
    }
}
diff --git a/tests/use_case/kws/InferenceTestDSCNN.cc b/tests/use_case/kws/InferenceTestDSCNN.cc
new file mode 100644
index 0000000..06358a4
--- /dev/null
+++ b/tests/use_case/kws/InferenceTestDSCNN.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "hal.h"
+#include "TestData_kws.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+bool RunInference(arm::app::Model& model, const int8_t vec[])
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+ inputTensor->bytes :
+ IFM_DATA_SIZE;
+ memcpy(inputTensor->data.data, vec, copySz);
+
+ return model.RunInference();
+}
+
+bool RunInferenceRandom(arm::app::Model& model)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ std::uniform_int_distribution<short> dist {-128, 127};
+
+ auto gen = [&dist, &mersenneGen](){
+ return dist(mersenneGen);
+ };
+
+ std::vector<int8_t> randomAudio(inputTensor->bytes);
+ std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+ REQUIRE(RunInference(model, randomAudio.data()));
+ return true;
+}
+
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+ REQUIRE(RunInference(model, input_goldenFV));
+
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ for (size_t i = 0; i < outputTensor->bytes; i++) {
+ REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+ }
+}
+
/* Smoke test: random input through a freshly initialised DS-CNN model. */
TEST_CASE("Running random inference with TensorFlow Lite Micro and DsCnnModel Int8", "[DS_CNN]")
{
    arm::app::DsCnnModel model{};

    /* Wrapper must only report initialised after Init() succeeds. */
    REQUIRE_FALSE(model.IsInited());
    REQUIRE(model.Init());
    REQUIRE(model.IsInited());

    REQUIRE(RunInferenceRandom(model));
}
+
+TEST_CASE("Running inference with TensorFlow Lite Micro and DsCnnModel Uint8", "[DS_CNN]")
+{
+ for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ const int8_t* input_goldenFV = get_ifm_data_array(i);;
+ const int8_t* output_goldenFV = get_ofm_data_array(i);
+
+ DYNAMIC_SECTION("Executing inference with re-init")
+ {
+ arm::app::DsCnnModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+ }
+ }
+}
diff --git a/tests/use_case/kws/KWSHandlerTest.cc b/tests/use_case/kws/KWSHandlerTest.cc
new file mode 100644
index 0000000..dee2f6f
--- /dev/null
+++ b/tests/use_case/kws/KWSHandlerTest.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <catch.hpp>
+#include "DsCnnModel.hpp"
+#include "hal.h"
+
+#include "KwsResult.hpp"
+#include "Labels.hpp"
+#include "UseCaseHandler.hpp"
+#include "Classifier.hpp"
+#include "UseCaseCommonUtils.hpp"
+
+/* Verifies the DS-CNN model loads and can report its own model info. */
+TEST_CASE("Model info")
+{
+ /* Model wrapper object. */
+ arm::app::DsCnnModel model;
+
+ /* Load the model. */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context. */
+ arm::app::ApplicationContext caseContext;
+
+ /* NOTE(review): ShowModelInfoHandler() is called on the model directly,
+ * so this context entry appears unused in this test - confirm. */
+ caseContext.Set<arm::app::Model&>("model", model);
+
+ REQUIRE(model.ShowModelInfoHandler());
+}
+
+
+/* Runs keyword spotting on four known clips (by index) and checks the
+ * classified top-label sequence against the expected label indices. */
+TEST_CASE("Inference by index")
+{
+ hal_platform platform;
+ data_acq_module data_acq;
+ data_psn_module data_psn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform. */
+ hal_init(&platform, &data_acq, &data_psn, &timer);
+ hal_platform_init(&platform);
+
+ /* Model wrapper object. */
+ arm::app::DsCnnModel model;
+
+ /* Load the model. */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context. */
+ arm::app::ApplicationContext caseContext;
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+ caseContext.Set<int>("frameLength", g_FrameLength); /* 640 sample length for DSCNN. */
+ caseContext.Set<int>("frameStride", g_FrameStride); /* 320 sample stride for DSCNN. */
+ caseContext.Set<float>("scoreThreshold", 0.5); /* Normalised score threshold. */
+
+ arm::app::Classifier classifier; /* classifier wrapper object. */
+ caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+ /* Runs the handler on clip `audioIndex` and checks the top label of
+ * every produced result against `labelIndex`, in order. */
+ auto checker = [&](uint32_t audioIndex, std::vector<uint32_t> labelIndex)
+ {
+ caseContext.Set<uint32_t>("audioIndex", audioIndex);
+
+ std::vector<std::string> labels;
+ GetLabelsVector(labels);
+ caseContext.Set<const std::vector<std::string> &>("labels", labels);
+
+ REQUIRE(arm::app::ClassifyAudioHandler(caseContext, audioIndex, false));
+ REQUIRE(caseContext.Has("results"));
+
+ auto results = caseContext.Get<std::vector<arm::app::kws::KwsResult>>("results");
+
+ /* One expected label per inference window result. */
+ REQUIRE(results.size() == labelIndex.size());
+
+ for (size_t i = 0; i < results.size(); i++ ) {
+ REQUIRE(results[i].m_resultVec.size());
+ REQUIRE(results[i].m_resultVec[0].m_labelIdx == labelIndex[i]);
+ }
+
+ };
+
+ SECTION("Index = 0, short clip down")
+ {
+ /* Result: down. */
+ checker(0, {5});
+ }
+
+ SECTION("Index = 1, long clip right->left->up")
+ {
+ /* Result: right->right->left->up->up. */
+ checker(1, {7, 1, 6, 4, 4});
+ }
+
+ SECTION("Index = 2, short clip yes")
+ {
+ /* Result: yes. */
+ checker(2, {2});
+ }
+
+ SECTION("Index = 3, long clip yes->no->go->stop")
+ {
+ /* Result: yes->go->no->go->go->go->stop. */
+ checker(3, {2, 11, 3, 11, 11, 11, 10});
+ }
+}
+
+
+/* Runs the classify handler across the whole built-in clip set (runAll =
+ * true) with a high score threshold; only checks the handler succeeds. */
+TEST_CASE("Inference run all clips")
+{
+ hal_platform platform;
+ data_acq_module data_acq;
+ data_psn_module data_psn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform. */
+ hal_init(&platform, &data_acq, &data_psn, &timer);
+ hal_platform_init(&platform);
+
+ /* Model wrapper object. */
+ arm::app::DsCnnModel model;
+
+ /* Load the model. */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context. */
+ arm::app::ApplicationContext caseContext;
+
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+ caseContext.Set<uint32_t>("clipIndex", 0);
+ caseContext.Set<int>("frameLength", g_FrameLength); /* 640 sample length for DSCNN. */
+ caseContext.Set<int>("frameStride", g_FrameStride); /* 320 sample stride for DSCNN. */
+ caseContext.Set<float>("scoreThreshold", 0.9); /* Normalised score threshold. */
+ arm::app::Classifier classifier; /* classifier wrapper object. */
+ caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+ std::vector <std::string> labels;
+ GetLabelsVector(labels);
+ caseContext.Set<const std::vector <std::string>&>("labels", labels);
+ REQUIRE(arm::app::ClassifyAudioHandler(caseContext, 0, true));
+}
+
+
+/* Verifies the file-listing handler runs cleanly on the built-in clips. */
+TEST_CASE("List all audio clips")
+{
+ hal_platform platform;
+ data_acq_module data_acq;
+ data_psn_module data_psn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform. */
+ hal_init(&platform, &data_acq, &data_psn, &timer);
+ hal_platform_init(&platform);
+
+ /* Model wrapper object. */
+ arm::app::DsCnnModel model;
+
+ /* Load the model. */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context. */
+ arm::app::ApplicationContext caseContext;
+
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+
+ REQUIRE(arm::app::ListFilesHandler(caseContext));
+} \ No newline at end of file
diff --git a/tests/use_case/kws/KwsTests.cc b/tests/use_case/kws/KwsTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/kws/KwsTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/kws/MfccTests.cc b/tests/use_case/kws/MfccTests.cc
new file mode 100644
index 0000000..407861f
--- /dev/null
+++ b/tests/use_case/kws/MfccTests.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 640 samples from yes.wav. */
+const std::vector<int16_t> testWav = std::vector<int16_t>{
+ 139, 143, 164, 163, 157, 156, 151, 148, 172, 171,
+ 165, 169, 149, 142, 145, 147, 166, 146, 112, 132,
+ 132, 136, 165, 176, 176, 152, 138, 158, 179, 185,
+ 183, 148, 121, 130, 167, 204, 163, 132, 165, 184,
+ 193, 205, 210, 204, 195, 178, 168, 197, 207, 201,
+ 197, 177, 185, 196, 191, 198, 196, 183, 193, 181,
+ 157, 170, 167, 159, 164, 152, 146, 167, 180, 171,
+ 194, 232, 204, 173, 171, 172, 184, 169, 175, 199,
+ 200, 195, 185, 214, 214, 193, 196, 191, 204, 191,
+ 172, 187, 183, 192, 203, 172, 182, 228, 232, 205,
+ 177, 174, 191, 210, 210, 211, 197, 177, 198, 217,
+ 233, 236, 203, 191, 169, 145, 149, 161, 198, 206,
+ 176, 137, 142, 181, 200, 215, 201, 188, 166, 162,
+ 184, 155, 135, 132, 126, 142, 169, 184, 172, 156,
+ 132, 119, 150, 147, 154, 160, 125, 130, 137, 154,
+ 161, 168, 195, 182, 160, 134, 138, 146, 130, 120,
+ 101, 122, 137, 118, 117, 131, 145, 140, 146, 148,
+ 148, 168, 159, 134, 114, 114, 130, 147, 147, 134,
+ 125, 98, 107, 127, 99, 79, 84, 107, 117, 114,
+ 93, 92, 127, 112, 109, 110, 96, 118, 97, 87,
+ 110, 95, 128, 153, 147, 165, 146, 106, 101, 137,
+ 139, 96, 73, 90, 91, 51, 69, 102, 100, 103,
+ 96, 101, 123, 107, 82, 89, 118, 127, 99, 100,
+ 111, 97, 111, 123, 106, 121, 133, 103, 100, 88,
+ 85, 111, 114, 125, 102, 91, 97, 84, 139, 157,
+ 109, 66, 72, 129, 111, 90, 127, 126, 101, 109,
+ 142, 138, 129, 159, 140, 80, 74, 78, 76, 98,
+ 68, 42, 106, 143, 112, 102, 115, 114, 82, 75,
+ 92, 80, 110, 114, 66, 86, 119, 101, 101, 103,
+ 118, 145, 85, 40, 62, 88, 95, 87, 73, 64,
+ 86, 71, 71, 105, 80, 73, 96, 92, 85, 90,
+ 81, 86, 105, 100, 89, 78, 102, 114, 95, 98,
+ 69, 70, 108, 112, 111, 90, 104, 137, 143, 160,
+ 145, 121, 98, 86, 91, 87, 115, 123, 109, 99,
+ 85, 120, 131, 116, 125, 144, 153, 111, 98, 110,
+ 93, 89, 101, 137, 155, 142, 108, 94, 136, 145,
+ 129, 129, 122, 109, 90, 76, 81, 110, 119, 96,
+ 95, 102, 105, 111, 90, 89, 111, 115, 86, 51,
+ 107, 140, 105, 105, 110, 142, 125, 76, 75, 69,
+ 65, 52, 61, 69, 55, 42, 47, 58, 37, 35,
+ 24, 20, 44, 22, 16, 26, 6, 3, 4, 23,
+ 60, 51, 30, 12, 24, 31, -9, -16, -13, 13,
+ 19, 9, 37, 55, 70, 36, 23, 57, 45, 33,
+ 50, 59, 18, 11, 62, 74, 52, 8, -3, 26,
+ 51, 48, -5, -9, 12, -7, -12, -5, 28, 41,
+ -2, -30, -13, 31, 33, -12, -22, -8, -15, -17,
+ 2, -6, -25, -27, -24, -8, 4, -9, -52, -47,
+ -9, -32, -45, -5, 41, 15, -32, -14, 2, -1,
+ -10, -30, -32, -25, -21, -17, -14, 8, -4, -13,
+ 34, 18, -36, -38, -18, -19, -28, -17, -14, -16,
+ -2, -20, -27, 12, 11, -17, -33, -12, -22, -64,
+ -42, -26, -23, -22, -37, -51, -53, -30, -18, -48,
+ -69, -38, -54, -96, -72, -49, -50, -57, -41, -22,
+ -43, -64, -54, -23, -49, -69, -41, -44, -42, -49,
+ -40, -26, -54, -50, -38, -49, -70, -94, -89, -69,
+ -56, -65, -71, -47, -39, -49, -79, -91, -56, -46,
+ -62, -86, -64, -32, -47, -50, -71, -77, -65, -68,
+ -52, -51, -61, -67, -61, -81, -93, -52, -59, -62,
+ -51, -75, -76, -50, -32, -54, -68, -70, -43, 1,
+ -42, -92, -80, -41, -38, -79, -69, -49, -82, -122,
+ -93, -21, -24, -61, -70, -73, -62, -74, -69, -43,
+ -25, -15, -43, -23, -26, -69, -44, -12, 1, -51,
+ -78, -13, 3, -53, -105, -72, -24, -62, -66, -31,
+ -40, -65, -86, -64, -44, -55, -63, -61, -37, -41,
+};
+
+/* Golden audio ops mfcc output for the above wav. */
+const std::vector<float> testWavMfcc {
+ -22.67135, -0.61615, 2.07233, 0.58137, 1.01655, 0.85816, 0.46039, 0.03393, 1.16511, 0.0072,
+};
+
+/* Builds a DS-CNN MFCC extractor: 10 coefficients over a 40 ms frame at
+ * the model's default sampling frequency. */
+arm::app::audio::DsCnnMFCC GetMFCCInstance() {
+ const int sampFreq = arm::app::audio::DsCnnMFCC::ms_defaultSamplingFreq;
+ const int frameLenMs = 40;
+ const int frameLenSamples = sampFreq * frameLenMs * 0.001;
+ const int numMfccFeats = 10;
+
+ return arm::app::audio::DsCnnMFCC(numMfccFeats, frameLenSamples);
+}
+
+/* Computes quantised MFCC features for the test wav and checks each value
+ * against the quantised golden reference, saturated to T's numeric range. */
+template <class T>
+void TestQuantisedMFCC() {
+ const float quantScale = 1.1088106632232666;
+ const int quantOffset = 95;
+ std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav, quantScale, quantOffset);
+
+ const long min_val = std::numeric_limits<T>::min();
+ const long max_val = std::numeric_limits<T>::max();
+
+ for (size_t i = 0; i < testWavMfcc.size(); ++i){
+ /* Quantise the golden float value, then clamp into T's range. */
+ long TestWavMfcc = (std::lround((testWavMfcc[i] / quantScale) + quantOffset));
+ T quantizedTestWavMfcc = static_cast<T>(std::max(min_val, std::min(TestWavMfcc, max_val)));
+
+ REQUIRE(quantizedTestWavMfcc == Approx(mfccOutput[i]).margin(0));
+ }
+}
+template void TestQuantisedMFCC<int8_t>();
+template void TestQuantisedMFCC<uint8_t>();
+template void TestQuantisedMFCC<int16_t>();
+
+
+/* Checks float MFCC output against the golden reference, then the
+ * quantised variants for int8/uint8/int16. */
+TEST_CASE("MFCC calculation test") {
+ hal_platform platform;
+ data_acq_module dataAcq;
+ data_psn_module dataPsn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform. */
+ hal_init(&platform, &dataAcq, &dataPsn, &timer);
+ hal_platform_init(&platform);
+
+ SECTION("FP32")
+ {
+ auto mfccOutput = GetMFCCInstance().MfccCompute(testWav);
+ REQUIRE_THAT( mfccOutput, Catch::Approx(testWavMfcc).margin(0.0001) );
+ }
+
+ SECTION("int8_t")
+ {
+ TestQuantisedMFCC<int8_t>();
+ }
+
+ SECTION("uint8_t")
+ {
+ TestQuantisedMFCC<uint8_t>();
+ }
+
+ SECTION("MFCC quant calculation test - int16_t")
+ {
+ TestQuantisedMFCC<int16_t>();
+ }
+} \ No newline at end of file
diff --git a/tests/use_case/kws_asr/InferenceTestDSCNN.cc b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
new file mode 100644
index 0000000..f0e5c02
--- /dev/null
+++ b/tests/use_case/kws_asr/InferenceTestDSCNN.cc
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "hal.h"
+#include "TestData_kws.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+namespace arm {
+namespace app {
+namespace kws {
+/* Copies up to IFM_DATA_SIZE bytes of `vec` into the model's first input
+ * tensor and runs one inference; returns the inference status. */
+bool RunInference(arm::app::Model& model, const int8_t vec[])
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ /* Never copy more than the tensor can hold. */
+ const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ?
+ inputTensor->bytes :
+ IFM_DATA_SIZE;
+ memcpy(inputTensor->data.data, vec, copySz);
+
+ return model.RunInference();
+}
+
+/* Fills the model's first input tensor with random int8 data and runs a
+ * single inference; REQUIREs success and returns true. */
+bool RunInferenceRandom(arm::app::Model& model)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ /* `short` distribution (int8_t is not a valid distribution type in the
+ * standard); values stay within int8 range [-128, 127]. */
+ std::uniform_int_distribution<short> dist {-128, 127};
+
+ auto gen = [&dist, &mersenneGen](){
+ return dist(mersenneGen);
+ };
+
+ std::vector<int8_t> randomAudio(inputTensor->bytes);
+ std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+ REQUIRE(RunInference(model, randomAudio.data()));
+ return true;
+}
+
+/* Runs inference on a golden input feature vector and compares every
+ * element of the output tensor with the golden output feature vector.
+ * @param[in] input_goldenFV Golden input (IFM) data.
+ * @param[in] output_goldenFV Golden output (OFM) data to compare against.
+ * @param[in,out] model Initialised model to run. */
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+ REQUIRE(RunInference(model, input_goldenFV));
+
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ /* Iterate over elements, not raw bytes: for T wider than one byte a
+ * byte-count bound would over-run the typed arrays. */
+ for (size_t i = 0; i < outputTensor->bytes / sizeof(T); i++) {
+ REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+ }
+}
+
+/* Smoke test: the model initialises from scratch and survives one
+ * inference on random input data. */
+TEST_CASE("Running random inference with Tflu and DsCnnModel Int8", "[DS_CNN]")
+{
+ arm::app::DsCnnModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ REQUIRE(RunInferenceRandom(model));
+}
+
+/* Golden-data regression test: for every stored feature-map pair, re-init
+ * the model and check the inference output matches the golden OFM.
+ * (Title corrected to Int8 - the data and model are int8_t, not uint8.) */
+TEST_CASE("Running inference with Tflu and DsCnnModel Int8", "[DS_CNN]")
+{
+ for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ const int8_t* input_goldenFV = get_ifm_data_array(i);
+ const int8_t* output_goldenFV = get_ofm_data_array(i);
+
+ DYNAMIC_SECTION("Executing inference with re-init")
+ {
+ arm::app::DsCnnModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+ }
+ }
+}
+
+} //namespace
+} //namespace
+} //namespace
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
new file mode 100644
index 0000000..ee63c2f
--- /dev/null
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h"
+#include "TensorFlowLiteMicro.hpp"
+#include "Wav2LetterModel.hpp"
+#include "TestData_asr.hpp"
+
+#include <catch.hpp>
+#include <random>
+
+namespace arm {
+namespace app {
+namespace asr {
+
+/* Copies `copySz` bytes of `vec` into the model's first input tensor and
+ * runs one inference; returns the inference status. Caller must ensure
+ * copySz does not exceed the input tensor size. */
+bool RunInference(arm::app::Model& model, const int8_t vec[], const size_t copySz)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ memcpy(inputTensor->data.data, vec, copySz);
+
+ return model.RunInference();
+}
+
+/* Fills the model's first input tensor with random int8 data and runs a
+ * single inference; REQUIREs success and returns true. */
+bool RunInferenceRandom(arm::app::Model& model)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ std::random_device rndDevice;
+ std::mt19937 mersenneGen{rndDevice()};
+ /* `short` distribution (int8_t is not a valid distribution type in the
+ * standard); values stay within int8 range [-128, 127]. */
+ std::uniform_int_distribution<short> dist {-128, 127};
+
+ auto gen = [&dist, &mersenneGen](){
+ return dist(mersenneGen);
+ };
+
+ std::vector<int8_t> randomAudio(inputTensor->bytes);
+ std::generate(std::begin(randomAudio), std::end(randomAudio), gen);
+
+ REQUIRE(RunInference(model, randomAudio.data(), inputTensor->bytes));
+ return true;
+}
+
+/* Skip this test ("[.]" tag): the Wav2Letter model straight from the ML
+ * zoo is not Vela-optimised and will fail to run here. */
+TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+ arm::app::Wav2LetterModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ REQUIRE(RunInferenceRandom(model));
+}
+
+
+/* Runs inference on a golden input feature vector and compares every
+ * element of the output tensor with the golden output feature vector.
+ * @param[in] input_goldenFV Golden input (IFM) data.
+ * @param[in] output_goldenFV Golden output (OFM) data to compare against.
+ * @param[in,out] model Initialised model to run. */
+template<typename T>
+void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::Model& model)
+{
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ REQUIRE(inputTensor);
+
+ REQUIRE(RunInference(model, input_goldenFV, inputTensor->bytes));
+
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ /* Iterate over elements, not raw bytes: for T wider than one byte a
+ * byte-count bound would over-run the typed arrays. */
+ for (size_t i = 0; i < outputTensor->bytes / sizeof(T); i++) {
+ REQUIRE((int)tensorData[i] == (int)((T)output_goldenFV[i]));
+ }
+}
+
+/* Golden-data regression test (skipped by default - see note above the
+ * random-inference test): re-init the model per feature-map pair and
+ * check the inference output matches the golden OFM. */
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+{
+ for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
+ auto input_goldenFV = get_ifm_data_array(i);
+ auto output_goldenFV = get_ofm_data_array(i);
+
+ DYNAMIC_SECTION("Executing inference with re-init")
+ {
+ arm::app::Wav2LetterModel model{};
+
+ REQUIRE_FALSE(model.IsInited());
+ REQUIRE(model.Init());
+ REQUIRE(model.IsInited());
+
+ TestInference<int8_t>(input_goldenFV, output_goldenFV, model);
+
+ }
+ }
+}
+
+} //namespace
+} //namespace
+} //namespace
diff --git a/tests/use_case/kws_asr/InitModels.cc b/tests/use_case/kws_asr/InitModels.cc
new file mode 100644
index 0000000..770944d
--- /dev/null
+++ b/tests/use_case/kws_asr/InitModels.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnModel.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <catch.hpp>
+
+/* Skip this test ("[.]" tag): the Wav2Letter model straight from the ML
+ * zoo is not Vela optimised and will fail; here we check tensor-arena
+ * re-use by loading two model instances with a shared allocator. */
+TEST_CASE("Init two Models", "[.]")
+{
+ arm::app::DsCnnModel model1;
+ arm::app::DsCnnModel model2;
+
+ /* Ideally we should load the wav2letter model here, but there is
+ * none available to run on native (ops not supported on unoptimised
+ * version). However, we can certainly create two instances of the
+ * same type of model to see if our tensor arena re-use works as
+ * intended.
+ *
+ * @TODO: uncomment this when this model can run on native pipeline. */
+ //arm::app::Wav2LetterModel model2; /* model2. */
+
+ /* Load/initialise the first model. */
+ REQUIRE(model1.Init());
+
+ /* Allocator instance should have been created. */
+ REQUIRE(nullptr != model1.GetAllocator());
+
+ /* Load the second model using the same allocator as model 1. */
+ REQUIRE(model2.Init(model1.GetAllocator()));
+
+ /* Make sure they point to the same allocator object. */
+ REQUIRE(model1.GetAllocator() == model2.GetAllocator());
+
+ /* Both models should report being initialised. */
+ REQUIRE(true == model1.IsInited());
+ REQUIRE(true == model2.IsInited());
+} \ No newline at end of file
diff --git a/tests/use_case/kws_asr/KwsAsrTests.cc b/tests/use_case/kws_asr/KwsAsrTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/kws_asr/KwsAsrTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/kws_asr/MfccTests.cc b/tests/use_case/kws_asr/MfccTests.cc
new file mode 100644
index 0000000..9509519
--- /dev/null
+++ b/tests/use_case/kws_asr/MfccTests.cc
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DsCnnMfcc.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* First 640 samples from yes.wav. */
+const std::vector<int16_t> testWav = std::vector<int16_t>{
+ 139, 143, 164, 163, 157, 156, 151, 148, 172, 171,
+ 165, 169, 149, 142, 145, 147, 166, 146, 112, 132,
+ 132, 136, 165, 176, 176, 152, 138, 158, 179, 185,
+ 183, 148, 121, 130, 167, 204, 163, 132, 165, 184,
+ 193, 205, 210, 204, 195, 178, 168, 197, 207, 201,
+ 197, 177, 185, 196, 191, 198, 196, 183, 193, 181,
+ 157, 170, 167, 159, 164, 152, 146, 167, 180, 171,
+ 194, 232, 204, 173, 171, 172, 184, 169, 175, 199,
+ 200, 195, 185, 214, 214, 193, 196, 191, 204, 191,
+ 172, 187, 183, 192, 203, 172, 182, 228, 232, 205,
+ 177, 174, 191, 210, 210, 211, 197, 177, 198, 217,
+ 233, 236, 203, 191, 169, 145, 149, 161, 198, 206,
+ 176, 137, 142, 181, 200, 215, 201, 188, 166, 162,
+ 184, 155, 135, 132, 126, 142, 169, 184, 172, 156,
+ 132, 119, 150, 147, 154, 160, 125, 130, 137, 154,
+ 161, 168, 195, 182, 160, 134, 138, 146, 130, 120,
+ 101, 122, 137, 118, 117, 131, 145, 140, 146, 148,
+ 148, 168, 159, 134, 114, 114, 130, 147, 147, 134,
+ 125, 98, 107, 127, 99, 79, 84, 107, 117, 114,
+ 93, 92, 127, 112, 109, 110, 96, 118, 97, 87,
+ 110, 95, 128, 153, 147, 165, 146, 106, 101, 137,
+ 139, 96, 73, 90, 91, 51, 69, 102, 100, 103,
+ 96, 101, 123, 107, 82, 89, 118, 127, 99, 100,
+ 111, 97, 111, 123, 106, 121, 133, 103, 100, 88,
+ 85, 111, 114, 125, 102, 91, 97, 84, 139, 157,
+ 109, 66, 72, 129, 111, 90, 127, 126, 101, 109,
+ 142, 138, 129, 159, 140, 80, 74, 78, 76, 98,
+ 68, 42, 106, 143, 112, 102, 115, 114, 82, 75,
+ 92, 80, 110, 114, 66, 86, 119, 101, 101, 103,
+ 118, 145, 85, 40, 62, 88, 95, 87, 73, 64,
+ 86, 71, 71, 105, 80, 73, 96, 92, 85, 90,
+ 81, 86, 105, 100, 89, 78, 102, 114, 95, 98,
+ 69, 70, 108, 112, 111, 90, 104, 137, 143, 160,
+ 145, 121, 98, 86, 91, 87, 115, 123, 109, 99,
+ 85, 120, 131, 116, 125, 144, 153, 111, 98, 110,
+ 93, 89, 101, 137, 155, 142, 108, 94, 136, 145,
+ 129, 129, 122, 109, 90, 76, 81, 110, 119, 96,
+ 95, 102, 105, 111, 90, 89, 111, 115, 86, 51,
+ 107, 140, 105, 105, 110, 142, 125, 76, 75, 69,
+ 65, 52, 61, 69, 55, 42, 47, 58, 37, 35,
+ 24, 20, 44, 22, 16, 26, 6, 3, 4, 23,
+ 60, 51, 30, 12, 24, 31, -9, -16, -13, 13,
+ 19, 9, 37, 55, 70, 36, 23, 57, 45, 33,
+ 50, 59, 18, 11, 62, 74, 52, 8, -3, 26,
+ 51, 48, -5, -9, 12, -7, -12, -5, 28, 41,
+ -2, -30, -13, 31, 33, -12, -22, -8, -15, -17,
+ 2, -6, -25, -27, -24, -8, 4, -9, -52, -47,
+ -9, -32, -45, -5, 41, 15, -32, -14, 2, -1,
+ -10, -30, -32, -25, -21, -17, -14, 8, -4, -13,
+ 34, 18, -36, -38, -18, -19, -28, -17, -14, -16,
+ -2, -20, -27, 12, 11, -17, -33, -12, -22, -64,
+ -42, -26, -23, -22, -37, -51, -53, -30, -18, -48,
+ -69, -38, -54, -96, -72, -49, -50, -57, -41, -22,
+ -43, -64, -54, -23, -49, -69, -41, -44, -42, -49,
+ -40, -26, -54, -50, -38, -49, -70, -94, -89, -69,
+ -56, -65, -71, -47, -39, -49, -79, -91, -56, -46,
+ -62, -86, -64, -32, -47, -50, -71, -77, -65, -68,
+ -52, -51, -61, -67, -61, -81, -93, -52, -59, -62,
+ -51, -75, -76, -50, -32, -54, -68, -70, -43, 1,
+ -42, -92, -80, -41, -38, -79, -69, -49, -82, -122,
+ -93, -21, -24, -61, -70, -73, -62, -74, -69, -43,
+ -25, -15, -43, -23, -26, -69, -44, -12, 1, -51,
+ -78, -13, 3, -53, -105, -72, -24, -62, -66, -31,
+ -40, -65, -86, -64, -44, -55, -63, -61, -37, -41,
+};
+
+/* Golden audio ops mfcc output for the above wav. */
+const std::vector<float> testWavMfcc {
+ -22.67135, -0.61615, 2.07233, 0.58137, 1.01655, 0.85816, 0.46039, 0.03393, 1.16511, 0.0072,
+};
+
+/* Builds a DS-CNN MFCC extractor: 10 coefficients over a 40 ms frame at
+ * the model's default sampling frequency. */
+arm::app::audio::DsCnnMFCC GetMFCCInstance() {
+ const int sampFreq = arm::app::audio::DsCnnMFCC::ms_defaultSamplingFreq;
+ const int frameLenMs = 40;
+ const int frameLenSamples = sampFreq * frameLenMs * 0.001;
+ const int numMfccFeats = 10;
+
+ return arm::app::audio::DsCnnMFCC(numMfccFeats, frameLenSamples);
+}
+
+/* Computes quantised MFCC features for the test wav and checks each value
+ * against the quantised golden reference, saturated to T's numeric range.
+ * (Renamed from the misspelled "TestQuntisedMFCC" to match the equivalent
+ * helper in tests/use_case/kws/MfccTests.cc; all uses are in this file.) */
+template <class T>
+void TestQuantisedMFCC() {
+ const float quantScale = 1.1088106632232666;
+ const int quantOffset = 95;
+ std::vector<T> mfccOutput = GetMFCCInstance().MfccComputeQuant<T>(testWav, quantScale, quantOffset);
+
+ const long min_val = std::numeric_limits<T>::min();
+ const long max_val = std::numeric_limits<T>::max();
+
+ for (size_t i = 0; i < testWavMfcc.size(); ++i){
+ /* Quantise the golden float value, then clamp into T's range. */
+ long TestWavMfcc = (std::lround((testWavMfcc[i] / quantScale) + quantOffset));
+ T quantizedTestWavMfcc = static_cast<T>(std::max(min_val, std::min(TestWavMfcc, max_val)));
+
+ REQUIRE(quantizedTestWavMfcc == Approx(mfccOutput[i]).margin(0));
+ }
+}
+template void TestQuantisedMFCC<int8_t>();
+template void TestQuantisedMFCC<uint8_t>();
+template void TestQuantisedMFCC<int16_t>();
+
+/* Checks float MFCC output against the golden reference, then the
+ * quantised variants for int8/uint8/int16. */
+TEST_CASE("MFCC calculation test")
+{
+ hal_platform platform;
+ data_acq_module dataAcq;
+ data_psn_module dataPsn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform. */
+ hal_init(&platform, &dataAcq, &dataPsn, &timer);
+ hal_platform_init(&platform);
+
+ SECTION("FP32")
+ {
+ auto mfccOutput = GetMFCCInstance().MfccCompute(testWav);
+ REQUIRE_THAT( mfccOutput, Catch::Approx( testWavMfcc ).margin(0.0001) );
+ }
+
+ SECTION("int8_t")
+ {
+ TestQuantisedMFCC<int8_t>();
+ }
+
+ SECTION("uint8_t")
+ {
+ TestQuantisedMFCC<uint8_t>();
+ }
+
+ SECTION("MFCC quant calculation test - int16_t")
+ {
+ TestQuantisedMFCC<int16_t>();
+ }
+} \ No newline at end of file
diff --git a/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
new file mode 100644
index 0000000..6fd7df3
--- /dev/null
+++ b/tests/use_case/kws_asr/Wav2LetterPostprocessingTest.cc
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPostprocess.hpp"
+#include "Wav2LetterModel.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+/* Builds a quantised TfLiteTensor (scale 1, zero point 0) over `vectorBuf`,
+ * filled with `initVal`. The rank is prepended to `shape` because
+ * IntArrayFromInts() expects the element count first.
+ * NOTE(review): `vectorBuf` is sized to sizeInBytes *elements* of T, which
+ * over-allocates when sizeof(T) > 1 - harmless for the int8 tests here,
+ * but confirm before using with wider types. */
+template <typename T>
+static TfLiteTensor GetTestTensor(std::vector <int>& shape,
+ T initVal,
+ std::vector<T>& vectorBuf)
+{
+ REQUIRE(0 != shape.size());
+
+ shape.insert(shape.begin(), shape.size());
+ uint32_t sizeInBytes = sizeof(T);
+ for (size_t i = 1; i < shape.size(); ++i) {
+ sizeInBytes *= shape[i];
+ }
+
+ /* Allocate mem. */
+ vectorBuf = std::vector<T>(sizeInBytes, initVal);
+ TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(shape.data());
+ return tflite::testing::CreateQuantizedTensor(
+ vectorBuf.data(), dims,
+ 1, 0, "test-tensor");
+}
+
+/* Invoke() must reject a tensor whose row axis is too small for the
+ * configured context/inner lengths, and accept a valid one. */
+TEST_CASE("Checking return value")
+{
+ SECTION("Mismatched post processing parameters and tensor size")
+ {
+ const uint32_t ctxLen = 5;
+ const uint32_t innerLen = 3;
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+ /* Row axis has 1 element; 2*ctxLen + innerLen rows are needed. */
+ std::vector <int> tensorShape = {1, 1, 1, 13};
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(
+ tensorShape, 100, tensorVec);
+ REQUIRE(false == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+ }
+
+ SECTION("Post processing succeeds")
+ {
+ const uint32_t ctxLen = 5;
+ const uint32_t innerLen = 3;
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, 0};
+
+ std::vector <int> tensorShape = {1, 1, 13, 1};
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(
+ tensorShape, 100, tensorVec);
+
+ /* Copy elements to compare later. */
+ std::vector <int8_t> originalVec = tensorVec;
+
+ /* This step should not erase anything. */
+ REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+ }
+}
+
+/* Verifies which regions of the output tensor Invoke() erases: the
+ * first/last iteration keeps data intact; otherwise the right-context rows
+ * (and, from the second call on, the left-context rows) are zeroed with
+ * the blank-token column set to 1, while the inner rows are preserved. */
+TEST_CASE("Postprocessing - erasing required elements")
+{
+ constexpr uint32_t ctxLen = 5;
+ constexpr uint32_t innerLen = 3;
+ constexpr uint32_t nRows = 2*ctxLen + innerLen;
+ constexpr uint32_t nCols = 10;
+ constexpr uint32_t blankTokenIdx = nCols - 1;
+ std::vector <int> tensorShape = {1, 1, nRows, nCols};
+
+ SECTION("First and last iteration")
+ {
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(
+ tensorShape, 100, tensorVec);
+
+ /* Copy elements to compare later. */
+ std::vector <int8_t> originalVec = tensorVec;
+
+ /* This step should not erase anything. */
+ REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+ REQUIRE(originalVec == tensorVec);
+ }
+
+ SECTION("Right context erase")
+ {
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(
+ tensorShape, 100, tensorVec);
+
+ /* Copy elements to compare later. */
+ std::vector <int8_t> originalVec = tensorVec;
+
+ /* This step should erase the right context only. */
+ REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+ REQUIRE(originalVec != tensorVec);
+
+ /* The last ctxLen * 10 elements should be gone. */
+ for (size_t i = 0; i < ctxLen; ++i) {
+ for (size_t j = 0; j < nCols; ++j) {
+ /* Check right context elements are zeroed. */
+ if (j == blankTokenIdx) {
+ CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 1);
+ } else {
+ CHECK(tensorVec[(ctxLen + innerLen) * nCols + i*nCols + j] == 0);
+ }
+
+ /* Check left context is preserved. */
+ CHECK(tensorVec[i*nCols + j] == originalVec[i*nCols + j]);
+ }
+ }
+
+ /* Check inner elements are preserved. */
+ for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+ CHECK(tensorVec[i] == originalVec[i]);
+ }
+ }
+
+ SECTION("Left and right context erase")
+ {
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(tensorShape, 100, tensorVec);
+
+ /* Copy elements to compare later. */
+ std::vector <int8_t> originalVec = tensorVec;
+
+ /* This step should erase right context. */
+ REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+ /* Calling it the second time should erase the left context. */
+ REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, false));
+
+ REQUIRE(originalVec != tensorVec);
+
+ /* The first and last ctxLen * 10 elements should be gone. */
+ for (size_t i = 0; i < ctxLen; ++i) {
+ for (size_t j = 0; j < nCols; ++j) {
+ /* Check left and right context elements are zeroed. */
+ if (j == blankTokenIdx) {
+ CHECK(tensorVec[(ctxLen + innerLen) * nCols + i * nCols + j] == 1);
+ CHECK(tensorVec[i * nCols + j] == 1);
+ } else {
+ CHECK(tensorVec[(ctxLen + innerLen) * nCols + i * nCols + j] == 0);
+ CHECK(tensorVec[i * nCols + j] == 0);
+ }
+ }
+ }
+
+ /* Check inner elements are preserved. */
+ for (size_t i = ctxLen * nCols; i < (ctxLen + innerLen) * nCols; ++i) {
+ /* Check left context is preserved. */
+ CHECK(tensorVec[i] == originalVec[i]);
+ }
+ }
+
+ SECTION("Try left context erase")
+ {
+ /* Should not be able to erase the left context if it is the first iteration. */
+ arm::app::audio::asr::Postprocess post{ctxLen, innerLen, blankTokenIdx};
+
+ std::vector <int8_t> tensorVec;
+ TfLiteTensor tensor = GetTestTensor<int8_t>(
+ tensorShape, 100, tensorVec);
+
+ /* Copy elements to compare later. */
+ std::vector <int8_t> originalVec = tensorVec;
+
+ /* Calling it the second time should erase the left context. */
+ REQUIRE(true == post.Invoke(&tensor, arm::app::Wav2LetterModel::ms_outputRowsIdx, true));
+ REQUIRE(originalVec == tensorVec);
+ }
+} \ No newline at end of file
diff --git a/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
new file mode 100644
index 0000000..e71366a
--- /dev/null
+++ b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Wav2LetterPreprocess.hpp"
+
+#include <algorithm>
+#include <catch.hpp>
+#include <limits>
+
+constexpr uint32_t numMfccFeatures = 13;
+constexpr uint32_t numMfccVectors = 10;
+
+/* Test vector output: generated using test-asr-preprocessing.py. */
/* Test vector output: generated using test-asr-preprocessing.py.
 * One row per analysis frame; each row is laid out as 13 MFCCs,
 * followed by 13 first-order deltas and 13 second-order deltas. */
int8_t expectedResult[numMfccVectors][numMfccFeatures*3] = {
    /* Feature vec 0. */
    -32, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11, /* MFCCs. */
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, /* Delta 1. */
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, /* Delta 2. */

    /* Feature vec 1. */
    -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,

    /* Feature vec 2. */
    -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -12, -12, -12,
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,

    /* Feature vec 3. */
    -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,

    /* Feature vec 4 : this should have valid delta 1 and delta 2. */
    -31, 4, -9, -9, -10, -10, -11, -11, -11, -11, -11, -12, -12,
    -38, -29, -9, 1, -2, -7, -8, -8, -12, -16, -14, -5, 5,
    -68, -50, -13, 5, 0, -9, -9, -8, -13, -20, -19, -3, 15,

    /* Feature vec 5 : this should have valid delta 1 and delta 2. */
    -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -11, -12, -12,
    -62, -45, -11, 5, 0, -8, -9, -8, -12, -19, -17, -3, 13,
    -27, -22, -13, -9, -11, -12, -12, -11, -11, -13, -13, -10, -6,

    /* Feature vec 6. */
    -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,

    /* Feature vec 7. */
    -32, 4, -9, -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,

    /* Feature vec 8. */
    -32, 4, -9, -8, -10, -10, -11, -11, -11, -12, -12, -11, -11,
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10,

    /* Feature vec 9. */
    -31, 4, -9, -8, -10, -10, -11, -11, -11, -11, -12, -11, -11,
    -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11, -11,
    -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10, -10
};
+
/**
 * @brief Fills the given buffer with a deterministic test signal.
 *
 * The signal is a quadratic ramp (0, 1, 4, 9, ...) that restarts from
 * zero whenever the next square would exceed the int16 maximum. A
 * non-linear waveform like this guarantees that both the first and
 * second order differential (delta) filters produce a non-trivial
 * response, which a linear ramp would not.
 *
 * @param[in,out] vec   Pre-sized vector to populate with samples.
 */
void PopulateTestWavVector(std::vector<int16_t>& vec)
{
    constexpr int maxAmplitude = std::numeric_limits<int16_t>::max();
    int sample = 0;
    for (auto& out : vec) {
        int squared = sample * sample;
        if (squared > maxAmplitude) {
            /* Restart the ramp once the square overflows int16 range. */
            sample = 0;
            squared = 0;
        }
        out = squared;
        ++sample;
    }
}
+
TEST_CASE("Preprocessing calculation INT8")
{
    /* End-to-end check of the ASR pre-processing pipeline (MFCC + delta
     * features + quantisation) against pre-computed expected output. */

    /* Initialise the HAL and platform. */
    hal_platform platform;
    data_acq_module data_acq;
    data_psn_module data_psn;
    platform_timer timer;
    hal_init(&platform, &data_acq, &data_psn, &timer);
    hal_platform_init(&platform);

    /* Constants. */
    const uint32_t  windowLen = 512;
    const uint32_t  windowStride = 160;
    const int       dimArray[] = {3, 1, numMfccFeatures * 3, numMfccVectors};
    /* NOTE(review): scale/offset presumably match the Wav2Letter model's
     * input quantisation parameters — confirm against the model. */
    const float     quantScale = 0.1410219967365265;
    const int       quantOffset = -11;

    /* Test wav memory: enough samples for numMfccVectors full windows at
     * the given stride (stride * numVectors plus the window overhang). */
    std::vector <int16_t> testWav((windowStride * numMfccVectors) +
                                  (windowLen - windowStride));

    /* Populate with dummy input. */
    PopulateTestWavVector(testWav);

    /* Allocate mem for tensor. */
    std::vector<int8_t> tensorVec(dimArray[1]*dimArray[2]*dimArray[3]);

    /* Initialise dimensions and the test tensor. */
    TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
    TfLiteTensor tensor = tflite::testing::CreateQuantizedTensor(
        tensorVec.data(), dims, quantScale, quantOffset, "preprocessedInput");

    /* Initialise pre-processing module. */
    arm::app::audio::asr::Preprocess prep{
        numMfccFeatures, windowLen, windowStride, numMfccVectors};

    /* Invoke pre-processing. */
    REQUIRE(prep.Invoke(testWav.data(), testWav.size(), &tensor));

    /* Wrap the tensor with a std::vector for ease. */
    int8_t * tensorData = tflite::GetTensorData<int8_t>(&tensor);
    std::vector <int8_t> vecResults =
        std::vector<int8_t>(tensorData, tensorData + tensor.bytes);

    /* Check sizes: for int8 elements, byte count equals element count. */
    REQUIRE(vecResults.size() == sizeof(expectedResult));

    /* Check that the elements have been calculated correctly. */
    for (uint32_t j = 0; j < numMfccVectors; ++j) {
        for (uint32_t i = 0; i < numMfccFeatures * 3; ++i) {
            size_t tensorIdx = (j * numMfccFeatures * 3) + i;
            CHECK(vecResults[tensorIdx] == expectedResult[j][i]);
        }
    }
}