diff options
author | Richard Burton <richard.burton@arm.com> | 2021-11-10 16:27:14 +0000 |
---|---|---|
committer | Richard <richard.burton@arm.com> | 2021-11-10 16:34:16 +0000 |
commit | 005534664e192cf909a11435c4bc4696b1f4c51f (patch) | |
tree | f8314bd284561e1f0ff68fc393ee22d0318ae162 /tests/use_case/kws_asr | |
parent | dee53bc7769d6201ec27deea4405c0df6c9b0623 (diff) | |
download | ml-embedded-evaluation-kit-005534664e192cf909a11435c4bc4696b1f4c51f.tar.gz |
MLECO-2354 MLECO-2355 MLECO-2356: Moving noise reduction to public repository
* Use RNNoise model from PMZ
* Add Noise reduction use-case
Signed-off-by: Richard Burton <richard.burton@arm.com>
Change-Id: Ia8cc7ef102e22a5ff8bfbd3833594a4905a66057
Diffstat (limited to 'tests/use_case/kws_asr')
-rw-r--r-- | tests/use_case/kws_asr/InferenceTestDSCNN.cc | 9 | ||||
-rw-r--r-- | tests/use_case/kws_asr/InferenceTestWav2Letter.cc | 5 |
2 files changed, 8 insertions, 6 deletions
diff --git a/tests/use_case/kws_asr/InferenceTestDSCNN.cc b/tests/use_case/kws_asr/InferenceTestDSCNN.cc index 134003d..ad1731b 100644 --- a/tests/use_case/kws_asr/InferenceTestDSCNN.cc +++ b/tests/use_case/kws_asr/InferenceTestDSCNN.cc @@ -29,9 +29,9 @@ bool RunInference(arm::app::Model& model, const int8_t vec[]) { TfLiteTensor* inputTensor = model.GetInputTensor(0); REQUIRE(inputTensor); - const size_t copySz = inputTensor->bytes < IFM_DATA_SIZE ? + const size_t copySz = inputTensor->bytes < IFM_0_DATA_SIZE ? inputTensor->bytes : - IFM_DATA_SIZE; + IFM_0_DATA_SIZE; memcpy(inputTensor->data.data, vec, copySz); return model.RunInference(); @@ -63,7 +63,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app:: TfLiteTensor* outputTensor = model.GetOutputTensor(0); REQUIRE(outputTensor); - REQUIRE(outputTensor->bytes == OFM_DATA_SIZE); + REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE); auto tensorData = tflite::GetTensorData<T>(outputTensor); REQUIRE(tensorData); @@ -83,7 +83,8 @@ TEST_CASE("Running random inference with Tflu and DsCnnModel Int8", "[DS_CNN]") } TEST_CASE("Running inference with Tflu and DsCnnModel Uint8", "[DS_CNN]") { - for (uint32_t i = 0; i < NUMBER_OF_FM_FILES; ++i) { + REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES); + for (uint32_t i = 0; i < NUMBER_OF_IFM_FILES; ++i) { const int8_t* input_goldenFV = get_ifm_data_array(i); const int8_t* output_goldenFV = get_ofm_data_array(i); diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc index 1b14a42..477a1dd 100644 --- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc +++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc @@ -78,7 +78,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app:: TfLiteTensor* outputTensor = model.GetOutputTensor(0); REQUIRE(outputTensor); - REQUIRE(outputTensor->bytes == OFM_DATA_SIZE); + REQUIRE(outputTensor->bytes == OFM_0_DATA_SIZE); auto 
tensorData = tflite::GetTensorData<T>(outputTensor); REQUIRE(tensorData); @@ -89,7 +89,8 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app:: TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]") { - for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) { + REQUIRE(NUMBER_OF_IFM_FILES == NUMBER_OF_OFM_FILES); + for (uint32_t i = 0 ; i < NUMBER_OF_IFM_FILES; ++i) { auto input_goldenFV = get_ifm_data_array(i);; auto output_goldenFV = get_ofm_data_array(i); |