summaryrefslogtreecommitdiff
path: root/source/use_case/vww/src/UseCaseHandler.cc
diff options
context:
space:
mode:
authorIsabella Gottardi <isabella.gottardi@arm.com>2021-10-20 15:52:32 +0100
committerIsabella Gottardi <isabella.gottardi@arm.com>2021-10-25 08:51:10 +0000
commit79d4154ee071d0e7ef2d1eecdde149d488bb9d8b (patch)
tree21c8c0a2bd187a925f28045d4a57e9e4ef05be82 /source/use_case/vww/src/UseCaseHandler.cc
parent14ab8d447c5f12df2ac7fd4217fc0d2005b02dca (diff)
downloadml-embedded-evaluation-kit-79d4154ee071d0e7ef2d1eecdde149d488bb9d8b.tar.gz
MLECO-2458 and MLECO-2476 [Fix] VWW IFM quant step
* Changed image->cc conversion to be similar to the preprocessing of the img_class and vww models: images are scaled maintaining the aspect ratio and then the centre crop of the correct size is taken. * VWW applies input quantization info to the int8 image (prior converted to [0,1] float range). * Changed adult_blur to an image without a person. * Fix menu print when selecting a specific ifm to run (Select message was displayed after typing something) Change-Id: Ie6cde7ab4835ea842667b87397458a5d32131df3
Diffstat (limited to 'source/use_case/vww/src/UseCaseHandler.cc')
-rw-r--r--source/use_case/vww/src/UseCaseHandler.cc18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/source/use_case/vww/src/UseCaseHandler.cc b/source/use_case/vww/src/UseCaseHandler.cc
index fb2e837..d384032 100644
--- a/source/use_case/vww/src/UseCaseHandler.cc
+++ b/source/use_case/vww/src/UseCaseHandler.cc
@@ -21,6 +21,8 @@
#include "UseCaseCommonUtils.hpp"
#include "hal.h"
+#include <algorithm>
+
namespace arm {
namespace app {
@@ -94,13 +96,19 @@ namespace app {
/* Display this image on the LCD. */
platform.data_psn->present_data_image(
- (uint8_t *) inputTensor->data.data,
+ static_cast<uint8_t *>(inputTensor->data.data),
nCols, nRows, nChannels,
dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
- /* If the data is signed. */
- if (model.IsDataSigned()) {
- image::ConvertImgToInt8(inputTensor->data.data, inputTensor->bytes);
+ /* Vww model preprocessing is image conversion from uint8 to [0,1] float values,
+ * then quantize them with input quantization info. */
+ QuantParams inQuantParams = GetTensorQuantParams(inputTensor);
+
+ auto* req_data = static_cast<uint8_t *>(inputTensor->data.data);
+ auto* signed_req_data = static_cast<int8_t *>(inputTensor->data.data);
+ for (size_t i = 0; i < inputTensor->bytes; i++) {
+ auto i_data_int8 = static_cast<int8_t>(((static_cast<float>(req_data[i]) / 255.0f) / inQuantParams.scale) + inQuantParams.offset);
+ signed_req_data[i] = std::min<int8_t>(INT8_MAX, std::max<int8_t>(i_data_int8, INT8_MIN));
}
/* Display message on the LCD - inference running. */
@@ -159,7 +167,7 @@ namespace app {
const uint32_t nChannels = (inputTensor->dims->size == 4) ? inputTensor->dims->data[3] : 1;
const uint8_t* srcPtr = get_img_array(imIdx);
- auto* dstPtr = (uint8_t*)inputTensor->data.data;
+ auto* dstPtr = static_cast<uint8_t *>(inputTensor->data.data);
if (1 == nChannels) {
/**
* Visual Wake Word model accepts only one channel =>