diff options
Diffstat (limited to 'source/use_case/kws/src')
-rw-r--r-- | source/use_case/kws/src/DsCnnModel.cc | 18 |
-rw-r--r-- | source/use_case/kws/src/UseCaseHandler.cc | 12 |
2 files changed, 14 insertions, 16 deletions
diff --git a/source/use_case/kws/src/DsCnnModel.cc b/source/use_case/kws/src/DsCnnModel.cc index a093eb4..4edfc04 100644 --- a/source/use_case/kws/src/DsCnnModel.cc +++ b/source/use_case/kws/src/DsCnnModel.cc @@ -20,21 +20,21 @@ const tflite::MicroOpResolver& arm::app::DsCnnModel::GetOpResolver() { - return this->_m_opResolver; + return this->m_opResolver; } bool arm::app::DsCnnModel::EnlistOperations() { - this->_m_opResolver.AddReshape(); - this->_m_opResolver.AddAveragePool2D(); - this->_m_opResolver.AddConv2D(); - this->_m_opResolver.AddDepthwiseConv2D(); - this->_m_opResolver.AddFullyConnected(); - this->_m_opResolver.AddRelu(); - this->_m_opResolver.AddSoftmax(); + this->m_opResolver.AddReshape(); + this->m_opResolver.AddAveragePool2D(); + this->m_opResolver.AddConv2D(); + this->m_opResolver.AddDepthwiseConv2D(); + this->m_opResolver.AddFullyConnected(); + this->m_opResolver.AddRelu(); + this->m_opResolver.AddSoftmax(); #if defined(ARM_NPU) - if (kTfLiteOk == this->_m_opResolver.AddEthosU()) { + if (kTfLiteOk == this->m_opResolver.AddEthosU()) { info("Added %s support to op resolver\n", tflite::GetString_ETHOSU()); } else { diff --git a/source/use_case/kws/src/UseCaseHandler.cc b/source/use_case/kws/src/UseCaseHandler.cc index eaf53c1..2144c03 100644 --- a/source/use_case/kws/src/UseCaseHandler.cc +++ b/source/use_case/kws/src/UseCaseHandler.cc @@ -52,8 +52,6 @@ namespace app { * object. * @param[in] platform Reference to the hal platform object. * @param[in] results Vector of classification results to be displayed. - * @param[in] infTimeMs Inference time in milliseconds, if available, - * otherwise, this can be passed in as 0. * @return true if successful, false otherwise. **/ static bool PresentInferenceResult(hal_platform& platform, @@ -341,11 +339,11 @@ namespace app { * Real features math is done by a lambda function provided as a parameter. * Features are written to input tensor memory. * - * @tparam T Feature vector type. 
- * @param inputTensor Model input tensor pointer. - * @param cacheSize Number of feature vectors to cache. Defined by the sliding window overlap. - * @param compute Features calculator function. - * @return Lambda function to compute features. + * @tparam T Feature vector type. + * @param[in] inputTensor Model input tensor pointer. + * @param[in] cacheSize Number of feature vectors to cache. Defined by the sliding window overlap. + * @param[in] compute Features calculator function. + * @return Lambda function to compute features. */ template<class T> std::function<void (std::vector<int16_t>&, size_t, bool, size_t)>