summaryrefslogtreecommitdiff
path: root/source/application/api/common/source/Model.cc
diff options
context:
space:
mode:
author: Liam Barry <liam.barry@arm.com> 2023-08-03 18:21:58 +0100
committer: Richard <richard.burton@arm.com> 2023-08-29 12:48:06 +0000
commit: 677d43fa8f55a8aa52e6bd9d1884e2797650fd65 (patch)
tree: 154774010a306aeac14bf2eb4b4a0846dd905c1b /source/application/api/common/source/Model.cc
parent: f1b28b861e301122b85ad7dc3d8ccb0720fcb584 (diff)
download: ml-embedded-evaluation-kit-677d43fa8f55a8aa52e6bd9d1884e2797650fd65.tar.gz
MLECO-4260: Replace raw C++ pointers with smart variants
Model: Added std::unique_ptr qualifier to Model.cc member and used make_unique when creating interpreter object. Removed custom destructor and unnecessary memory cleanup following failed allocation. DataStructures: Refactored array 2d to use a std::vector under the hood. This should preserve desired attributes including contiguous memory while removing the need for a custom destructor. Original size function renamed to dimSize to avoid confusion with vector.size(). Accompanying changes made to preprocessing and ASR tests. AppContext: Replaced use of raw pointers in AppContext.hpp. Previously a std::map including IAttribute pointers required individual deallocation as they were allocated using new. Signed-off-by: Liam Barry <liam.barry@arm.com> Change-Id: I1a34dce5dea6ecf4883a9ada3a20f827eb6e6d6b
Diffstat (limited to 'source/application/api/common/source/Model.cc')
-rw-r--r-- source/application/api/common/source/Model.cc | 17
1 file changed, 5 insertions, 12 deletions
diff --git a/source/application/api/common/source/Model.cc b/source/application/api/common/source/Model.cc
index f365c89..da8f46b 100644
--- a/source/application/api/common/source/Model.cc
+++ b/source/application/api/common/source/Model.cc
@@ -18,18 +18,13 @@
#include "log_macros.h"
#include <cinttypes>
+#include <memory>
-/* Initialise the model */
-arm::app::Model::~Model()
-{
- delete this->m_pInterpreter;
- /**
- * No clean-up function available for allocator in TensorFlow Lite Micro yet.
- **/
-}
+arm::app::Model::~Model() = default;
arm::app::Model::Model() : m_inited(false), m_type(kTfLiteNoType) {}
+/* Initialise the model */
bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
uint32_t tensorArenaSize,
const uint8_t* nnModelAddr,
@@ -83,8 +78,8 @@ bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
debug("Using existing allocator @ 0x%p\n", this->m_pAllocator);
}
- this->m_pInterpreter =
- new ::tflite::MicroInterpreter(this->m_pModel, this->GetOpResolver(), this->m_pAllocator);
+ this->m_pInterpreter = std::make_unique<tflite::MicroInterpreter>(
+ this->m_pModel, this->GetOpResolver(), this->m_pAllocator);
if (!this->m_pInterpreter) {
printf_err("Failed to allocate interpreter\n");
@@ -97,8 +92,6 @@ bool arm::app::Model::Init(uint8_t* tensorArenaAddr,
if (allocate_status != kTfLiteOk) {
printf_err("tensor allocation failed!\n");
- delete this->m_pInterpreter;
- this->m_pInterpreter = nullptr;
return false;
}