diff options
author | hakanardo <hakan@debian.org> | 2018-02-16 10:06:34 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:47:18 +0000 |
commit | f36ac355e050a4714a951d04a72896e02cf5e2a1 (patch) | |
tree | aaa24d307d363b7faf09156d85c782e2fe1ff1e5 /utils | |
parent | 57c033bb5400ef19e5952f191da3e878e21bba91 (diff) | |
download | ComputeLibrary-f36ac355e050a4714a951d04a72896e02cf5e2a1.tar.gz |
COMPMID-931 Bugfix for NumPyBinLoader with SubTensor output. (#345)
* Bugfix for NumPyBinLoader with SubTensor output.
When a SubTensor is used as output from a NumPyBinLoader, data should
not be written to the beginning of the buffer.
* Reduce the buffer size by the same amount
* SubTensors have to be accessed through execution windows
Change-Id: Ib15b19f7873d73d8cff0497adfe60805c56c640d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/120733
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Diffstat (limited to 'utils')
-rw-r--r-- | utils/GraphUtils.cpp | 6 |
1 files changed, 4 insertions, 2 deletions
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp index e248929cc2..a36cf8ea9f 100644 --- a/utils/GraphUtils.cpp +++ b/utils/GraphUtils.cpp @@ -23,6 +23,8 @@ */ #include "utils/GraphUtils.h" + +#include "arm_compute/runtime/SubTensor.h" #include "utils/Utils.h" #ifdef ARM_COMPUTE_CL @@ -206,7 +208,7 @@ void RandomAccessor::fill(ITensor &tensor, D &&distribution) { std::mt19937 gen(_seed); - if(tensor.info()->padding().empty()) + if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr)) { for(size_t offset = 0; offset < tensor.info()->total_size(); offset += tensor.info()->element_size()) { @@ -362,7 +364,7 @@ bool NumPyBinLoader::access_tensor(ITensor &tensor) } // Read data - if(tensor.info()->padding().empty()) + if(tensor.info()->padding().empty() && (dynamic_cast<SubTensor *>(&tensor) == nullptr)) { // If tensor has no padding read directly from stream. stream.read(reinterpret_cast<char *>(tensor.buffer()), tensor.info()->total_size()); |