aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference/workloads/Pad.cpp
diff options
context:
space:
mode:
authorFinn Williams <Finn.Williams@arm.com>2021-04-26 12:06:34 +0100
committerfinn.williams <finn.williams@arm.com>2021-04-28 11:39:10 +0000
commit01097941ef85073c56cbd1d5f00d7e8ffeb9876d (patch)
tree818686d467b142084e0e49bbd4084670d1d0d50b /src/backends/reference/workloads/Pad.cpp
parentc2b99a8783388ec3bd90dfed2e1b6d4f4d4bd1c8 (diff)
downloadarmnn-01097941ef85073c56cbd1d5f00d7e8ffeb9876d.tar.gz
IVGCVSW-5843 Separate memory managers for WorkingMemHandles
* Add inter layer memory management to WorkingMemHandle
* Change Const layers to be executed once in loadedNetworkConstruction and share tensorHandle between all WorkingMemHandles
* Fix various reference workloads pointing to memory in the queueDescriptor

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I69d4b3c5c84d2f5abe4540c3e624ab4f00d88226
Diffstat (limited to 'src/backends/reference/workloads/Pad.cpp')
-rw-r--r--src/backends/reference/workloads/Pad.cpp8
1 files changed, 5 insertions, 3 deletions
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index 1f8b674c3a..f58dbaea61 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -38,6 +38,8 @@ namespace armnn
void Pad(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
+ const ITensorHandle* inputHandle,
+ ITensorHandle* outputHandle,
const PadQueueDescriptor& data)
{
auto padList = data.m_Parameters.m_PadList;
@@ -66,15 +68,15 @@ void Pad(const TensorInfo& inputInfo,
unsigned int outputHeight = 0;
unsigned int outputWidth = 0;
- auto inputData = MakeDecoder<float>(inputInfo, data.m_Inputs[0]->Map());
- auto outData = MakeEncoder<float>(outputInfo, data.m_Outputs[0]->Map());
+ auto inputData = MakeDecoder<float>(inputInfo, inputHandle->Map());
+ auto outData = MakeEncoder<float>(outputInfo, outputHandle->Map());
// Fill the output tensor with Pad value first
if (outputInfo.IsQuantized())
{
// For Quantized types Pad Value should not be quantized with scale and offset of the tensor info
auto temporaryInfo = TensorInfo(outputInfo.GetShape(), outputInfo.GetDataType(), 1.0f, 0);
- auto outputData = MakeEncoder<float>(temporaryInfo, data.m_Outputs[0]->Map());
+ auto outputData = MakeEncoder<float>(temporaryInfo, outputHandle->Map());
FillOutputWithPadValue(*outputData, padValue, numOutputElements);
}
else