From c394a6d17008f876c73e94883f0c59aeedfe73f0 Mon Sep 17 00:00:00 2001
From: Matthew Bentham
Date: Mon, 24 Jun 2019 12:51:25 +0100
Subject: IVGCVSW-3307 Don't assume TensorInfo::Map() can be called before
 Execute()

Change-Id: I445c69d2e99d8c93622e739af61f721e61b0f90f
Signed-off-by: Matthew Bentham
---
 src/backends/reference/workloads/BaseIterator.hpp  | 42 +++++++++++++++++++++-
 src/backends/reference/workloads/Decoders.hpp      |  2 +-
 src/backends/reference/workloads/Encoders.hpp      |  2 +-
 .../workloads/RefConvolution2dWorkload.cpp         |  7 ++--
 .../RefDepthwiseConvolution2dWorkload.cpp          |  7 ++--
 .../reference/workloads/RefElementwiseWorkload.cpp | 10 ++++--
 .../workloads/RefFullyConnectedWorkload.cpp        |  7 ++--
 7 files changed, 65 insertions(+), 12 deletions(-)

diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 26b0179e71..5583fe79ad 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -8,6 +8,8 @@
 #include
 #include
 
+#include <boost/assert.hpp>
+
 namespace armnn
 {
 
@@ -35,6 +37,8 @@ public:
 
     virtual ~Decoder() {}
 
+    virtual void Reset(void*) = 0;
+
     virtual IType Get() const = 0;
 };
 
@@ -46,6 +50,8 @@ public:
 
     virtual ~Encoder() {}
 
+    virtual void Reset(void*) = 0;
+
     virtual void Set(IType right) = 0;
 
     virtual IType Get() const = 0;
@@ -55,30 +61,40 @@ template
 class TypedIterator : public Base
 {
 public:
-    TypedIterator(T* data)
+    TypedIterator(T* data = nullptr)
         : m_Iterator(data), m_Start(data)
     {}
 
+    void Reset(void* data) override
+    {
+        m_Iterator = reinterpret_cast<T*>(data);
+        m_Start = m_Iterator;
+    }
+
     TypedIterator& operator++() override
     {
+        BOOST_ASSERT(m_Iterator);
         ++m_Iterator;
         return *this;
     }
 
     TypedIterator& operator+=(const unsigned int increment) override
     {
+        BOOST_ASSERT(m_Iterator);
         m_Iterator += increment;
         return *this;
     }
 
     TypedIterator& operator-=(const unsigned int increment) override
     {
+        BOOST_ASSERT(m_Iterator);
         m_Iterator -= increment;
         return *this;
     }
 
     TypedIterator& operator[](const unsigned int index) override
     {
+        BOOST_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -94,6 +110,9 @@ public:
     QASymm8Decoder(const uint8_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QASymm8Decoder(const float scale, const int32_t offset)
+        : QASymm8Decoder(nullptr, scale, offset) {}
+
     float Get() const override
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
@@ -110,6 +129,9 @@ public:
     QSymm16Decoder(const int16_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QSymm16Decoder(const float scale, const int32_t offset)
+        : QSymm16Decoder(nullptr, scale, offset) {}
+
     float Get() const override
     {
         return armnn::Dequantize(*m_Iterator, m_Scale, m_Offset);
@@ -126,6 +148,9 @@ public:
     FloatDecoder(const float* data)
         : TypedIterator(data) {}
 
+    FloatDecoder()
+        : FloatDecoder(nullptr) {}
+
     float Get() const override
     {
         return *m_Iterator;
@@ -138,6 +163,9 @@ public:
     ScaledInt32Decoder(const int32_t* data, const float scale)
         : TypedIterator(data), m_Scale(scale) {}
 
+    ScaledInt32Decoder(const float scale)
+        : ScaledInt32Decoder(nullptr, scale) {}
+
     float Get() const override
     {
         return static_cast<float>(*m_Iterator) * m_Scale;
@@ -153,6 +181,9 @@ public:
     QASymm8Encoder(uint8_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QASymm8Encoder(const float scale, const int32_t offset)
+        : QASymm8Encoder(nullptr, scale, offset) {}
+
     void Set(float right) override
     {
         *m_Iterator = armnn::Quantize(right, m_Scale, m_Offset);
@@ -174,6 +205,9 @@ public:
     QSymm16Encoder(int16_t* data, const float scale, const int32_t offset)
         : TypedIterator(data), m_Scale(scale), m_Offset(offset) {}
 
+    QSymm16Encoder(const float scale, const int32_t offset)
+        : QSymm16Encoder(nullptr, scale, offset) {}
+
     void Set(float right) override
     {
         *m_Iterator = armnn::Quantize(right, m_Scale, m_Offset);
@@ -195,6 +229,9 @@ public:
     FloatEncoder(float* data)
         : TypedIterator(data) {}
 
+    FloatEncoder()
+        : FloatEncoder(nullptr) {}
+
     void Set(float right) override
     {
         *m_Iterator = right;
@@ -212,6 +249,9 @@ public:
     BooleanEncoder(uint8_t* data)
         : TypedIterator(data) {}
 
+    BooleanEncoder()
+        : BooleanEncoder(nullptr) {}
+
     void Set(bool right) override
     {
         *m_Iterator = right;
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index f5ec90662a..793e550764 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -13,7 +13,7 @@ namespace armnn
 {
 
 template<typename T>
-inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data);
+inline std::unique_ptr<Decoder<T>> MakeDecoder(const TensorInfo& info, const void* data = nullptr);
 
 template<>
 inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const void* data)
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index af3b937c2a..ed92393cdb 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -13,7 +13,7 @@ namespace armnn
 {
 
 template<typename T>
-inline std::unique_ptr<Encoder<T>> MakeEncoder(const TensorInfo& info, void* data);
+inline std::unique_ptr<Encoder<T>> MakeEncoder(const TensorInfo& info, void* data = nullptr);
 
 template<>
 inline std::unique_ptr<Encoder<float>> MakeEncoder(const TensorInfo& info, void* data)
diff --git a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
index a660d2e7f4..dad9936f1b 100644
--- a/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefConvolution2dWorkload.cpp
@@ -34,16 +34,19 @@ void RefConvolution2dWorkload::PostAllocationConfigure()
 {
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
     m_InputShape = inputInfo.GetShape();
-    m_InputDecoder = MakeDecoder(inputInfo, m_Data.m_Inputs[0]->Map());
+    m_InputDecoder = MakeDecoder(inputInfo);
 
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
     m_OutputShape = outputInfo.GetShape();
-    m_OutputEncoder = MakeEncoder(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_OutputEncoder = MakeEncoder(outputInfo);
 }
 
 void RefConvolution2dWorkload::Execute() const {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefConvolution2dWorkload_Execute");
 
+    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
+    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
+
     Convolve(m_InputShape, *m_InputDecoder, m_OutputShape, *m_OutputEncoder,
              m_FilterShape, *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
              m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft,
diff --git a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
index 48a20cf380..cfc81ce203 100644
--- a/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
+++ b/src/backends/reference/workloads/RefDepthwiseConvolution2dWorkload.cpp
@@ -36,11 +36,11 @@ void RefDepthwiseConvolution2dWorkload::PostAllocationConfigure()
 {
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
     m_InputShape = inputInfo.GetShape();
-    m_InputDecoder = MakeDecoder(inputInfo, m_Data.m_Inputs[0]->Map());
+    m_InputDecoder = MakeDecoder(inputInfo);
 
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
     m_OutputShape = outputInfo.GetShape();
-    m_OutputEncoder = MakeEncoder(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_OutputEncoder = MakeEncoder(outputInfo);
 }
 
 void RefDepthwiseConvolution2dWorkload::Execute() const
@@ -48,6 +48,9 @@ void RefDepthwiseConvolution2dWorkload::Execute() const
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefDepthwiseConvolution2dWorkload_Execute");
     std::unique_ptr<Decoder<float>> pBiasDecoder{};
 
+    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
+    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
+
     Convolve(m_InputShape, *m_InputDecoder, m_OutputShape, *m_OutputEncoder,
              m_FilterShape, *m_FilterDecoder, m_Data.m_Parameters.m_BiasEnabled, m_BiasDecoder.get(),
              m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft,
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index cbacd9c21e..6431348bc2 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -32,9 +32,9 @@ void RefElementwiseWorkload::PostAllocat
     const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
 
-    m_Input0 = MakeDecoder(inputInfo0, m_Data.m_Inputs[0]->Map());
-    m_Input1 = MakeDecoder(inputInfo1, m_Data.m_Inputs[1]->Map());
-    m_Output = MakeEncoder(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_Input0 = MakeDecoder(inputInfo0);
+    m_Input1 = MakeDecoder(inputInfo1);
+    m_Output = MakeEncoder(outputInfo);
 }
 
 template
@@ -49,6 +49,10 @@ void RefElementwiseWorkload::Execute() c
     const TensorShape& inShape1 = inputInfo1.GetShape();
     const TensorShape& outShape = outputInfo.GetShape();
 
+    m_Input0->Reset(m_Data.m_Inputs[0]->Map());
+    m_Input1->Reset(m_Data.m_Inputs[1]->Map());
+    m_Output->Reset(m_Data.m_Outputs[0]->Map());
+
     ElementwiseFunction(inShape0,
                         inShape1,
                         outShape,
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index c7a3d901e7..ac82db90e5 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -34,11 +34,11 @@ void RefFullyConnectedWorkload::PostAllocationConfigure()
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
     BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
     m_InputShape = inputInfo.GetShape();
-    m_InputDecoder = MakeDecoder(inputInfo, m_Data.m_Inputs[0]->Map());
+    m_InputDecoder = MakeDecoder(inputInfo);
 
     const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
     m_OutputShape = outputInfo.GetShape();
-    m_OutputEncoder = MakeEncoder(outputInfo, m_Data.m_Outputs[0]->Map());
+    m_OutputEncoder = MakeEncoder(outputInfo);
 
     m_NumActivations = 1; // Total number of activations in the input.
     for (unsigned int i = 1; i < inputInfo.GetNumDimensions(); i++)
@@ -51,6 +51,9 @@ void RefFullyConnectedWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFullyConnectedWorkload_Execute");
 
+    m_InputDecoder->Reset(m_Data.m_Inputs[0]->Map());
+    m_OutputEncoder->Reset(m_Data.m_Outputs[0]->Map());
+
     FullyConnected(m_InputShape,
                    *m_InputDecoder,
                    m_OutputShape,
-- 
cgit v1.2.1
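
Below is a minimal standalone sketch of the pattern this patch introduces. It is not ArmNN code and is not part of the patch; all names (FloatDecoderSketch, WorkloadSketch, PostAllocationConfigure, Execute) are invented for illustration. The point it demonstrates is that decoder/encoder objects can be created at configure time without any data pointer, and are only bound to mapped tensor memory via Reset() once execution starts, so nothing resembling Map() has to happen before Execute().

// Sketch only: shows deferring the data-pointer binding from configure time
// to execute time, analogous to the Decoder/Encoder::Reset() added above.
#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

class FloatDecoderSketch
{
public:
    // Bind (or rebind) the decoder to a freshly mapped buffer.
    void Reset(void* data) { m_Ptr = static_cast<const float*>(data); }

    float Get(std::size_t i) const
    {
        assert(m_Ptr);    // mirrors the BOOST_ASSERT(m_Iterator) guards in the patch
        return m_Ptr[i];
    }

private:
    const float* m_Ptr = nullptr;   // unset until Reset() is called
};

class WorkloadSketch
{
public:
    // Configure may run before the tensor buffer is allocated or mappable,
    // so only the decoder object is created here, with no data pointer.
    void PostAllocationConfigure()
    {
        m_Decoder = std::make_unique<FloatDecoderSketch>();
    }

    // Execute is the first point where a mapped pointer is guaranteed to be
    // valid, so it is handed to the decoder here, just before use.
    float Execute(void* mappedInput) const
    {
        m_Decoder->Reset(mappedInput);
        return m_Decoder->Get(0);
    }

private:
    std::unique_ptr<FloatDecoderSketch> m_Decoder;
};

int main()
{
    WorkloadSketch workload;
    workload.PostAllocationConfigure();   // no buffer exists yet; still fine

    std::vector<float> input = {42.0f};   // stands in for the mapped tensor memory
    return workload.Execute(input.data()) == 42.0f ? 0 : 1;
}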