author     Matteo Martincigh <matteo.martincigh@arm.com>    2019-06-05 14:12:48 +0100
committer  Teresa Charlin <teresa.charlinreyes@arm.com>     2019-06-06 13:02:22 +0000
commit     2fc70c5f7bebd95da7c718907011c92fd29e3603 (patch)
tree       9b3e4b0e7e4b4ff2079dafc0528534f944729231
parent     a3b20473ea0583ba4c6a2c17ba2d59ed958c0aa7 (diff)
download   armnn-2fc70c5f7bebd95da7c718907011c92fd29e3603.tar.gz
IVGCVSW-3226 Refactor the reference normalization workload
* Refactored RefNormalizationFloat32Workload into RefNormalizationWorkload
* Added ref support of Uint8 norm workloads
* Added workload unit tests for Uint8

Change-Id: I063ce919c267e02a32e739848e49d75fd98a5eb6
Signed-off-by: Matteo Martincigh <matteo.martincigh@arm.com>
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                  17
-rw-r--r--  src/backends/backendsCommon/test/WorkloadDataValidation.cpp    2
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                    27
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp                  2
-rw-r--r--  src/backends/reference/backend.mk                              2
-rw-r--r--  src/backends/reference/test/RefCreateWorkloadTests.cpp        18
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt                4
-rw-r--r--  src/backends/reference/workloads/RefNormalizationWorkload.cpp (renamed from src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp)  106
-rw-r--r--  src/backends/reference/workloads/RefNormalizationWorkload.hpp (renamed from src/backends/reference/workloads/RefNormalizationFloat32Workload.hpp)    8
-rw-r--r--  src/backends/reference/workloads/RefWorkloads.hpp              2
10 files changed, 127 insertions, 61 deletions
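
The substance of the patch is the move from raw float pointers to the type-erasing Decoder<float>/Encoder<float> abstractions, which convert each element to and from the float domain on access; this is what lets a single workload serve Float32, Float16 and QuantisedAsymm8 tensors. A minimal sketch of the pattern, with simplified interfaces standing in for the real ones behind armnn's Decoders.hpp/Encoders.hpp (an assumption for illustration, not the exact armnn classes):

#include <cstdint>

template <typename T>
struct Decoder
{
    virtual ~Decoder() = default;
    virtual Decoder& operator[](unsigned int index) = 0; // reposition the cursor
    virtual T Get() const = 0;                           // read at the cursor
};

template <typename T>
struct Encoder
{
    virtual ~Encoder() = default;
    virtual Encoder& operator[](unsigned int index) = 0; // reposition the cursor
    virtual void Set(T value) = 0;                       // write at the cursor
};

// A QuantisedAsymm8 decoder dequantizes on the fly, so float-domain maths
// such as the LRN loops in this patch need no per-type specialisation.
struct QAsymm8Decoder : Decoder<float>
{
    QAsymm8Decoder(const uint8_t* data, float scale, int32_t offset)
        : m_Data(data), m_Scale(scale), m_Offset(offset) {}

    QAsymm8Decoder& operator[](unsigned int index) override
    {
        m_Current = m_Data + index;
        return *this;
    }

    float Get() const override
    {
        return m_Scale * (static_cast<int32_t>(*m_Current) - m_Offset);
    }

    const uint8_t* m_Data;
    const uint8_t* m_Current = nullptr;
    float          m_Scale;
    int32_t        m_Offset;
};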
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index a95abf12ae..9482136b59 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -608,6 +608,23 @@ void NormalizationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
{
ValidateNumInputs(workloadInfo, "NormalizationQueueDescriptor", 1);
ValidateNumOutputs(workloadInfo, "NormalizationQueueDescriptor", 1);
+
+ // Check the supported data types
+ std::vector<DataType> supportedTypes =
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8
+ };
+
+ ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+ supportedTypes,
+ "NormalizationQueueDescriptor");
+
+ ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+ { workloadInfo.m_InputTensorInfos[0].GetDataType() },
+ "NormalizationQueueDescriptor");
+
ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
workloadInfo.m_OutputTensorInfos[0],
"NormalizationQueueDescriptor",
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index a2e049d06d..7c7af2ddce 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -173,7 +173,7 @@ BOOST_AUTO_TEST_CASE(NormalizationQueueDescriptor_Validate_WrongInputHeight)
invalidData.m_Parameters.m_K = kappa;
//Invalid argument exception is expected, because input height != output height.
- BOOST_CHECK_THROW(RefNormalizationFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+ BOOST_CHECK_THROW(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
}
BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index f177385837..60536081be 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -959,12 +959,29 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
const NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
ignore_unused(descriptor);
- return IsSupportedForDataTypeRef(reasonIfUnsupported,
- input.GetDataType(),
- &TrueFunc<>,
- &FalseFuncU8<>);
+
+ // Define supported types
+ std::array<DataType, 3> supportedTypes =
+ {
+ DataType::Float16,
+ DataType::Float32,
+ DataType::QuantisedAsymm8
+ };
+
+ bool supported = true;
+
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference normalization: input type not supported.");
+
+ supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+ "Reference normalization: output type not supported.");
+
+ supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
+ "Reference normalization: input and output shapes have different "
+ "num total elements.");
+
+ return supported;
}
bool RefLayerSupport::IsOutputSupported(const TensorInfo& output,
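
This replaces the old two-lambda dispatch (TrueFunc for float, FalseFuncU8 for Uint8) with armnn's rule-based checking. Because each result is folded in with &= rather than returned early, a caller receives every failing reason at once. An illustrative sketch of the machinery, with stub types standing in for armnn's TensorInfo and Optional (simplified, not the exact armnn signatures):

#include <algorithm>
#include <string>

enum class DataType { Float16, Float32, QuantisedAsymm8 };

struct Rule
{
    bool operator()() const { return m_Res; }
    bool m_Res = true;
};

// Rule that passes when the tensor's data type is in the supported set.
struct TypeAnyOf : Rule
{
    template <typename Container>
    TypeAnyOf(DataType actual, const Container& supportedTypes)
    {
        m_Res = std::any_of(supportedTypes.begin(), supportedTypes.end(),
                            [actual](DataType dt) { return dt == actual; });
    }
};

template <typename F>
bool CheckSupportRule(F&& rule, std::string* reasonIfUnsupported, const char* reason)
{
    if (rule())
    {
        return true;
    }
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported->append(reason).append("\n"); // accumulate reasons
    }
    return false;
}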
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 76139026cc..319a620d2b 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -189,7 +189,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDetectionPostProcess(
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateNormalization(
const NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<RefNormalizationFloat32Workload, NullWorkload>(descriptor, info);
+ return std::make_unique<RefNormalizationWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 6f951130f3..2822c305f5 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -47,7 +47,7 @@ BACKEND_SOURCES := \
workloads/RefLstmWorkload.cpp \
workloads/RefMeanFloat32Workload.cpp \
workloads/RefMeanUint8Workload.cpp \
- workloads/RefNormalizationFloat32Workload.cpp \
+ workloads/RefNormalizationWorkload.cpp \
workloads/RefPadWorkload.cpp \
workloads/RefPermuteWorkload.cpp \
workloads/RefPooling2dWorkload.cpp \
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 8216ed5a99..3da9de9263 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -372,14 +372,24 @@ static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
+BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
}
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationNhwcWorkload)
+BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
{
- RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
+{
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
+{
+ RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
}
template <typename Pooling2dWorkloadType, armnn::DataType DataType>
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 82502c513c..9d5c4442fc 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -76,8 +76,8 @@ list(APPEND armnnRefBackendWorkloads_sources
RefLstmWorkload.hpp
RefConcatWorkload.cpp
RefConcatWorkload.hpp
- RefNormalizationFloat32Workload.cpp
- RefNormalizationFloat32Workload.hpp
+ RefNormalizationWorkload.cpp
+ RefNormalizationWorkload.hpp
RefPadWorkload.cpp
RefPadWorkload.hpp
RefPermuteWorkload.cpp
diff --git a/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index 3a2f2b9658..8ff2d9cf92 100644
--- a/src/backends/reference/workloads/RefNormalizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -3,31 +3,34 @@
// SPDX-License-Identifier: MIT
//
-#include "RefNormalizationFloat32Workload.hpp"
+#include "RefNormalizationWorkload.hpp"
#include "RefWorkloadUtils.hpp"
-#include "TensorBufferArrayView.hpp"
-
-#include "Profiling.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
#include <armnn/Tensor.hpp>
+#include <DataLayoutIndexed.hpp>
+#include <Profiling.hpp>
+
#include <boost/log/trivial.hpp>
#include <boost/numeric/conversion/cast.hpp>
+using namespace armnn;
using namespace armnnUtils;
-namespace armnn
+namespace
{
// Helper function to compute "Within" normalization using Krichevsky 2012: Local Brightness Normalization.
-static void NormalizeWithinUingLbr(const float* inputData,
- float* outputData,
- const TensorShape& tensorShape,
- uint32_t norm_size,
- float alpha,
- float beta,
- float kappa)
+void NormalizeWithinUingLbr(Decoder<float>& inputData,
+ Encoder<float>& outputData,
+ const TensorShape& tensorShape,
+ uint32_t norm_size,
+ float alpha,
+ float beta,
+ float kappa)
{
const unsigned int batchSize = tensorShape[0];
const unsigned int depth = tensorShape[1];
@@ -62,21 +65,24 @@ static void NormalizeWithinUingLbr(const float* inputData,
continue;
}
- float inval = inputData[n * cols * rows * depth +
- c * cols * rows +
- boost::numeric_cast<unsigned int>(j) * cols +
- boost::numeric_cast<unsigned int>(i)];
+ unsigned int inputIndex = n * cols * rows * depth +
+ c * cols * rows +
+ boost::numeric_cast<unsigned int>(j) * cols +
+ boost::numeric_cast<unsigned int>(i);
+ inputData[inputIndex];
+ float inval = inputData.Get();
accumulated_scale += inval*inval;
}
}
- outputData[n * cols * rows * depth +
- c * cols * rows +
- h * cols +
- w] = inputData[n * cols * rows * depth +
- c * cols * rows +
- h * cols +
- w] / (powf((kappa + (accumulated_scale * alpha)), beta));
+
+ unsigned int index = n * cols * rows * depth +
+ c * cols * rows +
+ h * cols +
+ w;
+ inputData[index];
+ outputData[index];
+ outputData.Set(inputData.Get() / (powf((kappa + (accumulated_scale * alpha)), beta)));
}
}
}
@@ -84,8 +90,8 @@ static void NormalizeWithinUingLbr(const float* inputData,
}
// Helper function to compute "Across" normalization using Krichevsky 2012: Local Brightness Normalization.
-void NormalizeAcrossUingLbr(const float* inputData,
- float* outputData,
+void NormalizeAcrossUingLbr(Decoder<float>& inputData,
+ Encoder<float>& outputData,
const TensorShape& tensorShape,
uint32_t norm_size,
float alpha,
@@ -93,13 +99,6 @@ void NormalizeAcrossUingLbr(const float* inputData,
float kappa,
DataLayout dataLayout)
{
- TensorBufferArrayView<const float> input(tensorShape,
- inputData,
- dataLayout);
- TensorBufferArrayView<float> output(tensorShape,
- outputData,
- dataLayout);
-
DataLayoutIndexed dataLayoutIndexed(dataLayout);
const unsigned int batchSize = tensorShape[0];
@@ -127,7 +126,14 @@ void NormalizeAcrossUingLbr(const float* inputData,
continue;
}
- float inval = input.Get(n, boost::numeric_cast<unsigned int>(k), h, w);
+ unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
+ n,
+ boost::numeric_cast<unsigned int>(k),
+ h,
+ w);
+
+ inputData[inputIndex];
+ float inval = inputData.Get();
accumulated_scale += inval * inval;
}
@@ -135,28 +141,42 @@ void NormalizeAcrossUingLbr(const float* inputData,
float scale = kappa + (accumulated_scale * alpha);
scale = powf(scale, -beta);
- output.Get(n, c, h, w) = scale * input.Get(n, c, h, w);
+ unsigned index = dataLayoutIndexed.GetIndex(tensorShape, n, c, h, w);
+
+ inputData[index];
+ outputData[index];
+ outputData.Set(scale * inputData.Get());
}
}
}
}
}
-void RefNormalizationFloat32Workload::Execute() const
+} // Anonymous namespace
+
+namespace armnn
+{
+
+RefNormalizationWorkload::RefNormalizationWorkload(const NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload(descriptor, info)
+{}
+
+void RefNormalizationWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationFloat32Workload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefNormalizationWorkload_Execute");
const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
- float* outputData = GetOutputTensorDataFloat(0, m_Data);
- const float* inputData = GetInputTensorDataFloat(0, m_Data);
+ auto inputDecoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+ auto outputEncoder = MakeEncoder<float>(inputInfo, m_Data.m_Outputs[0]->Map());
if (NormalizationAlgorithmMethod::LocalBrightness == m_Data.m_Parameters.m_NormMethodType)
{
if (NormalizationAlgorithmChannel::Within == m_Data.m_Parameters.m_NormChannelType)
{
- NormalizeWithinUingLbr(inputData,
- outputData,
+ NormalizeWithinUingLbr(*inputDecoder,
+ *outputEncoder,
inputInfo.GetShape(),
m_Data.m_Parameters.m_NormSize,
m_Data.m_Parameters.m_Alpha,
@@ -165,8 +185,8 @@ void RefNormalizationFloat32Workload::Execute() const
}
else if (NormalizationAlgorithmChannel::Across == m_Data.m_Parameters.m_NormChannelType)
{
- NormalizeAcrossUingLbr(inputData,
- outputData,
+ NormalizeAcrossUingLbr(*inputDecoder,
+ *outputEncoder,
inputInfo.GetShape(),
m_Data.m_Parameters.m_NormSize,
m_Data.m_Parameters.m_Alpha,
@@ -187,4 +207,4 @@ void RefNormalizationFloat32Workload::Execute() const
}
}
-} //namespace armnn
+} // namespace armnn
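
Both helpers implement the same Krichevsky-2012 local response normalization, output = input / (kappa + alpha * accumulated_scale)^beta, where accumulated_scale is the sum of squared inputs over a norm_size window taken either within one channel plane ("Within") or across channels ("Across"). The bare inputData[index]; statements added above are not dead code: under the decoder/encoder protocol, operator[] repositions the element cursor as a side effect, and Get()/Set() then act at that position. A minimal illustration, reusing the Decoder/Encoder interfaces sketched earlier:

void ScaleElement(Decoder<float>& inputData,
                  Encoder<float>& outputData,
                  unsigned int index,
                  float scale)
{
    inputData[index];                    // side effect only: move the decoder cursor
    const float inval = inputData.Get(); // dequantizing read at 'index'
    outputData[index];                   // move the encoder cursor
    outputData.Set(scale * inval);       // quantizing write at 'index'
}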
diff --git a/src/backends/reference/workloads/RefNormalizationFloat32Workload.hpp b/src/backends/reference/workloads/RefNormalizationWorkload.hpp
index 9dff187bd4..6d33c8afb2 100644
--- a/src/backends/reference/workloads/RefNormalizationFloat32Workload.hpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.hpp
@@ -11,11 +11,13 @@
namespace armnn
{
-class RefNormalizationFloat32Workload : public Float32Workload<NormalizationQueueDescriptor>
+class RefNormalizationWorkload : public BaseWorkload<NormalizationQueueDescriptor>
{
public:
- using Float32Workload<NormalizationQueueDescriptor>::Float32Workload;
+ explicit RefNormalizationWorkload(const NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info);
+
virtual void Execute() const override;
};
-} //namespace armnn
+} // namespace armnn
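
The base-class change is what opens the workload to non-float tensors: Float32Workload is a TypedWorkload alias that rejects any tensor whose data type is not Float32, while BaseWorkload imposes no such constraint. A simplified sketch of the relationship, with an assert standing in for armnn's real validation (illustrative shapes, not the actual Workload.hpp definitions):

#include <cassert>

enum class DataType { Float16, Float32, QuantisedAsymm8 };

template <typename QueueDescriptor>
struct BaseWorkload
{
    BaseWorkload(const QueueDescriptor& descriptor, DataType /*actual*/)
        : m_Data(descriptor) {} // accepts tensors of any data type
    QueueDescriptor m_Data;
};

template <typename QueueDescriptor, DataType ExpectedType>
struct TypedWorkload : BaseWorkload<QueueDescriptor>
{
    TypedWorkload(const QueueDescriptor& descriptor, DataType actual)
        : BaseWorkload<QueueDescriptor>(descriptor, actual)
    {
        assert(actual == ExpectedType); // rejects mismatched tensors at construction
    }
};

// The alias the old class derived from: Float32 only, by construction.
template <typename QueueDescriptor>
using Float32Workload = TypedWorkload<QueueDescriptor, DataType::Float32>;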
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index ce1e688dcf..96f98ee7a8 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -30,7 +30,7 @@
#include "RefSoftmaxWorkload.hpp"
#include "RefResizeBilinearFloat32Workload.hpp"
#include "ResizeBilinear.hpp"
-#include "RefNormalizationFloat32Workload.hpp"
+#include "RefNormalizationWorkload.hpp"
#include "RefDetectionPostProcessWorkload.hpp"
#include "BatchNormImpl.hpp"
#include "Activation.hpp"