author    Matthew Jackson <matthew.jackson@arm.com>  2019-09-11 09:19:18 +0100
committer Matthew Jackson <matthew.jackson@arm.com>  2019-09-11 10:49:25 +0000
commit    252df3a8b6bbf70a21f81d1bf239d08f8b09086f (patch)
tree      6e6eaf6814d71ca61c9bd700d4c1757ff671c292 /src
parent    0cfcf235c4bcd2ae570eea8bc2677f471281b8e6 (diff)
download  armnn-252df3a8b6bbf70a21f81d1bf239d08f8b09086f.tar.gz
IVGCVSW-3845 Add Reference FP16 support for required layers
* Working on layers required by FSRCNN, FCRN and DeepSpeaker
* Updates RefLayerSupport and RefWorkloadFactory methods
* Adds RefPadFloat16Workload
* Tested by successful execution of these networks on Reference FP16 backend

Signed-off-by: Matthew Jackson <matthew.jackson@arm.com>
Change-Id: I4817dca0a89bba6902f0feffc494b27a26a0ab2d
Diffstat (limited to 'src')
 src/backends/reference/RefLayerSupport.cpp          | 45
 src/backends/reference/RefWorkloadFactory.cpp       | 32
 src/backends/reference/workloads/Pad.cpp            |  6
 src/backends/reference/workloads/RefPadWorkload.cpp |  1
 src/backends/reference/workloads/RefPadWorkload.hpp |  5
 5 files changed, 44 insertions(+), 45 deletions(-)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 5692f9e143..4958968175 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -100,8 +100,9 @@ bool RefLayerSupport::IsActivationSupported(const TensorInfo& input,
bool supported = true;
// Define supported types.
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -162,8 +163,9 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -375,8 +377,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
bool supported = true;
// Define supported types.
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -398,8 +401,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
if (biases.has_value())
{
- std::array<DataType,2> biasesSupportedTypes = {
+ std::array<DataType,3> biasesSupportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::Signed32
};
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
@@ -445,9 +449,10 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
bool supported = true;
// Define supported types.
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -469,9 +474,10 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
if (biases.has_value())
{
- std::array<DataType,2> biasesSupportedTypes =
+ std::array<DataType,3> biasesSupportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::Signed32
};
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
@@ -656,9 +662,10 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
bool supported = true;
// Define supported types.
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -681,10 +688,11 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
if (descriptor.m_BiasEnabled)
{
// Defined supported types for bias
- std::array<DataType, 2>
+ std::array<DataType, 3>
supportedBiasTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::Signed32
};
@@ -772,9 +780,10 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
{
ignore_unused(descriptor);
// Define supported types
- std::array<DataType, 3> supportedTypes =
+ std::array<DataType, 4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -950,9 +959,10 @@ bool RefLayerSupport::IsMeanSupported(const TensorInfo& input,
std::string meanLayerStr = "Mean";
std::string outputTensorStr = "output";
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1077,8 +1087,9 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,3> supportedTypes = {
+ std::array<DataType,4> supportedTypes = {
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1150,9 +1161,10 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1206,9 +1218,10 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
bool supported = true;
// Define supported output and inputs types.
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1606,9 +1619,10 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
{
bool supported = true;
- std::array<DataType,3> supportedTypes =
+ std::array<DataType,4> supportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::QuantisedAsymm8,
DataType::QuantisedSymm16
};
@@ -1630,9 +1644,10 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
if (biases.has_value())
{
- std::array<DataType,2> biasesSupportedTypes =
+ std::array<DataType,3> biasesSupportedTypes =
{
DataType::Float32,
+ DataType::Float16,
DataType::Signed32
};
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
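
Every hunk in this file follows the same pattern: widen a fixed-size std::array of admissible DataTypes so that CheckSupportRule(TypeAnyOf(...)) now accepts Float16. A minimal standalone sketch of that gating pattern, using simplified stand-ins (a local DataType enum, TypeAnyOfSketch, IsActivationSupportedSketch) rather than the real Arm NN declarations:

#include <algorithm>
#include <array>
#include <cstddef>

// Simplified stand-in for armnn::DataType (illustration only).
enum class DataType { Float32, Float16, QuantisedAsymm8, QuantisedSymm16, Signed32 };

// Rough equivalent of CheckSupportRule(TypeAnyOf(...)): a tensor type is
// accepted only if it appears in the per-layer allow-list.
template <std::size_t N>
bool TypeAnyOfSketch(DataType type, const std::array<DataType, N>& allowed)
{
    return std::find(allowed.begin(), allowed.end(), type) != allowed.end();
}

bool IsActivationSupportedSketch(DataType inputType)
{
    // This patch grows each array from 3 to 4 entries to admit Float16.
    const std::array<DataType, 4> supportedTypes = {
        DataType::Float32,
        DataType::Float16,
        DataType::QuantisedAsymm8,
        DataType::QuantisedSymm16
    };
    return TypeAnyOfSketch(inputType, supportedTypes);
}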
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index f2dfb980b3..a3d4bf08c2 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -131,10 +131,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateOutput(const OutputQueueDes
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefActivationWorkload>(descriptor, info);
}
@@ -184,10 +180,6 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePermute(const Permut
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefPooling2dWorkload>(descriptor, info);
}
@@ -218,20 +210,12 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateNormalization(
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefAdditionWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMultiplication(
const MultiplicationQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefMultiplicationWorkload>(descriptor, info);
}
@@ -293,10 +277,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateFakeQuantization(
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
}
@@ -401,10 +381,6 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMaximum(
std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMean(
const MeanQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefMeanWorkload>(descriptor, info);
}
@@ -425,6 +401,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescripto
{
return std::make_unique<RefPadQSymm16Workload>(descriptor, info);
}
+ else if (IsFloat16(info))
+ {
+ return std::make_unique<RefPadFloat16Workload>(descriptor, info);
+ }
return MakeWorkload<RefPadFloat32Workload, RefPadQAsymm8Workload>(descriptor, info);
}
@@ -518,10 +498,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateTransposeConvolution2d(
const TransposeConvolution2dQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- if (IsFloat16(info))
- {
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
- }
return std::make_unique<RefTransposeConvolution2dWorkload>(descriptor, info);
}
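
Two complementary changes happen in this factory: the IsFloat16 early-outs that returned NullWorkload are deleted, so the templated reference workloads above now receive FP16 tensors directly, while CreatePad gains an explicit FP16 branch because Pad uses one workload class per data type. A minimal sketch of that dispatch, assuming simplified stand-in types (a WorkloadInfo with a single inputType field) rather than the real Arm NN interfaces:

#include <memory>

// Simplified stand-ins for the Arm NN types (illustration only).
enum class DataType { Float32, Float16, QuantisedAsymm8, QuantisedSymm16 };
struct WorkloadInfo { DataType inputType; };
struct IWorkload { virtual ~IWorkload() = default; };
template <DataType DT>
struct PadWorkloadSketch : IWorkload {};

bool IsFloat16(const WorkloadInfo& info)
{
    return info.inputType == DataType::Float16;
}

// After this patch, Pad dispatch covers all four types explicitly instead of
// rejecting FP16 with a NullWorkload.
std::unique_ptr<IWorkload> CreatePadSketch(const WorkloadInfo& info)
{
    if (info.inputType == DataType::QuantisedSymm16)
    {
        return std::make_unique<PadWorkloadSketch<DataType::QuantisedSymm16>>();
    }
    if (IsFloat16(info))
    {
        return std::make_unique<PadWorkloadSketch<DataType::Float16>>();
    }
    if (info.inputType == DataType::QuantisedAsymm8)
    {
        return std::make_unique<PadWorkloadSketch<DataType::QuantisedAsymm8>>();
    }
    return std::make_unique<PadWorkloadSketch<DataType::Float32>>();
}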
diff --git a/src/backends/reference/workloads/Pad.cpp b/src/backends/reference/workloads/Pad.cpp
index 5773cac6a8..42291b8661 100644
--- a/src/backends/reference/workloads/Pad.cpp
+++ b/src/backends/reference/workloads/Pad.cpp
@@ -158,6 +158,12 @@ template void Pad<float>(const TensorInfo& inputInfo,
const float* inputData,
float* outData,
const float padValue);
+template void Pad<Half>(const TensorInfo& inputInfo,
+ const TensorInfo& outputInfo,
+ std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
+ const Half* inputData,
+ Half* outData,
+ const float padValue);
template void Pad<uint8_t>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
std::vector<std::pair<unsigned int, unsigned int>> m_PadList,
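
Pad itself is a function template defined in Pad.cpp, so each element type needs an explicit instantiation like the Half one added above; note the pad value stays a float and is converted per element type. A minimal sketch of the same function-template idea, assuming a hypothetical Half stand-in rather than armnn::Half and a deliberately simplified signature:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for armnn::Half (a 16-bit float wrapper).
struct Half
{
    uint16_t bits;
    explicit Half(float) : bits(0) {} // real conversion omitted for brevity
};

// The template body lives in the .cpp file; callers link against the
// explicit instantiations below instead of seeing the definition.
template <typename T>
void PadSketch(std::vector<T>& data, std::size_t padCount, float padValue)
{
    data.insert(data.end(), padCount, static_cast<T>(padValue));
}

// One explicit instantiation per supported element type; Half is the new one.
template void PadSketch<float>(std::vector<float>&, std::size_t, float);
template void PadSketch<Half>(std::vector<Half>&, std::size_t, float);
template void PadSketch<uint8_t>(std::vector<uint8_t>&, std::size_t, float);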
diff --git a/src/backends/reference/workloads/RefPadWorkload.cpp b/src/backends/reference/workloads/RefPadWorkload.cpp
index 5e59d83dc9..c4b9daeb4c 100644
--- a/src/backends/reference/workloads/RefPadWorkload.cpp
+++ b/src/backends/reference/workloads/RefPadWorkload.cpp
@@ -34,6 +34,7 @@ void RefPadWorkload<DataType>::Execute() const
}
template class RefPadWorkload<DataType::Float32>;
+template class RefPadWorkload<DataType::Float16>;
template class RefPadWorkload<DataType::QuantisedAsymm8>;
template class RefPadWorkload<DataType::QuantisedSymm16>;
diff --git a/src/backends/reference/workloads/RefPadWorkload.hpp b/src/backends/reference/workloads/RefPadWorkload.hpp
index b1de53e930..d1521f4f8d 100644
--- a/src/backends/reference/workloads/RefPadWorkload.hpp
+++ b/src/backends/reference/workloads/RefPadWorkload.hpp
@@ -31,7 +31,8 @@ public:
};
using RefPadFloat32Workload = RefPadWorkload<DataType::Float32>;
-using RefPadQAsymm8Workload = RefPadWorkload<DataType::QuantisedAsymm8>;
-using RefPadQSymm16Workload = RefPadWorkload<DataType::QuantisedSymm16>;
+using RefPadFloat16Workload = RefPadWorkload<DataType::Float16>;
+using RefPadQAsymm8Workload = RefPadWorkload<DataType::QuantisedAsymm8>;
+using RefPadQSymm16Workload = RefPadWorkload<DataType::QuantisedSymm16>;
} //namespace armnn
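
The header aliases and the .cpp instantiations have to move in lockstep: RefPadFloat16Workload is only usable because RefPadWorkload.cpp instantiates RefPadWorkload<DataType::Float16>, and CreatePad refers to the alias. A minimal sketch of that alias-plus-explicit-instantiation pairing, with simplified stand-ins for the real Arm NN types:

// Simplified stand-in for armnn::DataType (illustration only).
enum class DataType { Float32, Float16, QuantisedAsymm8, QuantisedSymm16 };

template <DataType DT>
class RefPadWorkloadSketch
{
public:
    void Execute() const { /* pad tensors whose element type is DT */ }
};

// Header side: one readable alias per supported type, consumed by the factory.
using PadFloat32Sketch = RefPadWorkloadSketch<DataType::Float32>;
using PadFloat16Sketch = RefPadWorkloadSketch<DataType::Float16>; // new alias

// .cpp side: explicit instantiations emit the code the aliases name.
template class RefPadWorkloadSketch<DataType::Float32>;
template class RefPadWorkloadSketch<DataType::Float16>;           // new instantiation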