aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/reference')
-rw-r--r--src/backends/reference/RefLayerSupport.cpp62
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp14
-rw-r--r--src/backends/reference/workloads/Debug.cpp6
-rw-r--r--src/backends/reference/workloads/RefDebugWorkload.cpp1
-rw-r--r--src/backends/reference/workloads/RefDebugWorkload.hpp9
-rw-r--r--src/backends/reference/workloads/RefQuantizeWorkload.cpp2
6 files changed, 63 insertions, 31 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index b801f70724..8410c303ae 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -147,9 +147,10 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -329,10 +330,11 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -355,11 +357,12 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -417,22 +420,23 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
bool supported = true;
// Define supported types.
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
- "Reference convolution2d: input is not a supported type.");
+ "Reference Convolution2d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
- "Reference convolution2d: output is not a supported type.");
+ "Reference Convolution2d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
- "Reference convolution2d: input and output types mismatched.");
+ "Reference Convolution2d: input and output types mismatched.");
const DataType inputType = input.GetDataType();
if (inputType == DataType::QAsymmU8)
@@ -447,15 +451,15 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
ARMNN_NO_DEPRECATE_WARN_END
supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
- "Reference convolution2d: weights type not supported for quantized input.");
+ "Reference Convolution2d: weights type not supported for quantized input.");
}
else
{
supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
- "Reference convolution2d: weights is not a supported type.");
+ "Reference Convolution2d: weights is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
- "Reference convolution2d: input and weights types mismatched.");
+ "Reference Convolution2d: input and weights types mismatched.");
}
if (biases.has_value())
@@ -468,7 +472,7 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
};
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
- "Reference convolution2d: biases is not a supported type.");
+ "Reference Convolution2d: biases is not a supported type.");
}
ignore_unused(descriptor);
@@ -481,23 +485,24 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
{
bool supported = true;
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16,
DataType::Signed32
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
- "Reference debug: input type not supported");
+ "Reference for Debug layer: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
- "Reference debug: output type not supported");
+ "Reference for Debug layer: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
- "Reference debug: input and output types are mismatched");
+ "Reference for Debug layer: input and output types are mismatched");
return supported;
}
@@ -612,7 +617,10 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
- "Reference dequantize: input type not supported.");
+ "Reference for Dequantize layer: input type not supported.");
+
+ supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
+ "Reference for Dequantize layer: per-axis quantized input not supported.");
supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
"Reference dequantize: per-axis quantized input not support .");
@@ -623,10 +631,11 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
- "Reference dequantize: output type not supported.");
+ "Reference for Dequantize layer: output type not supported.");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
- "Reference dequantize: input and output shapes have different num total elements.");
+ "Reference for Dequantize layer: input/output shapes have different num total "
+ "elements.");
return supported;
}
@@ -1104,9 +1113,10 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1270,9 +1280,10 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1428,8 +1439,9 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
bool supported = true;
// Define supported input types.
- std::array<DataType,1> supportedInputTypes = {
- DataType::Float32,
+ std::array<DataType,2> supportedInputTypes = {
+ DataType::QSymmS8,
+ DataType::Float32
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
@@ -1458,12 +1470,13 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
ignore_unused(output);
ignore_unused(descriptor);
// Define supported output types.
- std::array<DataType,5> supportedOutputTypes =
+ std::array<DataType,6> supportedOutputTypes =
{
DataType::Float32,
DataType::Float16,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
@@ -1502,10 +1515,11 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
{
boost::ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index e7a9c19fc7..792bd7d3ad 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -24,7 +24,8 @@ template <typename F32Workload, typename U8Workload, typename QueueDescriptorTyp
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
+ (descriptor, info);
}
template <DataType ArmnnType>
@@ -54,6 +55,11 @@ bool IsQSymm16(const WorkloadInfo& info)
return IsDataType<DataType::QSymmS16>(info);
}
+bool IsQSymm8(const WorkloadInfo& info)
+{
+ return IsDataType<DataType::QSymmS8>(info);
+}
+
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
: m_MemoryManager(memoryManager)
{
@@ -185,6 +191,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescr
{
return std::make_unique<RefDebugQSymm16Workload>(descriptor, info);
}
+ if (IsQSymm8(info))
+ {
+ return std::make_unique<RefDebugQSymm8Workload>(descriptor, info);
+ }
if (IsDataType<DataType::Signed32>(info))
{
return std::make_unique<RefDebugSigned32Workload>(descriptor, info);
@@ -419,7 +429,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueD
return std::make_unique<RefPermuteQSymm16Workload>(descriptor, info);
}
return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
- NullWorkload, NullWorkload>(descriptor, info);
+ NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
diff --git a/src/backends/reference/workloads/Debug.cpp b/src/backends/reference/workloads/Debug.cpp
index 0f192f3ab3..49e9e02ffb 100644
--- a/src/backends/reference/workloads/Debug.cpp
+++ b/src/backends/reference/workloads/Debug.cpp
@@ -106,6 +106,12 @@ template void Debug<uint8_t>(const TensorInfo& inputInfo,
const std::string& layerName,
unsigned int slotIndex);
+template void Debug<int8_t>(const TensorInfo& inputInfo,
+ const int8_t* inputData,
+ LayerGuid guid,
+ const std::string& layerName,
+ unsigned int slotIndex);
+
template void Debug<int16_t>(const TensorInfo& inputInfo,
const int16_t* inputData,
LayerGuid guid,
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index 59b836da09..2f0b4276c0 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -48,6 +48,7 @@ template class RefDebugWorkload<DataType::Float16>;
template class RefDebugWorkload<DataType::Float32>;
template class RefDebugWorkload<DataType::QAsymmU8>;
template class RefDebugWorkload<DataType::QSymmS16>;
+template class RefDebugWorkload<DataType::QSymmS8>;
template class RefDebugWorkload<DataType::Signed32>;
} // namespace armnn
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index 58e4464b00..a15a863892 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -37,10 +37,11 @@ private:
DebugCallbackFunction m_Callback;
};
-using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
-using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
-using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
-using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
+using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
+using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
+using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
+using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
+using RefDebugQSymm8Workload = RefDebugWorkload<DataType::QSymmS8>;
using RefDebugSigned32Workload = RefDebugWorkload<DataType::Signed32>;
} // namespace armnn
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index 31534abe3e..ab2ee7fc4e 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -50,7 +50,7 @@ void RefQuantizeWorkload::Execute() const
}
case DataType::QSymmS8:
{
- QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset);
+ QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, 0);
break;
}
case DataType::QSymmS16: