author     Keith Davis <keith.davis@arm.com>      2020-01-27 15:24:59 +0000
committer  Keith Davis Arm <keith.davis@arm.com>  2020-01-28 17:32:52 +0000
commit     5204aa8fd1da75ccc052269f358178b54bc7792f (patch)
tree       d3c2b354855dab0d2da5845fd46b3950fcc75841
parent     d305e1a203077bdbf2e3955abd252904127675a4 (diff)
download   armnn-5204aa8fd1da75ccc052269f358178b54bc7792f.tar.gz
IVGCVSW-4306 Verify the tflite Yolo v3
* Added debug layer support for QSymmS8
* QSymmS8 support for workloads

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I51af92fadc0be290629dd9198beab5abef9e351f
-rw-r--r--  src/backends/backendsCommon/MakeWorkloadHelper.hpp        |  8
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp              | 13
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp                | 62
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp             | 14
-rw-r--r--  src/backends/reference/workloads/Debug.cpp                |  6
-rw-r--r--  src/backends/reference/workloads/RefDebugWorkload.cpp     |  1
-rw-r--r--  src/backends/reference/workloads/RefDebugWorkload.hpp     |  9
-rw-r--r--  src/backends/reference/workloads/RefQuantizeWorkload.cpp  |  2
8 files changed, 79 insertions, 36 deletions
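
For context on the data type threaded through these changes: QSymmS8 is Arm NN's symmetric, signed 8-bit quantized type, where the zero point is fixed at 0 and only a scale maps stored int8 values to real ones. A minimal standalone sketch of that mapping (illustrative only, not armnn code):

#include <cstdint>
#include <cstdio>

// Dequantize a QSymmS8-style value: real = scale * stored; the zero point is 0.
float DequantizeQSymmS8(int8_t stored, float scale)
{
    return scale * static_cast<float>(stored);
}

int main()
{
    // With scale 0.1f, the stored value 13 represents roughly 1.3f.
    std::printf("%f\n", DequantizeQSymmS8(13, 0.1f));
    return 0;
}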
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 75db73c32f..f876c6b781 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -41,7 +41,7 @@ struct MakeWorkloadForType<NullWorkload>
// Makes a workload for one of the specified types based on the data type requirements of the tensor info.
// Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
template <typename Float16Workload, typename Float32Workload, typename Uint8Workload, typename Int32Workload,
- typename BooleanWorkload, typename QueueDescriptorType, typename... Args>
+ typename BooleanWorkload, typename Int8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
const WorkloadInfo& info,
Args&&... args)
@@ -58,6 +58,8 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
return MakeWorkloadForType<Float32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::QAsymmU8:
return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
+ case DataType::QSymmS8:
+ return MakeWorkloadForType<Int8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Signed32:
return MakeWorkloadForType<Int32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Boolean:
@@ -72,14 +74,14 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
// Makes a workload for one of the specified types based on the data type requirements of the tensor info.
// Calling this method is the equivalent of calling the six typed MakeWorkload method with <FloatWorkload,
-// FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>.
+// FloatWorkload, Uint8Workload, NullWorkload, NullWorkload, NullWorkload>.
// Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
const WorkloadInfo& info,
Args&&... args)
{
- return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>(
+ return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload, NullWorkload, NullWorkload>(
descriptor,
info,
std::forward<Args>(args)...);
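
The effect of this hunk, modeled as a standalone sketch (dummy types and a plain DataType argument; the real helper takes a queue descriptor and WorkloadInfo): each DataType selects one template slot, and QSymmS8 now selects the new Int8Workload slot instead of falling through to the default case.

#include <memory>

enum class DataType { Float16, Float32, QAsymmU8, QSymmS8, Signed32, Boolean };

struct IWorkload { virtual ~IWorkload() = default; };

template <typename Float16Workload, typename Float32Workload, typename Uint8Workload,
          typename Int32Workload, typename BooleanWorkload, typename Int8Workload>
std::unique_ptr<IWorkload> MakeWorkloadSketch(DataType type)
{
    switch (type)
    {
        case DataType::Float16:  return std::make_unique<Float16Workload>();
        case DataType::Float32:  return std::make_unique<Float32Workload>();
        case DataType::QAsymmU8: return std::make_unique<Uint8Workload>();
        case DataType::QSymmS8:  return std::make_unique<Int8Workload>(); // new slot
        case DataType::Signed32: return std::make_unique<Int32Workload>();
        case DataType::Boolean:  return std::make_unique<BooleanWorkload>();
    }
    return nullptr;
}

struct NullWorkload : IWorkload {};
struct DummyInt8Workload : IWorkload {};

int main()
{
    // Callers without an int8 workload pass NullWorkload for the sixth slot,
    // as the five-typed overload above now does.
    auto workload = MakeWorkloadSketch<NullWorkload, NullWorkload, NullWorkload,
                                       NullWorkload, NullWorkload,
                                       DummyInt8Workload>(DataType::QSymmS8);
    return workload != nullptr ? 0 : 1;
}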
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 075884b2da..5057c8c4df 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -32,6 +32,8 @@ DataType GetBiasDataType(DataType inputDataType)
return DataType::Float32;
case DataType::QAsymmU8:
return DataType::Signed32;
+ case DataType::QSymmS8:
+ return DataType::Signed32;
case DataType::QSymmS16:
return DataType::Signed32;
default:
@@ -418,8 +420,8 @@ void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
const DataType inputDataType = inputInfo.GetDataType();
const DataType outputDataType = outputInfo.GetDataType();
- const bool canHavePerAxisQuantization =
- inputDataType == DataType::QAsymmU8 && inputDataType == outputDataType;
+ const bool canHavePerAxisQuantization = (inputDataType == DataType::QSymmS8 ||
+ inputDataType == DataType::QAsymmU8) && inputDataType == outputDataType;
if (!canHavePerAxisQuantization)
{
@@ -1038,6 +1040,7 @@ void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16,
+ DataType::QSymmS8,
DataType::Float16
};
@@ -1071,6 +1074,7 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
{
DataType::Float32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16,
DataType::Float16
};
@@ -1178,6 +1182,7 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16,
+ DataType::QSymmS8,
DataType::Float16
};
@@ -1377,6 +1382,7 @@ void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -1529,6 +1535,7 @@ void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -1554,6 +1561,7 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -2098,6 +2106,7 @@ void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float32,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
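
The two behavioral changes in this file, condensed into a compilable sketch (simplified enum and signatures, not the armnn API): every quantized input type, now including QSymmS8, maps to Signed32 biases, and per-axis quantization is accepted for both 8-bit quantized input types.

#include <stdexcept>

enum class DataType { Float16, Float32, Signed32, QAsymmU8, QSymmS8, QSymmS16 };

DataType GetBiasDataTypeSketch(DataType inputDataType)
{
    switch (inputDataType)
    {
        case DataType::Float16: return DataType::Float16;
        case DataType::Float32: return DataType::Float32;
        // All quantized inputs, including the new QSymmS8, use 32-bit biases.
        case DataType::QAsymmU8:
        case DataType::QSymmS8:
        case DataType::QSymmS16: return DataType::Signed32;
        default: throw std::invalid_argument("Invalid input data type");
    }
}

bool CanHavePerAxisQuantizationSketch(DataType input, DataType output)
{
    // Mirrors the widened condition above: QSymmS8 or QAsymmU8, types matching.
    return (input == DataType::QSymmS8 || input == DataType::QAsymmU8)
           && input == output;
}

int main()
{
    return (GetBiasDataTypeSketch(DataType::QSymmS8) == DataType::Signed32
            && CanHavePerAxisQuantizationSketch(DataType::QSymmS8,
                                                DataType::QSymmS8)) ? 0 : 1;
}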
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index b801f70724..8410c303ae 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -147,9 +147,10 @@ bool RefLayerSupport::IsAdditionSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -329,10 +330,11 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -355,11 +357,12 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -417,22 +420,23 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
bool supported = true;
// Define supported types.
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
- "Reference convolution2d: input is not a supported type.");
+ "Reference Convolution2d: input is not a supported type.");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
- "Reference convolution2d: output is not a supported type.");
+ "Reference Convolution2d: output is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
- "Reference convolution2d: input and output types mismatched.");
+ "Reference Convolution2d: input and output types mismatched.");
const DataType inputType = input.GetDataType();
if (inputType == DataType::QAsymmU8)
@@ -447,15 +451,15 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
ARMNN_NO_DEPRECATE_WARN_END
supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
- "Reference convolution2d: weights type not supported for quantized input.");
+ "Reference Convolution2d: weights type not supported for quantized input.");
}
else
{
supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
- "Reference convolution2d: weights is not a supported type.");
+ "Reference Convolution2d: weights is not a supported type.");
supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
- "Reference convolution2d: input and weights types mismatched.");
+ "Reference Convolution2d: input and weights types mismatched.");
}
if (biases.has_value())
@@ -468,7 +472,7 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
};
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
- "Reference convolution2d: biases is not a supported type.");
+ "Reference Convolution2d: biases is not a supported type.");
}
ignore_unused(descriptor);
@@ -481,23 +485,24 @@ bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
{
bool supported = true;
- std::array<DataType, 5> supportedTypes =
+ std::array<DataType, 6> supportedTypes =
{
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16,
DataType::Signed32
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
- "Reference debug: input type not supported");
+ "Reference for Debug layer: input type not supported");
supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
- "Reference debug: output type not supported");
+ "Reference for Debug layer: output type not supported");
supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
- "Reference debug: input and output types are mismatched");
+ "Reference for Debug layer: input and output types are mismatched");
return supported;
}
@@ -612,7 +617,10 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
- "Reference dequantize: input type not supported.");
+ "Reference for Dequantize layer: input type not supported.");
+
+ supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
+ "Reference for Dequantize layer: per-axis quantized input not supported.");
supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
"Reference dequantize: per-axis quantized input not support .");
@@ -623,10 +631,11 @@ bool RefLayerSupport::IsDequantizeSupported(const TensorInfo& input,
};
supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
- "Reference dequantize: output type not supported.");
+ "Reference for Dequantize layer: output type not supported.");
supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
- "Reference dequantize: input and output shapes have different num total elements.");
+ "Reference for Dequantize layer: input/output shapes have different num total "
+ "elements.");
return supported;
}
@@ -1104,9 +1113,10 @@ bool RefLayerSupport::IsMaximumSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1270,9 +1280,10 @@ bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
{
bool supported = true;
- std::array<DataType,4> supportedTypes = {
+ std::array<DataType,5> supportedTypes = {
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
@@ -1428,8 +1439,9 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
bool supported = true;
// Define supported input types.
- std::array<DataType,1> supportedInputTypes = {
- DataType::Float32,
+ std::array<DataType,2> supportedInputTypes = {
+ DataType::QSymmS8,
+ DataType::Float32
};
supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
@@ -1458,12 +1470,13 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
ignore_unused(output);
ignore_unused(descriptor);
// Define supported output types.
- std::array<DataType,5> supportedOutputTypes =
+ std::array<DataType,6> supportedOutputTypes =
{
DataType::Float32,
DataType::Float16,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
@@ -1502,10 +1515,11 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
{
boost::ignore_unused(descriptor);
bool supported = true;
- std::array<DataType,4> supportedTypes =
+ std::array<DataType,5> supportedTypes =
{
DataType::Float32,
DataType::Float16,
+ DataType::QSymmS8,
DataType::QAsymmU8,
DataType::QSymmS16
};
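
All of the RefLayerSupport changes follow one pattern, shown here as a reduced sketch (hypothetical helper signatures standing in for CheckSupportRule and TypeAnyOf): each IsXxxSupported query accumulates checks with &=, so extending a layer to QSymmS8 is just growing its supportedTypes array by one entry.

#include <algorithm>
#include <array>
#include <cstddef>
#include <string>

enum class DataType { Float16, Float32, QAsymmU8, QSymmS8, QSymmS16 };

// Simplified stand-in for CheckSupportRule(TypeAnyOf(...)): records the reason on failure.
template <std::size_t N>
bool TypeAnyOf(DataType type, const std::array<DataType, N>& allowed,
               const char* reason, std::string& reasonIfUnsupported)
{
    if (std::find(allowed.begin(), allowed.end(), type) != allowed.end())
    {
        return true;
    }
    reasonIfUnsupported += reason;
    return false;
}

bool IsAdditionSupportedSketch(DataType input0, DataType output, std::string& reason)
{
    bool supported = true;
    // The array grew from 4 to 5 entries to admit QSymmS8.
    std::array<DataType, 5> supportedTypes =
        { DataType::Float32, DataType::Float16, DataType::QSymmS8,
          DataType::QAsymmU8, DataType::QSymmS16 };

    supported &= TypeAnyOf(input0, supportedTypes,
                           "Reference addition: input is not a supported type.", reason);
    supported &= TypeAnyOf(output, supportedTypes,
                           "Reference addition: output is not a supported type.", reason);
    return supported;
}

int main()
{
    std::string reason;
    return IsAdditionSupportedSketch(DataType::QSymmS8, DataType::QSymmS8, reason) ? 0 : 1;
}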
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index e7a9c19fc7..792bd7d3ad 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -24,7 +24,8 @@ template <typename F32Workload, typename U8Workload, typename QueueDescriptorTyp
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
+ (descriptor, info);
}
template <DataType ArmnnType>
@@ -54,6 +55,11 @@ bool IsQSymm16(const WorkloadInfo& info)
return IsDataType<DataType::QSymmS16>(info);
}
+bool IsQSymm8(const WorkloadInfo& info)
+{
+ return IsDataType<DataType::QSymmS8>(info);
+}
+
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
: m_MemoryManager(memoryManager)
{
@@ -185,6 +191,10 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescr
{
return std::make_unique<RefDebugQSymm16Workload>(descriptor, info);
}
+ if (IsQSymm8(info))
+ {
+ return std::make_unique<RefDebugQSymm8Workload>(descriptor, info);
+ }
if (IsDataType<DataType::Signed32>(info))
{
return std::make_unique<RefDebugSigned32Workload>(descriptor, info);
@@ -419,7 +429,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePermute(const PermuteQueueD
return std::make_unique<RefPermuteQSymm16Workload>(descriptor, info);
}
return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
- NullWorkload, NullWorkload>(descriptor, info);
+ NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
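
The CreateDebug change in context, as a self-contained sketch (dummy types; the real predicates inspect a WorkloadInfo): data types with no slot in the generic helper get an explicit predicate-guarded branch, and QSymmS8 now gets one alongside QSymmS16.

#include <memory>

enum class DataType { Float32, QAsymmU8, QSymmS8, QSymmS16, Signed32 };

struct IWorkload { virtual ~IWorkload() = default; };
template <DataType T> struct RefDebugWorkloadSketch : IWorkload {};

bool IsQSymm16(DataType type) { return type == DataType::QSymmS16; }
bool IsQSymm8(DataType type)  { return type == DataType::QSymmS8;  }

std::unique_ptr<IWorkload> CreateDebugSketch(DataType type)
{
    if (IsQSymm16(type))
    {
        return std::make_unique<RefDebugWorkloadSketch<DataType::QSymmS16>>();
    }
    if (IsQSymm8(type)) // branch added by this commit
    {
        return std::make_unique<RefDebugWorkloadSketch<DataType::QSymmS8>>();
    }
    // Everything else falls through to the generic MakeWorkload helper
    // in the real factory.
    return std::make_unique<RefDebugWorkloadSketch<DataType::Float32>>();
}

int main()
{
    return CreateDebugSketch(DataType::QSymmS8) != nullptr ? 0 : 1;
}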
diff --git a/src/backends/reference/workloads/Debug.cpp b/src/backends/reference/workloads/Debug.cpp
index 0f192f3ab3..49e9e02ffb 100644
--- a/src/backends/reference/workloads/Debug.cpp
+++ b/src/backends/reference/workloads/Debug.cpp
@@ -106,6 +106,12 @@ template void Debug<uint8_t>(const TensorInfo& inputInfo,
const std::string& layerName,
unsigned int slotIndex);
+template void Debug<int8_t>(const TensorInfo& inputInfo,
+ const int8_t* inputData,
+ LayerGuid guid,
+ const std::string& layerName,
+ unsigned int slotIndex);
+
template void Debug<int16_t>(const TensorInfo& inputInfo,
const int16_t* inputData,
LayerGuid guid,
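
For readers unfamiliar with the pattern in Debug.cpp: the template function is defined in the .cpp file and explicitly instantiated per element type, so this hunk adds the int8_t instantiation that the new QSymmS8 debug workload links against. A minimal standalone illustration of that technique (hypothetical function name):

#include <cstdint>
#include <cstdio>

// Template defined in a .cpp file rather than a header...
template <typename T>
void DebugPrint(const T* data, unsigned int count)
{
    for (unsigned int i = 0; i < count; ++i)
    {
        std::printf("%ld\n", static_cast<long>(data[i]));
    }
}

// ...so each element type the backend supports needs an explicit
// instantiation, exactly as Debug.cpp now does for int8_t.
template void DebugPrint<uint8_t>(const uint8_t*, unsigned int);
template void DebugPrint<int8_t>(const int8_t*, unsigned int);
template void DebugPrint<int16_t>(const int16_t*, unsigned int);

int main()
{
    const int8_t values[] = { -128, 0, 127 };
    DebugPrint(values, 3);
    return 0;
}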
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index 59b836da09..2f0b4276c0 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -48,6 +48,7 @@ template class RefDebugWorkload<DataType::Float16>;
template class RefDebugWorkload<DataType::Float32>;
template class RefDebugWorkload<DataType::QAsymmU8>;
template class RefDebugWorkload<DataType::QSymmS16>;
+template class RefDebugWorkload<DataType::QSymmS8>;
template class RefDebugWorkload<DataType::Signed32>;
} // namespace armnn
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index 58e4464b00..a15a863892 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -37,10 +37,11 @@ private:
DebugCallbackFunction m_Callback;
};
-using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
-using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
-using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
-using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
+using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
+using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
+using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
+using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
+using RefDebugQSymm8Workload = RefDebugWorkload<DataType::QSymmS8>;
using RefDebugSigned32Workload = RefDebugWorkload<DataType::Signed32>;
} // namespace armnn
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index 31534abe3e..ab2ee7fc4e 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -50,7 +50,7 @@ void RefQuantizeWorkload::Execute() const
}
case DataType::QSymmS8:
{
- QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset);
+ QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, 0);
break;
}
case DataType::QSymmS16:
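
On this final hunk: passing a literal 0 instead of m_Offset is deliberate, since QSymmS8 is symmetric and its zero point is 0 by definition; reusing the asymmetric offset would shift every quantized value. A sketch of the quantization loop (simplified signature, hypothetical name) showing where that argument lands:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

template <typename T>
void QuantizeSketch(const float* input, T* output, unsigned int numElements,
                    float scale, int offset)
{
    for (unsigned int i = 0; i < numElements; ++i)
    {
        // offset is the zero point; for symmetric types it must be 0.
        int quantized = static_cast<int>(std::round(input[i] / scale)) + offset;
        output[i] = static_cast<T>(std::clamp(quantized,
            static_cast<int>(std::numeric_limits<T>::min()),
            static_cast<int>(std::numeric_limits<T>::max())));
    }
}

int main()
{
    const float input[] = { -1.0f, 0.0f, 1.27f };
    int8_t output[3];
    // QSymmS8: zero point fixed at 0, matching the change above.
    QuantizeSketch<int8_t>(input, output, 3, 0.01f, 0);
    return output[2] == 127 ? 0 : 1;
}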