author    Keith Davis <keith.davis@arm.com>        2020-01-27 15:24:59 +0000
committer Keith Davis Arm <keith.davis@arm.com>    2020-01-28 17:32:52 +0000
commit    5204aa8fd1da75ccc052269f358178b54bc7792f (patch)
tree      d3c2b354855dab0d2da5845fd46b3950fcc75841 /src/backends/backendsCommon
parent    d305e1a203077bdbf2e3955abd252904127675a4 (diff)
download  armnn-5204aa8fd1da75ccc052269f358178b54bc7792f.tar.gz
IVGCVSW-4306 Verify the tflite Yolo v3
* Added debug layer support for QSymmS8
* QSymmS8 support for workloads

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I51af92fadc0be290629dd9198beab5abef9e351f
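For background, QSymmS8 is a symmetric, signed 8-bit quantized data type: values are stored as int8 with the zero point fixed at 0, so a single scale maps stored values to real ones. The snippet below is a minimal standalone sketch of that mapping for illustration only; QuantizeSymmS8 and DequantizeSymmS8 are made-up helper names, not part of the ArmNN API.

// Minimal sketch (not ArmNN code) of symmetric signed 8-bit quantization:
// the zero point is fixed at 0, so only a per-tensor scale is needed.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int8_t QuantizeSymmS8(float value, float scale)
{
    int rounded = static_cast<int>(std::round(value / scale));
    return static_cast<int8_t>(std::clamp(rounded, -128, 127));
}

float DequantizeSymmS8(int8_t value, float scale)
{
    return static_cast<float>(value) * scale;
}

int main()
{
    const float scale = 0.1f;                 // hypothetical per-tensor scale
    int8_t q = QuantizeSymmS8(1.27f, scale);  // rounds to 13
    std::cout << static_cast<int>(q) << " ~ " << DequantizeSymmS8(q, scale) << "\n";
    return 0;
}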
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/MakeWorkloadHelper.hpp   8
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp         13
2 files changed, 16 insertions, 5 deletions
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 75db73c32f..f876c6b781 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -41,7 +41,7 @@ struct MakeWorkloadForType<NullWorkload>
// Makes a workload for one the specified types based on the data type requirements of the tensorinfo.
// Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
template <typename Float16Workload, typename Float32Workload, typename Uint8Workload, typename Int32Workload,
- typename BooleanWorkload, typename QueueDescriptorType, typename... Args>
+ typename BooleanWorkload, typename Int8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
const WorkloadInfo& info,
Args&&... args)
@@ -58,6 +58,8 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
return MakeWorkloadForType<Float32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::QAsymmU8:
return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
+ case DataType::QSymmS8:
+ return MakeWorkloadForType<Int8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Signed32:
return MakeWorkloadForType<Int32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
case DataType::Boolean:
@@ -72,14 +74,14 @@ std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descrip
// Makes a workload for one the specified types based on the data type requirements of the tensorinfo.
// Calling this method is the equivalent of calling the five typed MakeWorkload method with <FloatWorkload,
-// FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>.
+// FloatWorkload, Uint8Workload, NullWorkload, NullWorkload, NullWorkload>.
// Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
const WorkloadInfo& info,
Args&&... args)
{
- return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>(
+ return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload, NullWorkload, NullWorkload>(
descriptor,
info,
std::forward<Args>(args)...);
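The hunk above adds a sixth template parameter, Int8Workload, to the generic MakeWorkloadHelper and routes DataType::QSymmS8 to it, while the two-type convenience overload passes NullWorkload for the new slot. The following standalone sketch models that dispatch pattern with simplified stand-in types (none of these are the real ArmNN classes) to show how one template parameter per data type works:

// Self-contained model of the dispatch pattern (not ArmNN code): one workload
// type per DataType, with an incomplete NullWorkload marking unsupported combos.
#include <iostream>
#include <memory>

enum class DataType { Float32, QAsymmU8, QSymmS8 };

struct IWorkload { virtual ~IWorkload() = default; };
struct NullWorkload;                  // never defined: "no workload for this type"

struct Float32Stub : IWorkload {};
struct Uint8Stub   : IWorkload {};
struct Int8Stub    : IWorkload {};

template <typename WorkloadType>
std::unique_ptr<IWorkload> MakeForType() { return std::make_unique<WorkloadType>(); }

template <>
std::unique_ptr<IWorkload> MakeForType<NullWorkload>() { return nullptr; }

// Shaped like the six-type helper, reduced to the three types that matter here.
template <typename Float32Workload, typename Uint8Workload, typename Int8Workload>
std::unique_ptr<IWorkload> MakeWorkload(DataType type)
{
    switch (type)
    {
        case DataType::Float32:  return MakeForType<Float32Workload>();
        case DataType::QAsymmU8: return MakeForType<Uint8Workload>();
        case DataType::QSymmS8:  return MakeForType<Int8Workload>();   // the new branch
        default:                 return nullptr;
    }
}

int main()
{
    auto workload = MakeWorkload<Float32Stub, Uint8Stub, Int8Stub>(DataType::QSymmS8);
    std::cout << (workload ? "Int8 workload created\n" : "unsupported data type\n");
    return 0;
}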
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 075884b2da..5057c8c4df 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -32,6 +32,8 @@ DataType GetBiasDataType(DataType inputDataType)
return DataType::Float32;
case DataType::QAsymmU8:
return DataType::Signed32;
+ case DataType::QSymmS8:
+ return DataType::Signed32;
case DataType::QSymmS16:
return DataType::Signed32;
default:
@@ -418,8 +420,8 @@ void ValidatePerAxisQuantization(const TensorInfo& inputInfo,
const DataType inputDataType = inputInfo.GetDataType();
const DataType outputDataType = outputInfo.GetDataType();
- const bool canHavePerAxisQuantization =
- inputDataType == DataType::QAsymmU8 && inputDataType == outputDataType;
+ const bool canHavePerAxisQuantization = (inputDataType == DataType::QSymmS8 ||
+ inputDataType == DataType::QAsymmU8) && inputDataType == outputDataType;
if (!canHavePerAxisQuantization)
{
@@ -1038,6 +1040,7 @@ void AdditionQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16,
+ DataType::QSymmS8,
DataType::Float16
};
@@ -1071,6 +1074,7 @@ void MultiplicationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) c
{
DataType::Float32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16,
DataType::Float16
};
@@ -1178,6 +1182,7 @@ void Convolution2dQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) co
DataType::Float32,
DataType::QAsymmU8,
DataType::QSymmS16,
+ DataType::QSymmS8,
DataType::Float16
};
@@ -1377,6 +1382,7 @@ void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Float32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -1529,6 +1535,7 @@ void ConstantQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -1554,6 +1561,7 @@ void ReshapeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float16,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
@@ -2098,6 +2106,7 @@ void MaximumQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
DataType::Float32,
DataType::Signed32,
DataType::QAsymmU8,
+ DataType::QSymmS8,
DataType::QSymmS16
};
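The WorkloadData.cpp changes are all on the validation side: GetBiasDataType now maps a QSymmS8 input to a Signed32 bias, matching the other quantized types; the per-axis quantization check accepts QSymmS8 alongside QAsymmU8 (still requiring matching input and output types); and QSymmS8 is appended to the supported-type lists of the Validate() methods shown above. A rough standalone sketch of the first two rules, using simplified signatures rather than the real TensorInfo-based ones:

// Rough sketch (not ArmNN code) of the two validation rules touched above.
#include <stdexcept>

enum class DataType { Float32, QAsymmU8, QSymmS8, QSymmS16, Signed32 };

// Quantized inputs accumulate into 32-bit integer biases; floats keep their type.
DataType GetBiasDataType(DataType inputDataType)
{
    switch (inputDataType)
    {
        case DataType::Float32:  return DataType::Float32;
        case DataType::QAsymmU8: return DataType::Signed32;
        case DataType::QSymmS8:  return DataType::Signed32;   // added by this change
        case DataType::QSymmS16: return DataType::Signed32;
        default: throw std::invalid_argument("Invalid input data type");
    }
}

// Per-axis (per-channel) quantization is only allowed for 8-bit quantized inputs
// whose output uses the same data type; QSymmS8 is now accepted as well.
bool CanHavePerAxisQuantization(DataType input, DataType output)
{
    return (input == DataType::QSymmS8 || input == DataType::QAsymmU8) && input == output;
}

int main()
{
    return GetBiasDataType(DataType::QSymmS8) == DataType::Signed32 &&
           CanHavePerAxisQuantization(DataType::QSymmS8, DataType::QSymmS8) ? 0 : 1;
}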