aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorSadik Armagan <sadik.armagan@arm.com>2020-03-19 13:53:16 +0000
committerJim Flynn <jim.flynn@arm.com>2020-03-19 21:47:24 +0000
commitb60dd243d3d8131f246f2b122309b998287151d2 (patch)
treeaf5e84f753236078006c32957a7d35f4ac708563 /src
parenta0687eef149fbf57bb6db0621ec65724f550b1ed (diff)
downloadarmnn-b60dd243d3d8131f246f2b122309b998287151d2.tar.gz
IVGCVSW-4565 TENSOR_BOOL8 data type not supported in AndroidNN Driver
* Enabled Boolean and Int32 data types in Reference Comparison inputs
* Added decoder for Boolean data type
* Refactored ClGreaterWorkload to work with any data type
* Refactored NeonGreaterWorkload to work with any data type

!android-nn-driver:2902

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I92772810b744b388831c9dca0119ebf8cb7a447e
Diffstat (limited to 'src')
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp3
-rw-r--r--src/backends/cl/workloads/ClGreaterWorkload.cpp12
-rw-r--r--src/backends/cl/workloads/ClGreaterWorkload.hpp7
-rw-r--r--src/backends/neon/NeonWorkloadFactory.cpp3
-rw-r--r--src/backends/neon/workloads/NeonGreaterWorkload.cpp11
-rw-r--r--src/backends/neon/workloads/NeonGreaterWorkload.hpp8
-rw-r--r--src/backends/reference/RefLayerSupport.cpp7
-rw-r--r--src/backends/reference/workloads/BaseIterator.hpp16
-rw-r--r--src/backends/reference/workloads/Decoders.hpp4
9 files changed, 34 insertions, 37 deletions
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index e7e4fa7e1b..4f707beebe 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -178,8 +178,7 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateComparison(const ComparisonQ
GreaterQueueDescriptor greaterQueueDescriptor;
greaterQueueDescriptor.m_Inputs = descriptor.m_Inputs;
greaterQueueDescriptor.m_Outputs = descriptor.m_Outputs;
-
- return MakeWorkload<ClGreaterFloat32Workload, ClGreaterUint8Workload>(greaterQueueDescriptor, info);
+ return MakeWorkload<ClGreaterWorkload>(greaterQueueDescriptor, info);
}
return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
}
diff --git a/src/backends/cl/workloads/ClGreaterWorkload.cpp b/src/backends/cl/workloads/ClGreaterWorkload.cpp
index b086122bdc..2051cc3aa3 100644
--- a/src/backends/cl/workloads/ClGreaterWorkload.cpp
+++ b/src/backends/cl/workloads/ClGreaterWorkload.cpp
@@ -38,10 +38,8 @@ arm_compute::Status ClGreaterWorkloadValidate(const TensorInfo& input0,
return aclStatus;
}
-template<DataType T>
-ClGreaterWorkload<T>::ClGreaterWorkload(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : MultiTypedWorkload<GreaterQueueDescriptor, T, DataType::Boolean>(descriptor, info)
+ClGreaterWorkload::ClGreaterWorkload(const GreaterQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<GreaterQueueDescriptor>(descriptor, info)
{
m_Data.ValidateInputsOutputs("ClGreaterWorkload", 2, 1);
@@ -52,14 +50,10 @@ ClGreaterWorkload<T>::ClGreaterWorkload(const GreaterQueueDescriptor& descriptor
m_GreaterLayer.configure(&input0, &input1, &output, arm_compute::ComparisonOperation::Greater);
}
-template<DataType T>
-void ClGreaterWorkload<T>::Execute() const
+void ClGreaterWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_CL("ClGreaterWorkload_Execute");
RunClFunction(m_GreaterLayer, CHECK_LOCATION());
}
-template class ClGreaterWorkload<DataType::Float32>;
-template class ClGreaterWorkload<DataType::QAsymmU8>;
-
} //namespace armnn
diff --git a/src/backends/cl/workloads/ClGreaterWorkload.hpp b/src/backends/cl/workloads/ClGreaterWorkload.hpp
index 862e168334..9b2a1710bc 100644
--- a/src/backends/cl/workloads/ClGreaterWorkload.hpp
+++ b/src/backends/cl/workloads/ClGreaterWorkload.hpp
@@ -16,19 +16,14 @@ arm_compute::Status ClGreaterWorkloadValidate(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output);
-template<DataType T>
-class ClGreaterWorkload : public MultiTypedWorkload<GreaterQueueDescriptor, T, DataType::Boolean>
+class ClGreaterWorkload : public BaseWorkload<GreaterQueueDescriptor>
{
public:
ClGreaterWorkload(const GreaterQueueDescriptor& descriptor, const WorkloadInfo& info);
void Execute() const override;
private:
- using MultiTypedWorkload<GreaterQueueDescriptor, T, DataType::Boolean>::m_Data;
mutable arm_compute::CLComparison m_GreaterLayer;
};
-using ClGreaterFloat32Workload = ClGreaterWorkload<DataType::Float32>;
-using ClGreaterUint8Workload = ClGreaterWorkload<DataType::QAsymmU8>;
-
} //namespace armnn
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index cf9999f5d2..cc7dca031d 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -144,8 +144,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateComparison(const Compariso
GreaterQueueDescriptor greaterQueueDescriptor;
greaterQueueDescriptor.m_Inputs = descriptor.m_Inputs;
greaterQueueDescriptor.m_Outputs = descriptor.m_Outputs;
-
- return MakeWorkloadHelper<NeonGreaterFloat32Workload, NeonGreaterUint8Workload>(greaterQueueDescriptor, info);
+ return std::make_unique<NeonGreaterWorkload>(greaterQueueDescriptor, info);
}
return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
}
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.cpp b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
index 6380dfada5..1ec5ac4be0 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
@@ -23,9 +23,8 @@ arm_compute::Status NeonGreaterWorkloadValidate(const TensorInfo& input0,
&aclOutput);
}
-template <DataType T>
-NeonGreaterWorkload<T>::NeonGreaterWorkload(const GreaterQueueDescriptor& descriptor, const WorkloadInfo& info)
- : MultiTypedWorkload<GreaterQueueDescriptor, T, DataType::Boolean>(descriptor, info)
+NeonGreaterWorkload::NeonGreaterWorkload(const GreaterQueueDescriptor& descriptor, const WorkloadInfo& info)
+ : BaseWorkload<GreaterQueueDescriptor>(descriptor, info)
{
m_Data.ValidateInputsOutputs("NeonGreaterWorkload", 2, 1);
@@ -36,14 +35,10 @@ NeonGreaterWorkload<T>::NeonGreaterWorkload(const GreaterQueueDescriptor& descri
m_GreaterLayer.configure(&input0, &input1, &output);
}
-template <DataType T>
-void NeonGreaterWorkload<T>::Execute() const
+void NeonGreaterWorkload::Execute() const
{
ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonGreaterWorkload_Execute");
m_GreaterLayer.run();
}
-template class NeonGreaterWorkload<DataType::Float32>;
-template class NeonGreaterWorkload<DataType::QAsymmU8>;
-
} //namespace armnn \ No newline at end of file
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.hpp b/src/backends/neon/workloads/NeonGreaterWorkload.hpp
index bcab27e7a6..503e60e784 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.hpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.hpp
@@ -16,12 +16,9 @@ arm_compute::Status NeonGreaterWorkloadValidate(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output);
-template <DataType T>
-class NeonGreaterWorkload : public MultiTypedWorkload<GreaterQueueDescriptor, T, DataType::Boolean>
+class NeonGreaterWorkload : public BaseWorkload<GreaterQueueDescriptor>
{
public:
- using MultiTypedWorkload<GreaterQueueDescriptor, T, DataType::Boolean>::m_Data;
-
NeonGreaterWorkload(const GreaterQueueDescriptor& descriptor, const WorkloadInfo& info);
virtual void Execute() const override;
@@ -30,7 +27,4 @@ private:
mutable arm_compute::NEGreater m_GreaterLayer;
};
-using NeonGreaterFloat32Workload = NeonGreaterWorkload<DataType::Float32>;
-using NeonGreaterUint8Workload = NeonGreaterWorkload<DataType::QAsymmU8>;
-
} //namespace armnn \ No newline at end of file
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 9f22b9ef0e..7d3600cf09 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -302,14 +302,15 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
Optional<std::string&> reasonIfUnsupported) const
{
IgnoreUnused(descriptor);
-
- std::array<DataType, 5> supportedInputTypes =
+ std::array<DataType, 7> supportedInputTypes =
{
+ DataType::Boolean,
DataType::BFloat16,
DataType::Float32,
DataType::Float16,
DataType::QAsymmU8,
- DataType::QSymmS16
+ DataType::QSymmS16,
+ DataType::Signed32
};
bool supported = true;
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 5cae5bda83..f43e8b67a9 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -275,6 +275,22 @@ public:
}
};
+class BooleanDecoder : public TypedIterator<const uint8_t, Decoder<float>>
+{
+public:
+ BooleanDecoder(const uint8_t* data)
+ : TypedIterator(data) {}
+
+ BooleanDecoder()
+ : BooleanDecoder(nullptr) {}
+
+ float Get() const override
+ {
+ return *m_Iterator;
+ }
+
+};
+
class QASymm8Encoder : public TypedIterator<uint8_t, Encoder<float>>
{
public:
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 83c57c1169..3434ccb764 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -136,6 +136,10 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
info.GetQuantizationOffset());
}
}
+ case armnn::DataType::Boolean:
+ {
+ return std::make_unique<BooleanDecoder>(static_cast<const uint8_t*>(data));
+ }
default:
{
BOOST_ASSERT_MSG(false, "Unsupported Data Type!");