aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/backends/backendsCommon/StringMapping.hpp16
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp13
-rwxr-xr-xsrc/backends/backendsCommon/test/LayerTests.cpp173
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp24
-rw-r--r--src/backends/cl/ClLayerSupport.cpp12
-rw-r--r--src/backends/cl/ClLayerSupport.hpp5
-rw-r--r--src/backends/neon/NeonLayerSupport.cpp12
-rw-r--r--src/backends/neon/NeonLayerSupport.hpp5
-rw-r--r--src/backends/reference/RefLayerSupport.cpp15
-rw-r--r--src/backends/reference/RefLayerSupport.hpp5
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp2
-rw-r--r--src/backends/reference/test/RefLayerTests.cpp8
-rw-r--r--src/backends/reference/workloads/ElementwiseFunction.cpp3
-rw-r--r--src/backends/reference/workloads/RefElementwiseWorkload.cpp13
-rw-r--r--src/backends/reference/workloads/RefElementwiseWorkload.hpp21
15 files changed, 307 insertions, 20 deletions
diff --git a/src/backends/backendsCommon/StringMapping.hpp b/src/backends/backendsCommon/StringMapping.hpp
index 8541195356..073a5a6833 100644
--- a/src/backends/backendsCommon/StringMapping.hpp
+++ b/src/backends/backendsCommon/StringMapping.hpp
@@ -19,11 +19,12 @@ public:
enum Id {
RefAdditionWorkload_Execute,
RefEqualWorkload_Execute,
- RefSubtractionWorkload_Execute,
- RefMaximumWorkload_Execute,
- RefMultiplicationWorkload_Execute,
RefDivisionWorkload_Execute,
+ RefGreaterWorkload_Execute,
+ RefMaximumWorkload_Execute,
RefMinimumWorkload_Execute,
+ RefMultiplicationWorkload_Execute,
+ RefSubtractionWorkload_Execute,
MAX_STRING_ID
};
@@ -38,12 +39,13 @@ private:
StringMapping()
{
m_Strings[RefAdditionWorkload_Execute] = "RefAdditionWorkload_Execute";
+ m_Strings[RefDivisionWorkload_Execute] = "RefDivisionWorkload_Execute";
m_Strings[RefEqualWorkload_Execute] = "RefEqualWorkload_Execute";
- m_Strings[RefSubtractionWorkload_Execute] = "RefSubtractionWorkload_Execute";
+ m_Strings[RefGreaterWorkload_Execute] = "RefGreaterWorkload_Execute";
m_Strings[RefMaximumWorkload_Execute] = "RefMaximumWorkload_Execute";
- m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
- m_Strings[RefDivisionWorkload_Execute] = "RefDivisionWorkload_Execute";
m_Strings[RefMinimumWorkload_Execute] = "RefMinimumWorkload_Execute";
+ m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
+ m_Strings[RefSubtractionWorkload_Execute] = "RefSubtractionWorkload_Execute";
}
StringMapping(const StringMapping &) = delete;
@@ -52,4 +54,4 @@ private:
const char * m_Strings[MAX_STRING_ID];
};
-} //namespace armnn \ No newline at end of file
+} //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 67cee1cc0d..8847b4efbf 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1026,4 +1026,17 @@ void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
"second input");
}
+void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateTwoInputs(workloadInfo, "GreaterQueueDescriptor");
+ ValidateSingleOutput(workloadInfo, "GreaterQueueDescriptor");
+
+ ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_InputTensorInfos[1],
+ workloadInfo.m_OutputTensorInfos[0],
+ "GreaterQueueDescriptor",
+ "first input",
+ "second input");
+}
+
} //namespace armnn
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 4dc49f97a2..43b0d33bdd 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1674,6 +1674,15 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
return workloadFactory.CreateEqual(descriptor, info);
}
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
+ const armnn::IWorkloadFactory& workloadFactory,
+ const armnn::WorkloadInfo& info,
+ const armnn::GreaterQueueDescriptor& descriptor)
+{
+ return workloadFactory.CreateGreater(descriptor, info);
+}
+
namespace {
template <typename Descriptor, typename dataType>
LayerTestResult<dataType, 4> ElementwiseTestHelper
@@ -1897,6 +1906,170 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
0);
}
+LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int width = 2;
+ const unsigned int height = 2;
+ const unsigned int channelCount = 2;
+ const unsigned int batchSize = 2;
+
+ unsigned int shape[] = { batchSize, channelCount, height, width };
+
+ std::vector<float> input0({ 1, 1, 1, 1, 5, 5, 5, 5,
+ 3, 3, 3, 3, 4, 4, 4, 4 });
+
+ std::vector<float> input1({ 1, 1, 1, 1, 3, 3, 3, 3,
+ 5, 5, 5, 5, 4, 4, 4, 4 });
+
+ std::vector<float> output({ 0, 0, 0, 0, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0 });
+
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
+ (workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
+}
+
+LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int shape0[] = { 1, 2, 2, 2 };
+ std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+ unsigned int shape1[] = { 1, 1, 1, 1 };
+ std::vector<float> input1({ 1 });
+
+ std::vector<float> output({ 0, 1, 1, 1, 1, 1, 1, 1});
+
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
+ (workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
+}
+
+LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ std::vector<float> input1({ 1, 3, 2});
+
+ std::vector<float> output({ 0, 0, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1 });
+
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
+ (workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
+}
+
+LayerTestResult<uint8_t, 4> GreaterUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ unsigned int shape[] = { 2, 2, 2, 2 };
+
+ // See dequantized values to the right.
+ std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
+ 3, 3, 3, 3, 5, 5, 5, 5 });
+
+ std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
+ 2, 2, 2, 2, 5, 5, 5, 5 });
+
+ std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 0, 0, 0, 0 });
+
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
+ (workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+ std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ std::vector<uint8_t> input1({ 1 });
+
+ std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1 });
+
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
+ (workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const unsigned int shape0[] = { 1, 2, 2, 3 };
+ const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+ std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12 });
+
+ std::vector<uint8_t> input1({ 1, 1, 3});
+
+ std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1 });
+
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t>
+ (workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
+}
+
LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 029418e850..146f8c4cfe 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -891,6 +891,30 @@ LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<float, 4> GreaterSimpleTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 2> FullyConnectedLargeTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 82b46476ea..d02dfb9938 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -303,6 +303,18 @@ bool ClLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
descriptor);
}
+bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(input0);
+ ignore_unused(input1);
+ ignore_unused(output);
+ ignore_unused(reasonIfUnsupported);
+ return false;
+}
+
bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 82efd0016a..d040a54507 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -86,6 +86,11 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0033b86917..869fd03251 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -261,6 +261,18 @@ bool NeonLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
descriptor);
}
+bool NeonLayerSupport::IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(input0);
+ ignore_unused(input1);
+ ignore_unused(output);
+ ignore_unused(reasonIfUnsupported);
+ return false;
+}
+
bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 5724ed85df..43d0bd9a81 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -81,6 +81,11 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 2952ae1a80..a64339ec69 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -257,6 +257,21 @@ bool RefLayerSupport::IsFullyConnectedSupported(const TensorInfo& input,
&TrueFunc<>);
}
+bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(input0);
+ ignore_unused(input1);
+ ignore_unused(output);
+ ignore_unused(reasonIfUnsupported);
+ return IsSupportedForDataTypeRef(reasonIfUnsupported,
+ input0.GetDataType(),
+ &TrueFunc<>,
+ &TrueFunc<>);
+}
+
bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 399f7b5699..3941f4bc56 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -91,6 +91,11 @@ public:
const FullyConnectedDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsGreaterSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsInputSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 8173bbb952..eb8807eef6 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -303,7 +303,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedS
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<RefGreaterFloat32Workload, RefGreaterUint8Workload>(descriptor, info);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index eda58a99b1..6e7da13831 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -241,6 +241,14 @@ ARMNN_AUTO_TEST_CASE(EqualUint8, EqualUint8Test)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVectorUint8, EqualBroadcast1DVectorUint8Test)
+// Greater
+ARMNN_AUTO_TEST_CASE(SimpleGreater, GreaterSimpleTest)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVector, GreaterBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_CASE(GreaterUint8, GreaterUint8Test)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorUint8, GreaterBroadcast1DVectorUint8Test)
+
// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index 18ceade113..cb8aa7089c 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -32,4 +32,5 @@ template struct armnn::ElementwiseFunction<std::multiplies<float>>;
template struct armnn::ElementwiseFunction<std::divides<float>>;
template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
-template struct armnn::ElementwiseFunction<std::equal_to<float>>; \ No newline at end of file
+template struct armnn::ElementwiseFunction<std::equal_to<float>>;
+template struct armnn::ElementwiseFunction<std::greater<float>>;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index d00bfd01b4..13d6e70a96 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -45,11 +45,11 @@ void BaseUint8ElementwiseWorkload<ParentDescriptor, Functor>::ExecuteImpl(const
std::vector<float> results(outputInfo.GetNumElements());
ElementwiseFunction<Functor>(inputInfo0.GetShape(),
- inputInfo1.GetShape(),
- outputInfo.GetShape(),
- dequant0.data(),
- dequant1.data(),
- results.data());
+ inputInfo1.GetShape(),
+ outputInfo.GetShape(),
+ dequant0.data(),
+ dequant1.data(),
+ results.data());
Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo);
}
@@ -76,3 +76,6 @@ template class armnn::BaseUint8ElementwiseWorkload<armnn::MinimumQueueDescriptor
template class armnn::BaseFloat32ElementwiseWorkload<armnn::EqualQueueDescriptor, std::equal_to<float>>;
template class armnn::BaseUint8ElementwiseWorkload<armnn::EqualQueueDescriptor, std::equal_to<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::GreaterQueueDescriptor, std::greater<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::GreaterQueueDescriptor, std::greater<float>>;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
index c2855b0550..1b3200f85c 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
@@ -12,8 +12,6 @@
#include "Maximum.hpp"
#include "Minimum.hpp"
-
-
namespace armnn
{
@@ -86,7 +84,6 @@ using RefAdditionUint8Workload =
AdditionQueueDescriptor,
StringMapping::RefAdditionWorkload_Execute>;
-
using RefSubtractionFloat32Workload =
RefElementwiseWorkload<std::minus<float>,
DataType::Float32,
@@ -132,9 +129,9 @@ using RefMaximumFloat32Workload =
using RefMaximumUint8Workload =
RefElementwiseWorkload<armnn::maximum<float>,
- DataType::QuantisedAsymm8,
- MaximumQueueDescriptor,
- StringMapping::RefMaximumWorkload_Execute>;
+ DataType::QuantisedAsymm8,
+ MaximumQueueDescriptor,
+ StringMapping::RefMaximumWorkload_Execute>;
using RefMinimumFloat32Workload =
RefElementwiseWorkload<minimum<float>,
@@ -159,4 +156,16 @@ using RefEqualUint8Workload =
DataType::QuantisedAsymm8,
EqualQueueDescriptor,
StringMapping::RefEqualWorkload_Execute>;
+
+using RefGreaterFloat32Workload =
+ RefElementwiseWorkload<std::greater<float>,
+ DataType::Float32,
+ GreaterQueueDescriptor,
+ StringMapping::RefGreaterWorkload_Execute>;
+
+using RefGreaterUint8Workload =
+ RefElementwiseWorkload<std::greater<float>,
+ DataType::QuantisedAsymm8,
+ GreaterQueueDescriptor,
+ StringMapping::RefGreaterWorkload_Execute>;
} // armnn