author     Éanna Ó Catháin <eanna.ocathain@arm.com>   2018-12-04 10:29:06 +0000
committer  Les Bell <les.bell@arm.com>                2018-12-04 11:59:51 +0000
commit     20e58806b94636f579c5e8b0ca91ab771b6310e6 (patch)
tree       1537942c0a2d2c94cfc98d3ddaebf44d4d537f99
parent     975c09aab8e628b8052226d7a2e2ed2b76aa6702 (diff)
IVGCVSW-2247 Adding a min Elementwise Workload and tests
Change-Id: I017ca6c23b62a8978982de0ca4ad204cb8cf7c67
 src/armnn/Network.cpp                                         | 10
 src/backends/backendsCommon/StringMapping.hpp                 |  2
 src/backends/backendsCommon/WorkloadData.hpp                  |  1
 src/backends/backendsCommon/WorkloadFactory.hpp               |  6
 src/backends/backendsCommon/test/LayerTests.cpp               | 83
 src/backends/backendsCommon/test/LayerTests.hpp               | 12
 src/backends/reference/RefLayerSupport.cpp                    | 13
 src/backends/reference/RefLayerSupport.hpp                    |  6
 src/backends/reference/RefWorkloadFactory.cpp                 | 12
 src/backends/reference/test/RefEndToEndTests.cpp              | 66
 src/backends/reference/test/RefLayerTests.cpp                 |  7
 src/backends/reference/workloads/CMakeLists.txt               |  1
 src/backends/reference/workloads/ElementwiseFunction.cpp      |  4
 src/backends/reference/workloads/Minimum.hpp (new)            | 22
 src/backends/reference/workloads/RefElementwiseWorkload.cpp   |  3
 src/backends/reference/workloads/RefElementwiseWorkload.hpp   | 14
 16 files changed, 244 insertions(+), 18 deletions(-)
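
The patch threads the new operation through each level of the stack: the graph front end (Network::AddMinimumLayer), the backend contracts (WorkloadData, WorkloadFactory, RefLayerSupport), the reference implementation (a minimum functor plus RefElementwiseWorkload aliases), and unit and end-to-end tests. A minimal sketch of the user-facing call pattern this enables, assuming only the armnn public headers from this tree (the helper name and tensor shape are illustrative; the TrivialMin end-to-end test further down exercises the full inference flow):

#include <armnn/ArmNN.hpp>

// Illustrative helper, not part of the patch: builds a 2-input network whose
// output is the elementwise minimum of its inputs.
armnn::INetworkPtr BuildMinimumNetwork()
{
    using namespace armnn;
    INetworkPtr net = INetwork::Create();

    IConnectableLayer* input0  = net->AddInputLayer(0);
    IConnectableLayer* input1  = net->AddInputLayer(1);
    IConnectableLayer* minimum = net->AddMinimumLayer("min");
    IConnectableLayer* output  = net->AddOutputLayer(0);

    input0->GetOutputSlot(0).Connect(minimum->GetInputSlot(0));
    input1->GetOutputSlot(0).Connect(minimum->GetInputSlot(1));
    minimum->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    TensorInfo info(TensorShape({1, 1, 1, 4}), DataType::Float32);
    input0->GetOutputSlot(0).SetTensorInfo(info);
    input1->GetOutputSlot(0).SetTensorInfo(info);
    minimum->GetOutputSlot(0).SetTensorInfo(info);

    return net;
}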
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index bed6400cf7..2cb3edcf3f 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -512,6 +512,11 @@ IConnectableLayer* Network::AddMaximumLayer(const char* name)
return m_Graph->AddLayer<MaximumLayer>(name);
}
+IConnectableLayer* Network::AddMinimumLayer(const char* name)
+{
+    return m_Graph->AddLayer<MinimumLayer>(name);
+}
+
IConnectableLayer* Network::AddMergerLayer(const OriginsDescriptor& mergerDescriptor,
const char* name)
{
@@ -705,11 +710,6 @@ IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& s
return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
}
-IConnectableLayer* Network::AddMinimumLayer(const char* name)
-{
- return m_Graph->AddLayer<MinimumLayer>(name);
-}
-
OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
: m_Graph(std::move(graph))
{
diff --git a/src/backends/backendsCommon/StringMapping.hpp b/src/backends/backendsCommon/StringMapping.hpp
index 8fca3d3f40..aa7fb6df61 100644
--- a/src/backends/backendsCommon/StringMapping.hpp
+++ b/src/backends/backendsCommon/StringMapping.hpp
@@ -22,6 +22,7 @@ public:
RefMaximumWorkload_Execute,
RefMultiplicationWorkload_Execute,
RefDivisionWorkload_Execute,
+        RefMinimumWorkload_Execute,
MAX_STRING_ID
};
@@ -40,6 +41,7 @@ private:
m_Strings[RefMaximumWorkload_Execute] = "RefMaximumWorkload_Execute";
m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
m_Strings[RefDivisionWorkload_Execute] = "RefDivisionWorkload_Execute";
+        m_Strings[RefMinimumWorkload_Execute] = "RefMinimumWorkload_Execute";
}
StringMapping(const StringMapping &) = delete;
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index ee1a054c41..b0c1e6a857 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -351,6 +351,7 @@ struct StridedSliceQueueDescriptor : QueueDescriptorWithParameters<StridedSliceD
void Validate(const WorkloadInfo& workloadInfo) const;
};
+// Minimum layer workload data.
struct MinimumQueueDescriptor : QueueDescriptor
{
void Validate(const WorkloadInfo& workloadInfo) const;
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 2c3c8aa091..eb24b64dd7 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -139,15 +139,15 @@ public:
virtual std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
+    virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
+                                                     const WorkloadInfo& info) const = 0;
+
virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
const WorkloadInfo& Info) const = 0;
- virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
-
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
};
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index c3822bd7a6..131b84c859 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1655,6 +1655,15 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
return workloadFactory.CreateMaximum(descriptor, info);
}
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MinimumQueueDescriptor>(
+    const armnn::IWorkloadFactory& workloadFactory,
+    const armnn::WorkloadInfo& info,
+    const armnn::MinimumQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateMinimum(descriptor, info);
+}
+
namespace {
template <typename Descriptor, typename dataType>
LayerTestResult<dataType, 4> ElementwiseTestHelper
@@ -1866,7 +1875,7 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
7, 10, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
(workloadFactory,
memoryManager,
shape0,
@@ -1879,6 +1888,78 @@ LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
0);
}
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8 });
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<float> input1({ 2 });
+
+    std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2 });
+
+    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<float> input0({ 1, 6, 3, 2, 8, 9, 1, 10 });
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<float> input1({ 5 });
+
+    std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5 });
+
+    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output);
+}
+
+LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0({ 1, 2, 3, 3, 2, 1,
+                                  7, 1, 2, 3, 4, 5 });
+
+    std::vector<uint8_t> input1({ 1, 2, 3 });
+
+    std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
+                                  1, 1, 2, 1, 2, 3 });
+
+    return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
+        memoryManager,
+        shape0,
+        input0,
+        shape1,
+        input1,
+        shape0,
+        output,
+        1.0f,
+        0);
+}
+
namespace {
LayerTestResult<float,4> MultiplicationTestHelper(
armnn::IWorkloadFactory& workloadFactory,
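
The expected tensors in these Minimum tests are plain elementwise minima, with the single-element or 1D input broadcast across the larger shape. A self-contained check of the MinimumBroadcast1ElementTest2 numbers using only the standard library (this mirrors the arithmetic only, not the ElementwiseTestHelper plumbing):

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
    const std::vector<float> input0   { 1, 6, 3, 2, 8, 9, 1, 10 };
    const float              input1   = 5;   // shape {1,1,1,1}, broadcast to all 8 elements
    const std::vector<float> expected { 1, 5, 3, 2, 5, 5, 1, 5 };

    std::vector<float> output(input0.size());
    std::transform(input0.begin(), input0.end(), output.begin(),
                   [input1](float v) { return std::min(v, input1); });

    assert(output == expected);   // matches the test's expected tensor
    return 0;
}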
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index fa1c864ba5..1797f9fa5e 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -1008,6 +1008,18 @@ LayerTestResult<float, 3> MeanVtsFloat3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> MinimumBroadcast1ElementTest2(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 7222af6402..fffea587a0 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -347,6 +347,19 @@ bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inp
&TrueFunc<>);
}
+bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
+                                         const TensorInfo& input1,
+                                         const TensorInfo& output,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input1);
+    ignore_unused(output);
+    return IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                     input0.GetDataType(),
+                                     &TrueFunc<>,
+                                     &TrueFunc<>);
+}
+
bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
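
IsMinimumSupported follows the same convention as the other reference-backend support queries: only the first input's data type is inspected, the two TrueFunc<> arguments effectively accept Float32 and QuantisedAsymm8, and a human-readable reason can be written to the optional out-parameter when the query fails. A self-contained toy of that capability-query shape, with armnn's Optional<std::string&> approximated by a plain pointer (names are illustrative):

#include <iostream>
#include <string>

enum class ToyDataType { Float32, QuantisedAsymm8, Boolean };

// Toy stand-in for RefLayerSupport::IsMinimumSupported: true for the data types
// the reference backend handles, otherwise false plus a reason.
bool IsMinimumSupportedToy(ToyDataType input0, std::string* reasonIfUnsupported)
{
    if (input0 == ToyDataType::Float32 || input0 == ToyDataType::QuantisedAsymm8)
    {
        return true;
    }
    if (reasonIfUnsupported)
    {
        *reasonIfUnsupported = "Reference minimum: unsupported input data type";
    }
    return false;
}

int main()
{
    std::string reason;
    if (!IsMinimumSupportedToy(ToyDataType::Boolean, &reason))
    {
        std::cout << reason << "\n";
    }
}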
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 73e5394fc7..0d34c08bac 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -121,7 +121,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsMeanSupported(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
@@ -132,6 +131,11 @@ public:
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+    bool IsMinimumSupported(const TensorInfo& input0,
+                            const TensorInfo& input1,
+                            const TensorInfo& output,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index d32e1833c2..43651cf790 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -270,6 +270,12 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMean(
return MakeWorkload<RefMeanFloat32Workload, RefMeanUint8Workload>(descriptor, info);
}
+std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMinimum(
+    const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+    return MakeWorkload<RefMinimumFloat32Workload, RefMinimumUint8Workload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -288,12 +294,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedS
return MakeWorkload<RefStridedSliceFloat32Workload, RefStridedSliceUint8Workload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
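
CreateMinimum defers to the MakeWorkload<Float32Workload, Uint8Workload> helper, which picks the concrete workload class from the data type recorded in the WorkloadInfo. A self-contained toy of that dispatch pattern (the real helper also validates the descriptor and rejects unsupported types):

#include <iostream>
#include <memory>

enum class ToyDataType { Float32, QuantisedAsymm8 };

struct ToyWorkload
{
    virtual const char* Name() const = 0;
    virtual ~ToyWorkload() = default;
};
struct ToyMinimumFloat32Workload : ToyWorkload { const char* Name() const override { return "Float32"; } };
struct ToyMinimumUint8Workload   : ToyWorkload { const char* Name() const override { return "Uint8"; } };

// Toy stand-in for MakeWorkload<F32, U8>: choose the workload type from the data type.
template <typename Float32Workload, typename Uint8Workload>
std::unique_ptr<ToyWorkload> MakeWorkloadToy(ToyDataType type)
{
    if (type == ToyDataType::Float32)
    {
        return std::make_unique<Float32Workload>();
    }
    return std::make_unique<Uint8Workload>();
}

int main()
{
    auto workload = MakeWorkloadToy<ToyMinimumFloat32Workload, ToyMinimumUint8Workload>(ToyDataType::Float32);
    std::cout << workload->Name() << "\n";   // prints "Float32"
}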
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 97bec51645..8ad6f5a4d1 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -7,6 +7,7 @@
#include <backendsCommon/test/MergerTestImpl.hpp>
#include <boost/test/unit_test.hpp>
+#include <boost/test/execution_monitor.hpp>
BOOST_AUTO_TEST_SUITE(RefEndToEnd)
@@ -246,6 +247,71 @@ BOOST_AUTO_TEST_CASE(MultipleOutputs)
BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}
+BOOST_AUTO_TEST_CASE(TrivialMin)
+{
+    using namespace armnn;
+
+    // Create runtime in which the test will run.
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    armnn::INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input1 = net->AddInputLayer(0);
+    IConnectableLayer* input2 = net->AddInputLayer(1);
+    IConnectableLayer* min = net->AddMinimumLayer();
+    IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
+    input2->GetOutputSlot(0).Connect(min->GetInputSlot(1));
+    min->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // Sets the tensors in the network.
+    TensorInfo tensorInfo(TensorShape({1, 1, 1, 4}), DataType::Float32);
+    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    min->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    // Optimize the network.
+    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    NetworkId netId;
+    runtime->LoadNetwork(netId, std::move(optNet));
+
+    // Creates structures for input & output - matching the Android NN test.
+    std::vector<float> input1Data
+    {
+        1.0f, 2.0f, 3.0f, 4.0f
+    };
+    std::vector<float> input2Data
+    {
+        2.0f, 1.0f, 5.0f, 2.0f
+    };
+    std::vector<float> outputData(4);
+
+    InputTensors inputTensors
+    {
+        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
+        {1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
+    };
+    OutputTensors outputTensors
+    {
+        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+    };
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results.
+    BOOST_TEST(outputData[0] == 1);
+    BOOST_TEST(outputData[1] == 1);
+    BOOST_TEST(outputData[2] == 3);
+    BOOST_TEST(outputData[3] == 2);
+}
+
+
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
{
MergerDim0EndToEnd<float>(defaultBackends);
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 54ec697ec2..fa4af96c46 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -233,7 +233,7 @@ ARMNN_AUTO_TEST_CASE(DivisionUint8, DivisionUint8Test)
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1Element, DivisionBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1DVector, DivisionBroadcast1DVectorUint8Test)
-//Max
+// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
@@ -241,6 +241,11 @@ ARMNN_AUTO_TEST_CASE(MaximumUint8, MaximumUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
+// Min
+ARMNN_AUTO_TEST_CASE(SimpleMinimum1, MinimumBroadcast1ElementTest1)
+ARMNN_AUTO_TEST_CASE(SimpleMinimum2, MinimumBroadcast1ElementTest2)
+ARMNN_AUTO_TEST_CASE(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
+
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index b9c150f6d4..7028f18e2d 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -19,6 +19,7 @@ list(APPEND armnnRefBackendWorkloads_sources
FullyConnected.hpp
Maximum.hpp
Merger.hpp
+    Minimum.hpp
Pad.cpp
Pad.hpp
Pooling2d.cpp
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index bb15049faa..88d51908fe 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -6,6 +6,7 @@
#include "ElementwiseFunction.hpp"
#include "Broadcast.hpp"
#include <functional>
+#include "Minimum.hpp"
#include "Maximum.hpp"
@@ -29,4 +30,5 @@ template struct armnn::ElementwiseFunction<std::plus<float>>;
template struct armnn::ElementwiseFunction<std::minus<float>>;
template struct armnn::ElementwiseFunction<std::multiplies<float>>;
template struct armnn::ElementwiseFunction<std::divides<float>>;
-template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
\ No newline at end of file
+template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
+template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
\ No newline at end of file
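
ElementwiseFunction pairs a binary functor such as armnn::minimum<float> with the broadcasting loop, and is explicitly instantiated here for every supported functor. A self-contained sketch of that composition over two equal-shape buffers (the real class also broadcasts mismatched shapes; the functor is copied from Minimum.hpp below):

#include <algorithm>
#include <cstdio>
#include <vector>

// Local copy of the functor added in Minimum.hpp.
template <typename T>
struct minimum
{
    T operator()(const T& a, const T& b) const { return std::min(a, b); }
};

// Simplified stand-in for ElementwiseFunction: apply a binary functor element by element.
template <typename Functor>
std::vector<float> ElementwiseApply(const std::vector<float>& in0, const std::vector<float>& in1)
{
    std::vector<float> out(in0.size());
    std::transform(in0.begin(), in0.end(), in1.begin(), out.begin(), Functor());
    return out;
}

int main()
{
    const std::vector<float> a { 1, 6, 3, 2 };
    const std::vector<float> b { 5, 5, 5, 5 };
    for (float v : ElementwiseApply<minimum<float>>(a, b)) { std::printf("%g ", v); }   // 1 5 3 2
    std::printf("\n");
}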
diff --git a/src/backends/reference/workloads/Minimum.hpp b/src/backends/reference/workloads/Minimum.hpp
new file mode 100644
index 0000000000..2f3bdc1c02
--- /dev/null
+++ b/src/backends/reference/workloads/Minimum.hpp
@@ -0,0 +1,24 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <algorithm>
+
+namespace armnn
+{
+
+template<typename T>
+struct minimum
+{
+    T
+    operator()(const T& input1, const T& input2) const
+    {
+        return std::min(input1, input2);
+    }
+};
+
+} // namespace armnn
+
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index 60a1b990f7..a18c7c569e 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -70,3 +70,6 @@ template class armnn::BaseUint8ElementwiseWorkload<armnn::DivisionQueueDescripto
template class armnn::BaseFloat32ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
template class armnn::BaseUint8ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
index 2772b77631..b5205938b2 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
@@ -10,6 +10,9 @@
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include "Maximum.hpp"
+#include "Minimum.hpp"
+
+
namespace armnn
{
@@ -133,4 +136,15 @@ using RefMaximumUint8Workload =
MaximumQueueDescriptor,
StringMapping::RefMaximumWorkload_Execute>;
+using RefMinimumFloat32Workload =
+    RefElementwiseWorkload<minimum<float>,
+                           DataType::Float32,
+                           MinimumQueueDescriptor,
+                           StringMapping::RefMinimumWorkload_Execute>;
+
+using RefMinimumUint8Workload =
+    RefElementwiseWorkload<minimum<float>,
+                           DataType::QuantisedAsymm8,
+                           MinimumQueueDescriptor,
+                           StringMapping::RefMinimumWorkload_Execute>;
} // armnn
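
Note that RefMinimumUint8Workload instantiates minimum<float> as well: the quantised reference path dequantises both inputs to float, applies the float functor, and requantises the result, which is why a single float functor serves both aliases. A self-contained sketch of that round trip under the scale of 1.0 and offset of 0 used by MinimumBroadcast1DVectorUint8Test (the helper functions are illustrative simplifications, not armnn's own Quantize/Dequantize utilities):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Illustrative affine (de)quantisation helpers.
float DequantizeToy(uint8_t value, float scale, int32_t offset)
{
    return scale * static_cast<float>(static_cast<int32_t>(value) - offset);
}

uint8_t QuantizeToy(float value, float scale, int32_t offset)
{
    const int32_t quantized = static_cast<int32_t>(std::lround(value / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, quantized)));
}

int main()
{
    const float   scale  = 1.0f;   // matches MinimumBroadcast1DVectorUint8Test
    const int32_t offset = 0;
    const std::vector<uint8_t> input0 { 1, 2, 3, 3, 2, 1 };   // first row of the test's input0
    const std::vector<uint8_t> input1 { 1, 2, 3, 1, 2, 3 };   // the { 1, 2, 3 } vector, broadcast

    for (std::size_t i = 0; i < input0.size(); ++i)
    {
        const float a = DequantizeToy(input0[i], scale, offset);
        const float b = DequantizeToy(input1[i], scale, offset);
        std::printf("%d ", QuantizeToy(std::min(a, b), scale, offset));   // 1 2 3 1 2 1
    }
    std::printf("\n");
}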