aboutsummaryrefslogtreecommitdiff
path: root/src/backends/reference
diff options
context:
space:
mode:
author    Éanna Ó Catháin <eanna.ocathain@arm.com>  2018-12-04 10:29:06 +0000
committer Les Bell <les.bell@arm.com>               2018-12-04 11:59:51 +0000
commit   20e58806b94636f579c5e8b0ca91ab771b6310e6 (patch)
tree     1537942c0a2d2c94cfc98d3ddaebf44d4d537f99 /src/backends/reference
parent   975c09aab8e628b8052226d7a2e2ed2b76aa6702 (diff)
download armnn-20e58806b94636f579c5e8b0ca91ab771b6310e6.tar.gz
IVGCVSW-2247 Adding a min Elementwise Workload and tests
Change-Id: I017ca6c23b62a8978982de0ca4ad204cb8cf7c67
Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp              | 13
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp               |  6
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp            | 12
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp         | 66
-rw-r--r--  src/backends/reference/test/RefLayerTests.cpp            |  7
-rw-r--r--  src/backends/reference/workloads/CMakeLists.txt          |  1
-rw-r--r--  src/backends/reference/workloads/ElementwiseFunction.cpp |  4
-rw-r--r--  src/backends/reference/workloads/Minimum.hpp             | 22
-rw-r--r--  src/backends/reference/workloads/RefElementwiseWorkload.cpp |  3
-rw-r--r--  src/backends/reference/workloads/RefElementwiseWorkload.hpp | 14
10 files changed, 139 insertions, 9 deletions
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 7222af6402..fffea587a0 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -347,6 +347,19 @@ bool RefLayerSupport::IsMergerSupported(const std::vector<const TensorInfo*> inp
&TrueFunc<>);
}
+bool RefLayerSupport::IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ ignore_unused(input1);
+ ignore_unused(output);
+ return IsSupportedForDataTypeRef(reasonIfUnsupported,
+ input0.GetDataType(),
+ &TrueFunc<>,
+ &TrueFunc<>);
+}
+
bool RefLayerSupport::IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 73e5394fc7..0d34c08bac 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -121,7 +121,6 @@ public:
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
-
bool IsMeanSupported(const TensorInfo& input,
const TensorInfo& output,
const MeanDescriptor& descriptor,
@@ -132,6 +131,11 @@ public:
const OriginsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsMinimumSupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsMultiplicationSupported(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index d32e1833c2..43651cf790 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -270,6 +270,12 @@ std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMean(
return MakeWorkload<RefMeanFloat32Workload, RefMeanUint8Workload>(descriptor, info);
}
+std::unique_ptr<armnn::IWorkload> RefWorkloadFactory::CreateMinimum(
+ const MinimumQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+ return MakeWorkload<RefMinimumFloat32Workload, RefMinimumUint8Workload>(descriptor, info);
+}
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -288,12 +294,6 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateStridedSlice(const StridedS
return MakeWorkload<RefStridedSliceFloat32Workload, RefStridedSliceUint8Workload>(descriptor, info);
}
-std::unique_ptr<IWorkload> RefWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
-}
-
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 97bec51645..8ad6f5a4d1 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -7,6 +7,7 @@
#include <backendsCommon/test/MergerTestImpl.hpp>
#include <boost/test/unit_test.hpp>
+#include <boost/test/execution_monitor.hpp>
BOOST_AUTO_TEST_SUITE(RefEndToEnd)
@@ -246,6 +247,71 @@ BOOST_AUTO_TEST_CASE(MultipleOutputs)
BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}
+BOOST_AUTO_TEST_CASE(TrivialMin)
+{
+ using namespace armnn;
+
+ // Create runtime in which test will run
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // Builds up the structure of the network.
+ armnn::INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input1 = net->AddInputLayer(0);
+ IConnectableLayer* input2 = net->AddInputLayer(1);
+ IConnectableLayer* min = net->AddMinimumLayer();
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
+ input2->GetOutputSlot(0).Connect(min->GetInputSlot(1));
+ min->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Sets the tensors in the network.
+ TensorInfo tensorInfo(TensorShape({1, 1, 1, 4}), DataType::Float32);
+ input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ min->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ // optimize the network
+ IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output - matching android nn test.
+ std::vector<float> input1Data
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ };
+ std::vector<float> input2Data
+ {
+ 2.0f, 1.0f, 5.0f, 2.0f
+ };
+ std::vector<float> outputData(4);
+
+ InputTensors inputTensors
+ {
+ {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
+ {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
+ };
+ OutputTensors outputTensors
+ {
+ {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+ };
+
+ // Does the inference.
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Checks the results
+ BOOST_TEST(outputData[0] == 1);
+ BOOST_TEST(outputData[1] == 1);
+ BOOST_TEST(outputData[2] == 3);
+ BOOST_TEST(outputData[3] == 2);
+}
+
+
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
{
MergerDim0EndToEnd<float>(defaultBackends);
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 54ec697ec2..fa4af96c46 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -233,7 +233,7 @@ ARMNN_AUTO_TEST_CASE(DivisionUint8, DivisionUint8Test)
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1Element, DivisionBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1DVector, DivisionBroadcast1DVectorUint8Test)
-//Max
+// Max
ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
@@ -241,6 +241,11 @@ ARMNN_AUTO_TEST_CASE(MaximumUint8, MaximumUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
ARMNN_AUTO_TEST_CASE(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
+// Min
+ARMNN_AUTO_TEST_CASE(SimpleMinimum1, MinimumBroadcast1ElementTest1)
+ARMNN_AUTO_TEST_CASE(SimpleMinimum2, MinimumBroadcast1ElementTest2)
+ARMNN_AUTO_TEST_CASE(Minimum1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
+
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index b9c150f6d4..7028f18e2d 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -19,6 +19,7 @@ list(APPEND armnnRefBackendWorkloads_sources
FullyConnected.hpp
Maximum.hpp
Merger.hpp
+ Minimum.hpp
Pad.cpp
Pad.hpp
Pooling2d.cpp
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index bb15049faa..88d51908fe 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -6,6 +6,7 @@
#include "ElementwiseFunction.hpp"
#include "Broadcast.hpp"
#include <functional>
+#include "Minimum.hpp"
#include "Maximum.hpp"
@@ -29,4 +30,5 @@ template struct armnn::ElementwiseFunction<std::plus<float>>;
template struct armnn::ElementwiseFunction<std::minus<float>>;
template struct armnn::ElementwiseFunction<std::multiplies<float>>;
template struct armnn::ElementwiseFunction<std::divides<float>>;
-template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
\ No newline at end of file
+template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
+template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Minimum.hpp b/src/backends/reference/workloads/Minimum.hpp
new file mode 100644
index 0000000000..2f3bdc1c02
--- /dev/null
+++ b/src/backends/reference/workloads/Minimum.hpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+namespace armnn
+{
+
+template<typename T>
+struct minimum
+{
+ T
+ operator()(const T& input1, const T& input2) const
+ {
+ return std::min(input1, input2);
+ }
+};
+
+} //namespace armnn
+
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index 60a1b990f7..a18c7c569e 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -70,3 +70,6 @@ template class armnn::BaseUint8ElementwiseWorkload<armnn::DivisionQueueDescripto
template class armnn::BaseFloat32ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
template class armnn::BaseUint8ElementwiseWorkload<armnn::MaximumQueueDescriptor, armnn::maximum<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
index 2772b77631..b5205938b2 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
@@ -10,6 +10,9 @@
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include "Maximum.hpp"
+#include "Minimum.hpp"
+
+
namespace armnn
{
@@ -133,4 +136,15 @@ using RefMaximumUint8Workload =
MaximumQueueDescriptor,
StringMapping::RefMaximumWorkload_Execute>;
+using RefMinimumFloat32Workload =
+ RefElementwiseWorkload<minimum<float>,
+ DataType::Float32,
+ MinimumQueueDescriptor,
+ StringMapping::RefMinimumWorkload_Execute>;
+
+using RefMinimumUint8Workload =
+ RefElementwiseWorkload<minimum<float>,
+ DataType::QuantisedAsymm8,
+ MinimumQueueDescriptor,
+ StringMapping::RefMinimumWorkload_Execute>;
} // armnn