author     David Beck <david.beck@arm.com>            2018-09-10 14:47:28 +0100
committer  Matthew Bentham <matthew.bentham@arm.com>  2018-10-01 14:56:48 +0100
commit     bc3924503cf7838eea7e9fcca4a22e2bf54ba97f (patch)
tree       6ea56e796bef35ffa9397eedad956363486241c7
parent     32b9046ea74d2387a08819cf5e67c183e03f6d3f (diff)
download   armnn-bc3924503cf7838eea7e9fcca4a22e2bf54ba97f.tar.gz
IVGCVSW-1802 : Neon Sub Workload
Change-Id: I1e914b046cd8fd25669390d477f101098fe0d476
-rw-r--r--  Android.mk                                                          1
-rw-r--r--  CMakeLists.txt                                                      2
-rw-r--r--  src/armnn/backends/NeonLayerSupport.cpp                             8
-rw-r--r--  src/armnn/backends/NeonWorkloadFactory.cpp                          2
-rw-r--r--  src/armnn/backends/NeonWorkloads.hpp                                1
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp  46
-rw-r--r--  src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.hpp  27
-rw-r--r--  src/armnn/backends/test/ArmComputeCl.cpp                           12
-rw-r--r--  src/armnn/backends/test/ArmComputeNeon.cpp                          4
-rw-r--r--  src/armnn/backends/test/CreateWorkloadNeon.cpp                     90
-rw-r--r--  src/armnn/backends/test/CreateWorkloadRef.cpp                      92
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                                  60
12 files changed, 203 insertions(+), 142 deletions(-)
diff --git a/Android.mk b/Android.mk
index db3c6b3af6..c070b28f87 100644
--- a/Android.mk
+++ b/Android.mk
@@ -113,6 +113,7 @@ LOCAL_SRC_FILES := \
src/armnn/backends/NeonWorkloads/NeonSoftmaxUint8Workload.cpp \
src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.cpp \
src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.cpp \
+ src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp \
src/armnn/backends/ClWorkloadFactory.cpp \
src/armnn/backends/ClContextControl.cpp \
src/armnn/backends/CpuTensorHandle.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7656c5d329..0fc3f1ccd7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -458,6 +458,8 @@ if(ARMCOMPUTENEON)
src/armnn/backends/NeonWorkloads/NeonSplitterFloatWorkload.hpp
src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.cpp
src/armnn/backends/NeonWorkloads/NeonSplitterUint8Workload.hpp
+ src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp
+ src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.hpp
src/armnn/backends/NeonWorkloadUtils.cpp
src/armnn/backends/NeonWorkloadUtils.hpp
src/armnn/backends/NeonTensorHandle.hpp
diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp
index 7f33c48ed1..30956dfba0 100644
--- a/src/armnn/backends/NeonLayerSupport.cpp
+++ b/src/armnn/backends/NeonLayerSupport.cpp
@@ -27,6 +27,7 @@
#include "NeonWorkloads/NeonPermuteWorkload.hpp"
#include "NeonWorkloads/NeonPooling2dBaseWorkload.hpp"
#include "NeonWorkloads/NeonSoftmaxBaseWorkload.hpp"
+#include "NeonWorkloads/NeonSubtractionFloatWorkload.hpp"
#endif
using namespace boost;
@@ -239,8 +240,11 @@ bool IsSubtractionSupportedNeon(const TensorInfo& input0,
const TensorInfo& input1,
const TensorInfo& output,
std::string* reasonIfUnsupported)
{
- // At the moment subtraction is not supported
- return false;
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonSubtractionWorkloadValidate,
+ reasonIfUnsupported,
+ input0,
+ input1,
+ output);
}
bool IsFullyConnectedSupportedNeon(const TensorInfo& input,
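
Note: FORWARD_WORKLOAD_VALIDATE_FUNC is a helper macro defined elsewhere in the backend; it forwards the tensor infos to the given validate function and converts the resulting arm_compute::Status into the bool/optional-reason pair this interface returns. A minimal sketch of that behaviour, using a hypothetical helper name rather than the verbatim armnn macro:

    // Sketch only: IsWorkloadSupportedSketch is a hypothetical name illustrating
    // what the macro delegates to; the real definition lives in the armnn backend.
    #include <arm_compute/core/Error.h>
    #include <string>
    #include <utility>

    template <typename Func, typename... Args>
    bool IsWorkloadSupportedSketch(Func&& validateFunc, std::string* reasonIfUnsupported, Args&&... args)
    {
        const arm_compute::Status status = validateFunc(std::forward<Args>(args)...);
        const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
        if (!supported && reasonIfUnsupported)
        {
            *reasonIfUnsupported = status.error_description(); // surface ACL's reason
        }
        return supported;
    }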
diff --git a/src/armnn/backends/NeonWorkloadFactory.cpp b/src/armnn/backends/NeonWorkloadFactory.cpp
index b3d84d14a3..ccbec9a8bb 100644
--- a/src/armnn/backends/NeonWorkloadFactory.cpp
+++ b/src/armnn/backends/NeonWorkloadFactory.cpp
@@ -165,7 +165,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateSubtraction(
const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ return MakeWorkload<NeonSubtractionFloatWorkload, NullWorkload>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateBatchNormalization(
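
Here MakeWorkload<NeonSubtractionFloatWorkload, NullWorkload> selects the first template argument for float tensors and the second for uint8 tensors, with NullWorkload marking the quantized path as unimplemented on Neon. A hedged sketch of that dispatch, with a hypothetical name and simplified logic rather than the actual armnn helper:

    #include <armnn/Exceptions.hpp>
    #include <memory>

    // Hypothetical sketch of MakeWorkload's dispatch: the data type recorded in
    // the WorkloadInfo decides which of the two workload types gets constructed.
    template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType>
    std::unique_ptr<armnn::IWorkload> MakeWorkloadSketch(const QueueDescriptorType& descriptor,
                                                         const armnn::WorkloadInfo& info)
    {
        const armnn::DataType dataType = !info.m_InputTensorInfos.empty()
            ? info.m_InputTensorInfos[0].GetDataType()
            : info.m_OutputTensorInfos[0].GetDataType();

        switch (dataType)
        {
            case armnn::DataType::Float16:
            case armnn::DataType::Float32:
                return std::make_unique<FloatWorkload>(descriptor, info);
            case armnn::DataType::QuantisedAsymm8:
                return std::make_unique<Uint8Workload>(descriptor, info);
            default:
                throw armnn::InvalidArgumentException("Unsupported data type for this workload");
        }
    }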
diff --git a/src/armnn/backends/NeonWorkloads.hpp b/src/armnn/backends/NeonWorkloads.hpp
index 9377818e25..676c23cc4d 100644
--- a/src/armnn/backends/NeonWorkloads.hpp
+++ b/src/armnn/backends/NeonWorkloads.hpp
@@ -38,3 +38,4 @@
#include "backends/NeonWorkloads/NeonSoftmaxUint8Workload.hpp"
#include "backends/NeonWorkloads/NeonSplitterFloatWorkload.hpp"
#include "backends/NeonWorkloads/NeonSplitterUint8Workload.hpp"
+#include "backends/NeonWorkloads/NeonSubtractionFloatWorkload.hpp"
diff --git a/src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp b/src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp
new file mode 100644
index 0000000000..e3c6467d10
--- /dev/null
+++ b/src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonSubtractionFloatWorkload.hpp"
+#include "backends/ArmComputeTensorUtils.hpp"
+#include "backends/CpuTensorHandle.hpp"
+
+namespace armnn
+{
+
+arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ return arm_compute::NEArithmeticSubtraction::validate(&aclInput0,
+ &aclInput1,
+ &aclOutput,
+ arm_compute::ConvertPolicy::SATURATE);
+}
+
+NeonSubtractionFloatWorkload::NeonSubtractionFloatWorkload(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : FloatWorkload<SubtractionQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonSubtractionFloatWorkload", 2, 1);
+
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_SubLayer.configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
+}
+
+void NeonSubtractionFloatWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonSubtractionFloatWorkload_Execute");
+    m_SubLayer.run();
+}
+
+} //namespace armnn
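
With the workload wired into the factory and the layer-support check, Neon subtraction becomes reachable through the public graph API. A minimal usage sketch, assuming the INetwork::AddSubtractionLayer frontend call present in this armnn version (Float32 only, since that is what this commit enables; error handling omitted):

    #include <armnn/ArmNN.hpp>

    void BuildSubtractionNetworkSketch()
    {
        armnn::INetworkPtr net = armnn::INetwork::Create();
        armnn::IConnectableLayer* in0 = net->AddInputLayer(0);
        armnn::IConnectableLayer* in1 = net->AddInputLayer(1);
        armnn::IConnectableLayer* sub = net->AddSubtractionLayer("sub");
        armnn::IConnectableLayer* out = net->AddOutputLayer(0);

        // Elementwise in0 - in1; inputs and output share one shape here.
        in0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
        in1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
        sub->GetOutputSlot(0).Connect(out->GetInputSlot(0));

        const armnn::TensorInfo info({2, 3}, armnn::DataType::Float32);
        in0->GetOutputSlot(0).SetTensorInfo(info);
        in1->GetOutputSlot(0).SetTensorInfo(info);
        sub->GetOutputSlot(0).SetTensorInfo(info);

        // Optimizing this network for Compute::CpuAcc and loading it into an
        // IRuntime would now produce a NeonSubtractionFloatWorkload.
    }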
diff --git a/src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.hpp b/src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.hpp
new file mode 100644
index 0000000000..6c136622cd
--- /dev/null
+++ b/src/armnn/backends/NeonWorkloads/NeonSubtractionFloatWorkload.hpp
@@ -0,0 +1,27 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backends/NeonWorkloadUtils.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonSubtractionWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output);
+
+class NeonSubtractionFloatWorkload : public FloatWorkload<SubtractionQueueDescriptor>
+{
+public:
+ NeonSubtractionFloatWorkload(const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEArithmeticSubtraction m_SubLayer;
+};
+
+} //namespace armnn
diff --git a/src/armnn/backends/test/ArmComputeCl.cpp b/src/armnn/backends/test/ArmComputeCl.cpp
index 22445edf18..2c1d8b66cf 100644
--- a/src/armnn/backends/test/ArmComputeCl.cpp
+++ b/src/armnn/backends/test/ArmComputeCl.cpp
@@ -148,19 +148,9 @@ ARMNN_AUTO_TEST_CASE(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test)
// Sub
ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest)
-// TODO :
-// 1, enable broadcast tests for SUB when COMPMID-1566 is implemented (IVGCVSW-1837)
-// 2, enable quantized tests for SUB when COMPMID-1564 is implemented (IVGCVSW-1836)
-
-// ARMNN_AUTO_TEST_CASE(SubBroadcast1Element, SubtractionBroadcast1ElementTest)
-// ARMNN_AUTO_TEST_CASE(SubBroadcast, SubtractionBroadcastTest)
-
-// ARMNN_AUTO_TEST_CASE(SubtractionUint8, SubtractionUint8Test)
-// ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test)
-// ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
-
// Div
ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
+ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
// NOTE: quantized division is not supported by CL and not required by the
diff --git a/src/armnn/backends/test/ArmComputeNeon.cpp b/src/armnn/backends/test/ArmComputeNeon.cpp
index 56ee296e47..f1a2cf65bd 100644
--- a/src/armnn/backends/test/ArmComputeNeon.cpp
+++ b/src/armnn/backends/test/ArmComputeNeon.cpp
@@ -319,8 +319,12 @@ ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
// Add
ARMNN_AUTO_TEST_CASE(SimpleAdd, AdditionTest)
+ARMNN_AUTO_TEST_CASE(AddBroadcast, AdditionBroadcastTest)
ARMNN_AUTO_TEST_CASE(AddBroadcast1Element, AdditionBroadcast1ElementTest)
+// Sub
+ARMNN_AUTO_TEST_CASE(SimpleSub, SubtractionTest)
+
// Mul
ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
diff --git a/src/armnn/backends/test/CreateWorkloadNeon.cpp b/src/armnn/backends/test/CreateWorkloadNeon.cpp
index dde8c39251..fbe064e1c4 100644
--- a/src/armnn/backends/test/CreateWorkloadNeon.cpp
+++ b/src/armnn/backends/test/CreateWorkloadNeon.cpp
@@ -78,15 +78,17 @@ BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
NeonCreateActivationWorkloadTest<NeonActivationFloatWorkload, DataType::Float32>();
}
-template <typename AdditionWorkloadType, typename armnn::DataType DataType>
-static void NeonCreateAdditionWorkloadTest()
+template <typename WorkloadType,
+ typename DescriptorType,
+ typename LayerType,
+ armnn::DataType DataType>
+static void NeonCreateArithmeticWorkloadTest()
{
- Graph graph;
+ Graph graph;
NeonWorkloadFactory factory;
- auto workload = CreateAdditionWorkloadTest<AdditionWorkloadType, DataType>(factory, graph);
+ auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
- // Checks that inputs/outputs are as we expect them (see definition of CreateAdditionWorkloadTest).
- AdditionQueueDescriptor queueDescriptor = workload->GetData();
+ DescriptorType queueDescriptor = workload->GetData();
auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
auto inputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
@@ -98,13 +100,55 @@ static void NeonCreateAdditionWorkloadTest()
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
- NeonCreateAdditionWorkloadTest<NeonAdditionFloatWorkload, DataType::Float16>();
+    NeonCreateArithmeticWorkloadTest<NeonAdditionFloatWorkload,
+ AdditionQueueDescriptor,
+ AdditionLayer,
+ DataType::Float16>();
}
#endif
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
- NeonCreateAdditionWorkloadTest<NeonAdditionFloatWorkload, DataType::Float32>();
+    NeonCreateArithmeticWorkloadTest<NeonAdditionFloatWorkload,
+ AdditionQueueDescriptor,
+ AdditionLayer,
+ DataType::Float32>();
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+{
+    NeonCreateArithmeticWorkloadTest<NeonSubtractionFloatWorkload,
+ SubtractionQueueDescriptor,
+ SubtractionLayer,
+ DataType::Float16>();
+}
+#endif
+
+BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+{
+    NeonCreateArithmeticWorkloadTest<NeonSubtractionFloatWorkload,
+ SubtractionQueueDescriptor,
+ SubtractionLayer,
+ DataType::Float32>();
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
+{
+    NeonCreateArithmeticWorkloadTest<NeonMultiplicationFloatWorkload,
+ MultiplicationQueueDescriptor,
+ MultiplicationLayer,
+ DataType::Float16>();
+}
+#endif
+
+BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+{
+    NeonCreateArithmeticWorkloadTest<NeonMultiplicationFloatWorkload,
+ MultiplicationQueueDescriptor,
+ MultiplicationLayer,
+ DataType::Float32>();
}
template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
@@ -190,36 +234,6 @@ BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload)
NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedFloatWorkload, DataType::Float32>();
}
-template <typename MultiplicationWorkloadType, typename armnn::DataType DataType>
-static void NeonCreateMultiplicationWorkloadTest()
-{
- Graph graph;
- NeonWorkloadFactory factory;
- auto workload = CreateMultiplicationWorkloadTest<MultiplicationWorkloadType,
- DataType>(factory, graph);
-
- // Checks that inputs/outputs are as we expect them (see definition of CreateMultiplicationWorkloadTest).
- MultiplicationQueueDescriptor queueDescriptor = workload->GetData();
- auto inputHandle1 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[0]);
- auto inputHandle2 = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Inputs[1]);
- auto outputHandle = boost::polymorphic_downcast<INeonTensorHandle*>(queueDescriptor.m_Outputs[0]);
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
- BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
-}
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
-{
- NeonCreateMultiplicationWorkloadTest<NeonMultiplicationFloatWorkload, DataType::Float16>();
-}
-#endif
-
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
-{
- NeonCreateMultiplicationWorkloadTest<NeonMultiplicationFloatWorkload, DataType::Float32>();
-}
-
template <typename NormalizationWorkloadType, typename armnn::DataType DataType>
static void NeonCreateNormalizationWorkloadTest()
{
diff --git a/src/armnn/backends/test/CreateWorkloadRef.cpp b/src/armnn/backends/test/CreateWorkloadRef.cpp
index 46ee3225a0..41419dafd0 100644
--- a/src/armnn/backends/test/CreateWorkloadRef.cpp
+++ b/src/armnn/backends/test/CreateWorkloadRef.cpp
@@ -62,14 +62,16 @@ BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
RefCreateActivationWorkloadTest<RefActivationUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
-template <typename AdditionWorkloadType, armnn::DataType DataType>
-static void RefCreateAdditionWorkloadTest()
+template <typename WorkloadType,
+ typename DescriptorType,
+ typename LayerType,
+ armnn::DataType DataType>
+static void RefCreateArithmeticWorkloadTest()
{
Graph graph;
RefWorkloadFactory factory;
- auto workload = CreateAdditionWorkloadTest<AdditionWorkloadType, DataType>(factory, graph);
+ auto workload = CreateArithmeticWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(factory, graph);
- // Checks that outputs are as we expect them (see definition of CreateAdditionWorkloadTest).
CheckInputsOutput(std::move(workload),
TensorInfo({ 2, 3 }, DataType),
TensorInfo({ 2, 3 }, DataType),
@@ -78,12 +80,66 @@ static void RefCreateAdditionWorkloadTest()
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
- RefCreateAdditionWorkloadTest<RefAdditionFloat32Workload, armnn::DataType::Float32>();
+    RefCreateArithmeticWorkloadTest<RefAdditionFloat32Workload,
+ AdditionQueueDescriptor,
+ AdditionLayer,
+ armnn::DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
- RefCreateAdditionWorkloadTest<RefAdditionUint8Workload, armnn::DataType::QuantisedAsymm8>();
+    RefCreateArithmeticWorkloadTest<RefAdditionUint8Workload,
+ AdditionQueueDescriptor,
+ AdditionLayer,
+ armnn::DataType::QuantisedAsymm8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+{
+    RefCreateArithmeticWorkloadTest<RefSubtractionFloat32Workload,
+ SubtractionQueueDescriptor,
+ SubtractionLayer,
+ armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+{
+    RefCreateArithmeticWorkloadTest<RefSubtractionUint8Workload,
+ SubtractionQueueDescriptor,
+ SubtractionLayer,
+ armnn::DataType::QuantisedAsymm8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+{
+    RefCreateArithmeticWorkloadTest<RefMultiplicationFloat32Workload,
+ MultiplicationQueueDescriptor,
+ MultiplicationLayer,
+ armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+{
+    RefCreateArithmeticWorkloadTest<RefMultiplicationUint8Workload,
+ MultiplicationQueueDescriptor,
+ MultiplicationLayer,
+ armnn::DataType::QuantisedAsymm8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
+{
+    RefCreateArithmeticWorkloadTest<RefDivisionFloat32Workload,
+ DivisionQueueDescriptor,
+ DivisionLayer,
+ armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
+{
+    RefCreateArithmeticWorkloadTest<RefDivisionUint8Workload,
+ DivisionQueueDescriptor,
+ DivisionLayer,
+ armnn::DataType::QuantisedAsymm8>();
}
BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload)
@@ -171,30 +227,6 @@ BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload)
RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload, armnn::DataType::QuantisedAsymm8>();
}
-template <typename MultiplicationWorkloadType, armnn::DataType DataType>
-static void RefCreateMultiplicationWorkloadTest()
-{
- Graph graph;
- RefWorkloadFactory factory;
- auto workload = CreateMultiplicationWorkloadTest<MultiplicationWorkloadType, DataType>(factory, graph);
-
- // Checks that outputs are as we expect them (see definition of CreateMultiplicationWorkloadTest).
- CheckInputsOutput(std::move(workload),
- TensorInfo({ 2, 3 }, DataType),
- TensorInfo({ 2, 3 }, DataType),
- TensorInfo({ 2, 3 }, DataType));
-}
-
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
-{
- RefCreateMultiplicationWorkloadTest<RefMultiplicationFloat32Workload, armnn::DataType::Float32>();
-}
-
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
-{
- RefCreateMultiplicationWorkloadTest<RefMultiplicationUint8Workload, armnn::DataType::QuantisedAsymm8>();
-}
-
BOOST_AUTO_TEST_CASE(CreateNormalizationWorkload)
{
Graph graph;
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index fb562e2ad0..52f0673772 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -96,36 +96,6 @@ std::unique_ptr<ActivationWorkload> CreateActivationWorkloadTest(armnn::IWorkloa
return workload;
}
-template <typename AdditionWorkload, armnn::DataType DataType>
-std::unique_ptr<AdditionWorkload> CreateAdditionWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- Layer* const layer = graph.AddLayer<AdditionLayer>("layer");
-
- // Creates extra layers.
- Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
- Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({2, 3}, DataType);
- Connect(input1, layer, tensorInfo, 0, 0);
- Connect(input2, layer, tensorInfo, 0, 1);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<AdditionWorkload>(*layer, graph, factory);
-
- AdditionQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
template <typename WorkloadType,
typename DescriptorType,
typename LayerType,
@@ -514,36 +484,6 @@ std::unique_ptr<FullyConnectedWorkload> CreateFullyConnectedWorkloadTest(armnn::
return workload;
}
-template <typename MultiplicationWorkload, armnn::DataType DataType>
-std::unique_ptr<MultiplicationWorkload> CreateMultiplicationWorkloadTest(armnn::IWorkloadFactory& factory,
- armnn::Graph& graph)
-{
- // Creates the layer we're testing.
- Layer* const layer = graph.AddLayer<MultiplicationLayer>("layer");
-
- // Creates extra layers.
- Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
- Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
- Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-
- // Connects up.
- armnn::TensorInfo tensorInfo({2, 3}, DataType);
- Connect(input1, layer, tensorInfo, 0, 0);
- Connect(input2, layer, tensorInfo, 0, 1);
- Connect(layer, output, tensorInfo);
- CreateTensorHandles(graph, factory);
-
- // Makes the workload and checks it.
- auto workload = MakeAndCheckWorkload<MultiplicationWorkload>(*layer, graph, factory);
-
- MultiplicationQueueDescriptor queueDescriptor = workload->GetData();
- BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
- BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-
- // Returns so we can do extra, backend-specific tests.
- return workload;
-}
-
template <typename NormalizationFloat32Workload, armnn::DataType DataType>
std::unique_ptr<NormalizationFloat32Workload> CreateNormalizationWorkloadTest(armnn::IWorkloadFactory& factory,
armnn::Graph& graph)
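
The refactored tests rely on the shared CreateArithmeticWorkloadTest template, whose parameter list is visible as hunk context above but whose body falls outside the hunks. Reconstructed from the two deleted per-operation helpers, it presumably reads roughly as follows (a sketch, not the verbatim source):

    template <typename WorkloadType,
              typename DescriptorType,
              typename LayerType,
              armnn::DataType DataType>
    std::unique_ptr<WorkloadType> CreateArithmeticWorkloadTest(armnn::IWorkloadFactory& factory,
                                                               armnn::Graph& graph)
    {
        // Creates the layer we're testing.
        Layer* const layer = graph.AddLayer<LayerType>("layer");

        // Creates extra layers.
        Layer* const input1 = graph.AddLayer<InputLayer>(1, "input1");
        Layer* const input2 = graph.AddLayer<InputLayer>(2, "input2");
        Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

        // Connects up.
        armnn::TensorInfo tensorInfo({2, 3}, DataType);
        Connect(input1, layer, tensorInfo, 0, 0);
        Connect(input2, layer, tensorInfo, 0, 1);
        Connect(layer, output, tensorInfo);
        CreateTensorHandles(graph, factory);

        // Makes the workload and checks it.
        auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, graph, factory);

        DescriptorType queueDescriptor = workload->GetData();
        BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
        BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);

        // Returns so we can do extra, backend-specific tests.
        return workload;
    }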