author     James Conroy <james.conroy@arm.com>          2020-11-13 10:18:51 +0000
committer  Teresa Charlin <teresa.charlinreyes@arm.com>  2020-11-18 20:26:13 +0000
commit     177df1e4483184e526f61a6bd1c00f9b33577571 (patch)
tree       66c98f7bb80af2f7f86319c18c0ee0f4291cf1f0
parent     0c95f4cd319874ffa4aba3a378e0e3346f688fdc (diff)
download   armnn-177df1e4483184e526f61a6bd1c00f9b33577571.tar.gz
IVGCVSW-5093 Add NEON Logical workload
* Add NEON Logical workloads for NOT, AND and OR.
* Enable Layer and IsSupported tests on NEON.

Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: Ibca59530457a664ca3d77751825642f8daf52fab
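For orientation, below is a minimal end-to-end sketch (not part of this change) of how the new NEON workloads are reached through the public Arm NN API. It assumes the LogicalBinary frontend already in the repository (INetwork::AddLogicalBinaryLayer and LogicalBinaryDescriptor, which the tests in this commit also use) and a runtime built with the CpuAcc backend; shapes, names and data values are illustrative only.

    // Hypothetical standalone example; not part of commit 177df1e.
    #include <armnn/ArmNN.hpp>

    #include <vector>

    int main()
    {
        using namespace armnn;

        // Build a graph: two boolean inputs -> LogicalOr -> output.
        INetworkPtr net = INetwork::Create();
        LogicalBinaryDescriptor desc(LogicalBinaryOperation::LogicalOr);

        IConnectableLayer* input0    = net->AddInputLayer(0);
        IConnectableLayer* input1    = net->AddInputLayer(1);
        IConnectableLayer* logicalOr = net->AddLogicalBinaryLayer(desc, "logicalOr");
        IConnectableLayer* output    = net->AddOutputLayer(0);

        TensorInfo info({1, 1, 1, 4}, DataType::Boolean);
        input0->GetOutputSlot(0).Connect(logicalOr->GetInputSlot(0));
        input1->GetOutputSlot(0).Connect(logicalOr->GetInputSlot(1));
        logicalOr->GetOutputSlot(0).Connect(output->GetInputSlot(0));
        input0->GetOutputSlot(0).SetTensorInfo(info);
        input1->GetOutputSlot(0).SetTensorInfo(info);
        logicalOr->GetOutputSlot(0).SetTensorInfo(info);

        // Optimize for CpuAcc so the NEON workloads added here are selected.
        IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
        std::vector<BackendId> backends = {Compute::CpuAcc};
        IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
        NetworkId netId;
        runtime->LoadNetwork(netId, std::move(optNet));

        // Boolean tensors are stored as uint8_t (0 or 1).
        std::vector<uint8_t> in0 = {0, 0, 1, 1};
        std::vector<uint8_t> in1 = {0, 1, 0, 1};
        std::vector<uint8_t> out(4);

        InputTensors inputs
        {
            {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), in0.data())},
            {1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), in1.data())}
        };
        OutputTensors outputs
        {
            {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), out.data())}
        };

        runtime->EnqueueWorkload(netId, inputs, outputs);
        // out now holds {0, 1, 1, 1}.
        return 0;
    }

With CpuAcc selected, NeonWorkloadFactory::CreateLogicalBinary (added in this commit) returns a NeonLogicalOrWorkload, which wraps arm_compute::NELogicalOr.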
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 64
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp                         | 33
-rw-r--r--  src/backends/neon/NeonLayerSupport.hpp                         |  6
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp                      | 52
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp                      |  6
-rw-r--r--  src/backends/neon/backend.mk                                   |  3
-rw-r--r--  src/backends/neon/test/NeonLayerSupportTests.cpp               | 20
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp                      | 16
-rw-r--r--  src/backends/neon/workloads/CMakeLists.txt                     | 10
-rw-r--r--  src/backends/neon/workloads/NeonLogicalAndWorkload.cpp         | 51
-rw-r--r--  src/backends/neon/workloads/NeonLogicalAndWorkload.hpp         | 30
-rw-r--r--  src/backends/neon/workloads/NeonLogicalNotWorkload.cpp         | 48
-rw-r--r--  src/backends/neon/workloads/NeonLogicalNotWorkload.hpp         | 28
-rw-r--r--  src/backends/neon/workloads/NeonLogicalOrWorkload.cpp          | 51
-rw-r--r--  src/backends/neon/workloads/NeonLogicalOrWorkload.hpp          | 30
-rw-r--r--  src/backends/neon/workloads/NeonWorkloads.hpp                  |  3
16 files changed, 437 insertions, 14 deletions
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 7c7ad5f159..1492a8092f 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -907,6 +907,70 @@ bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
}
template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
+bool IsLogicalBinaryLayerSupportedTests(std::string& reasonIfUnsupported)
+{
+ armnn::Graph graph;
+ armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalOr);
+
+ armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
+ armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");
+
+ armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalOrLayer");
+
+ armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output1");
+
+ armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
+ armnn::TensorInfo inputTensorInfo1({1, 1, 1, 4}, InputDataType);
+
+ armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);
+
+ input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+ input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
+ input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
+
+ layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
+
+ bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
+
+ return result;
+}
+
+template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
+bool IsLogicalBinaryLayerBroadcastSupportedTests(std::string& reasonIfUnsupported)
+{
+ armnn::Graph graph;
+ armnn::LogicalBinaryDescriptor desc(armnn::LogicalBinaryOperation::LogicalAnd);
+
+ armnn::Layer* const input0 = graph.AddLayer<armnn::InputLayer>(0, "input0");
+ armnn::Layer* const input1 = graph.AddLayer<armnn::InputLayer>(1, "input1");
+
+ armnn::Layer* const layer = graph.AddLayer<armnn::LogicalBinaryLayer>(desc, "logicalAndLayer");
+
+ armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output2");
+
+ armnn::TensorInfo inputTensorInfo0({1, 1, 1, 4}, InputDataType);
+ armnn::TensorInfo inputTensorInfo1({1, 1, 1, 1}, InputDataType);
+
+ armnn::TensorInfo outputTensorInfo({1, 1, 1, 4}, OutputDataType);
+
+ input0->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+ input1->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+
+ input0->GetOutputHandler(0).SetTensorInfo(inputTensorInfo0);
+ input1->GetOutputHandler(0).SetTensorInfo(inputTensorInfo1);
+
+ layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
+
+ bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
+
+ return result;
+}
+
+template<typename FactoryType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
bool IsMeanLayerSupportedTests(std::string& reasonIfUnsupported)
{
armnn::Graph graph;
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index f55d1c8df6..2d22576e57 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -37,6 +37,9 @@
#include "workloads/NeonInstanceNormalizationWorkload.hpp"
#include "workloads/NeonL2NormalizationFloatWorkload.hpp"
#include "workloads/NeonLogSoftmaxWorkload.hpp"
+#include "workloads/NeonLogicalAndWorkload.hpp"
+#include "workloads/NeonLogicalNotWorkload.hpp"
+#include "workloads/NeonLogicalOrWorkload.hpp"
#include "workloads/NeonLstmFloatWorkload.hpp"
#include "workloads/NeonMaximumWorkload.hpp"
#include "workloads/NeonMeanWorkload.hpp"
@@ -434,6 +437,11 @@ bool NeonLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
reasonIfUnsupported,
input,
output);
+ case UnaryOperation::LogicalNot:
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalNotWorkloadValidate,
+ reasonIfUnsupported,
+ input,
+ output);
default:
return false;
}
@@ -532,6 +540,31 @@ bool NeonLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonL2NormalizationWorkloadValidate, reasonIfUnsupported, input, output, descriptor);
}
+bool NeonLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ switch(descriptor.m_Operation)
+ {
+ case LogicalBinaryOperation::LogicalAnd:
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalAndWorkloadValidate,
+ reasonIfUnsupported,
+ input0,
+ input1,
+ output);
+ case LogicalBinaryOperation::LogicalOr:
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonLogicalOrWorkloadValidate,
+ reasonIfUnsupported,
+ input0,
+ input1,
+ output);
+ default:
+ return false;
+ }
+}
+
bool NeonLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index d477dcdd7c..dc13cc2e4e 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -160,6 +160,12 @@ public:
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
bool IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 709dd93e9b..3077ae0a8c 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -260,25 +260,27 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(
switch(descriptor.m_Parameters.m_Operation)
{
case UnaryOperation::Abs:
- {
- AbsQueueDescriptor absQueueDescriptor;
- absQueueDescriptor.m_Inputs = descriptor.m_Inputs;
- absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+ {
+ AbsQueueDescriptor absQueueDescriptor;
+ absQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+ absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
- return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
- }
+ return std::make_unique<NeonAbsWorkload>(absQueueDescriptor, info);
+ }
case UnaryOperation::Rsqrt:
- {
- RsqrtQueueDescriptor rsqrtQueueDescriptor;
- rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs;
- rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+ {
+ RsqrtQueueDescriptor rsqrtQueueDescriptor;
+ rsqrtQueueDescriptor.m_Inputs = descriptor.m_Inputs;
+ rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
- return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
- }
+ return std::make_unique<NeonRsqrtWorkload>(rsqrtQueueDescriptor, info);
+ }
case UnaryOperation::Neg:
return std::make_unique<NeonNegWorkload>(descriptor, info);
case UnaryOperation::Exp:
return std::make_unique<NeonExpWorkload>(descriptor, info);
+ case UnaryOperation::LogicalNot:
+ return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
default:
return nullptr;
}
@@ -356,6 +358,32 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogSoftmax(const LogSoftma
return std::make_unique<NeonLogSoftmaxWorkload>(descriptor, info, m_MemoryManager->GetIntraLayerManager());
}
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ switch(descriptor.m_Parameters.m_Operation)
+ {
+ case LogicalBinaryOperation::LogicalAnd:
+ return std::make_unique<NeonLogicalAndWorkload>(descriptor, info);
+ case LogicalBinaryOperation::LogicalOr:
+ return std::make_unique<NeonLogicalOrWorkload>(descriptor, info);
+ default:
+ return nullptr;
+ }
+}
+
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ switch(descriptor.m_Parameters.m_Operation)
+ {
+ case UnaryOperation::LogicalNot:
+ return std::make_unique<NeonLogicalNotWorkload>(descriptor, info);
+ default:
+ return nullptr;
+ }
+}
+
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 6a514e2812..f98a7f9f1d 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -143,6 +143,12 @@ public:
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+ std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 9bd08a1033..54560cb0fa 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -47,6 +47,9 @@ BACKEND_SOURCES := \
workloads/NeonGatherWorkload.cpp \
workloads/NeonInstanceNormalizationWorkload.cpp \
workloads/NeonL2NormalizationFloatWorkload.cpp \
+ workloads/NeonLogicalAndWorkload.cpp \
+ workloads/NeonLogicalNotWorkload.cpp \
+ workloads/NeonLogicalOrWorkload.cpp \
workloads/NeonLogSoftmaxWorkload.cpp \
workloads/NeonLstmFloatWorkload.cpp \
workloads/NeonMaximumWorkload.cpp \
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 3b086ad28f..a14122f573 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -75,6 +75,26 @@ BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedNeon)
BOOST_CHECK(result);
}
+BOOST_AUTO_TEST_CASE(IsLogicalBinarySupportedNeon)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsLogicalBinaryLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
+
+ BOOST_CHECK(result);
+}
+
+BOOST_AUTO_TEST_CASE(IsLogicalBinaryBroadcastSupportedNeon)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsLogicalBinaryLayerBroadcastSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
+
+ BOOST_CHECK(result);
+}
+
BOOST_AUTO_TEST_CASE(IsMeanSupportedNeon)
{
std::string reasonIfUnsupported;
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 59f00fc833..8e7742ab55 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -1323,6 +1323,22 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFill, SimpleFillTest<DataType::Float32>)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillF16, SimpleFillTest<DataType::Float16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillS32, SimpleFillTest<DataType::Signed32>)
+// Logical
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNot, LogicalNotTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNotInt, LogicalNotIntTest)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAnd, LogicalAndTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndInt, LogicalAndIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast1, LogicalAndBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast2, LogicalAndBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast3, LogicalAndBroadcast3Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOr, LogicalOrTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrInt, LogicalOrIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast1, LogicalOrBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast2, LogicalOrBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast3, LogicalOrBroadcast3Test)
+
#if defined(ARMNNREF_ENABLED)
// The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index ca9497e393..b03db99989 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -54,10 +54,16 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonInstanceNormalizationWorkload.hpp
NeonL2NormalizationFloatWorkload.cpp
NeonL2NormalizationFloatWorkload.hpp
- NeonLstmFloatWorkload.cpp
- NeonLstmFloatWorkload.hpp
+ NeonLogicalAndWorkload.cpp
+ NeonLogicalAndWorkload.hpp
+ NeonLogicalNotWorkload.cpp
+ NeonLogicalNotWorkload.hpp
+ NeonLogicalOrWorkload.cpp
+ NeonLogicalOrWorkload.hpp
NeonLogSoftmaxWorkload.cpp
NeonLogSoftmaxWorkload.hpp
+ NeonLstmFloatWorkload.cpp
+ NeonLstmFloatWorkload.hpp
NeonMaximumWorkload.cpp
NeonMaximumWorkload.hpp
NeonMeanWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonLogicalAndWorkload.cpp b/src/backends/neon/workloads/NeonLogicalAndWorkload.cpp
new file mode 100644
index 0000000000..d85e05cfe8
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogicalAndWorkload.cpp
@@ -0,0 +1,51 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonLogicalAndWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInputInfo0 = BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInputInfo1 = BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ const arm_compute::Status aclStatus = arm_compute::NELogicalAnd::validate(&aclInputInfo0,
+ &aclInputInfo1,
+ &aclOutputInfo);
+ return aclStatus;
+}
+
+NeonLogicalAndWorkload::NeonLogicalAndWorkload(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<LogicalBinaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonLogicalAndWorkload", 2, 1);
+
+ arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_LogicalAndLayer.configure(&input0, &input1, &output);
+}
+
+void NeonLogicalAndWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonLogicalAndWorkload_Execute");
+ m_LogicalAndLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonLogicalAndWorkload.hpp b/src/backends/neon/workloads/NeonLogicalAndWorkload.hpp
new file mode 100644
index 0000000000..1daadab9bb
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogicalAndWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NELogical.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogicalAndWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output);
+
+class NeonLogicalAndWorkload : public BaseWorkload<LogicalBinaryQueueDescriptor>
+{
+public:
+ NeonLogicalAndWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::NELogicalAnd m_LogicalAndLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonLogicalNotWorkload.cpp b/src/backends/neon/workloads/NeonLogicalNotWorkload.cpp
new file mode 100644
index 0000000000..cff5eaf2ba
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogicalNotWorkload.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonLogicalNotWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo& input,
+ const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ const arm_compute::Status aclStatus = arm_compute::NELogicalNot::validate(&aclInputInfo,
+ &aclOutputInfo);
+ return aclStatus;
+}
+
+NeonLogicalNotWorkload::NeonLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonLogicalNotWorkload", 1, 1);
+
+ arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_LogicalNotLayer.configure(&input, &output);
+}
+
+void NeonLogicalNotWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonLogicalNotWorkload_Execute");
+ m_LogicalNotLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonLogicalNotWorkload.hpp b/src/backends/neon/workloads/NeonLogicalNotWorkload.hpp
new file mode 100644
index 0000000000..31420f7e9b
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogicalNotWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NELogical.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonLogicalNotWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+ NeonLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::NELogicalNot m_LogicalNotLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonLogicalOrWorkload.cpp b/src/backends/neon/workloads/NeonLogicalOrWorkload.cpp
new file mode 100644
index 0000000000..c3f21e149d
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogicalOrWorkload.cpp
@@ -0,0 +1,51 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonLogicalOrWorkload.hpp"
+
+#include "NeonWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorHandle.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output)
+{
+ const arm_compute::TensorInfo aclInputInfo0 = BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInputInfo1 = BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ const arm_compute::Status aclStatus = arm_compute::NELogicalOr::validate(&aclInputInfo0,
+ &aclInputInfo1,
+ &aclOutputInfo);
+ return aclStatus;
+}
+
+NeonLogicalOrWorkload::NeonLogicalOrWorkload(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<LogicalBinaryQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonLogicalOrWorkload", 2, 1);
+
+ arm_compute::ITensor& input0 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_LogicalOrLayer.configure(&input0, &input1, &output);
+}
+
+void NeonLogicalOrWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonLogicalOrWorkload_Execute");
+ m_LogicalOrLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/neon/workloads/NeonLogicalOrWorkload.hpp b/src/backends/neon/workloads/NeonLogicalOrWorkload.hpp
new file mode 100644
index 0000000000..3b4ddb2d86
--- /dev/null
+++ b/src/backends/neon/workloads/NeonLogicalOrWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/NEON/functions/NELogical.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonLogicalOrWorkloadValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output);
+
+class NeonLogicalOrWorkload : public BaseWorkload<LogicalBinaryQueueDescriptor>
+{
+public:
+ NeonLogicalOrWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ virtual void Execute() const override;
+
+private:
+ mutable arm_compute::NELogicalOr m_LogicalOrLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 590b6f7a29..1a17b9aea9 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -30,6 +30,9 @@
#include "NeonGatherWorkload.hpp"
#include "NeonInstanceNormalizationWorkload.hpp"
#include "NeonL2NormalizationFloatWorkload.hpp"
+#include "NeonLogicalAndWorkload.hpp"
+#include "NeonLogicalNotWorkload.hpp"
+#include "NeonLogicalOrWorkload.hpp"
#include "NeonLogSoftmaxWorkload.hpp"
#include "NeonLstmFloatWorkload.hpp"
#include "NeonMaximumWorkload.hpp"