aboutsummaryrefslogtreecommitdiff
path: root/src/backends
diff options
context:
space:
mode:
authorJames Conroy <james.conroy@arm.com>2020-11-06 16:28:18 +0000
committerJames Conroy <james.conroy@arm.com>2020-11-09 18:26:28 +0000
commitaba90cd608eb65ab459cd71a6724511a1507763b (patch)
tree8c83548e02de2bc6c34811ea2eb9c3dac0976068 /src/backends
parentc9bc80e1d93d27ad298133c7345627e6a946fb92 (diff)
downloadarmnn-aba90cd608eb65ab459cd71a6724511a1507763b.tar.gz
IVGCVSW-5091 Add Logical ops frontend and ref impl
* Add frontend and reference implementation for logical ops NOT, AND, OR. * Unary NOT uses existing ElementwiseUnary layer and ElementwiseUnary descriptor. * Binary AND/OR uses new layer LogicalBinary and new LogicalBinary descriptor. * Add serialization/deserialization support and add missing ElementwiseUnary deserializer code. * Add additional Boolean decoder in BaseIterator.hpp. Signed-off-by: James Conroy <james.conroy@arm.com> Change-Id: Id343b01174053a166de1b98b6175e04a5065f720
Diffstat (limited to 'src/backends')
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.cpp19
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.hpp11
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp50
-rw-r--r--src/backends/backendsCommon/WorkloadData.hpp5
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp27
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.hpp6
-rw-r--r--src/backends/backendsCommon/common.mk1
-rw-r--r--src/backends/backendsCommon/test/CMakeLists.txt2
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp2
-rw-r--r--src/backends/backendsCommon/test/LayerTests.hpp1
-rw-r--r--src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp572
-rw-r--r--src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp83
-rw-r--r--src/backends/reference/RefLayerSupport.cpp47
-rw-r--r--src/backends/reference/RefLayerSupport.hpp11
-rw-r--r--src/backends/reference/RefWorkloadFactory.cpp13
-rw-r--r--src/backends/reference/RefWorkloadFactory.hpp6
-rw-r--r--src/backends/reference/backend.mk2
-rw-r--r--src/backends/reference/test/RefLayerTests.cpp13
-rw-r--r--src/backends/reference/workloads/BaseIterator.hpp34
-rw-r--r--src/backends/reference/workloads/CMakeLists.txt4
-rw-r--r--src/backends/reference/workloads/Decoders.hpp18
-rw-r--r--src/backends/reference/workloads/ElementwiseFunction.cpp25
-rw-r--r--src/backends/reference/workloads/ElementwiseFunction.hpp26
-rw-r--r--src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp75
-rw-r--r--src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp34
-rw-r--r--src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp64
-rw-r--r--src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp33
-rw-r--r--src/backends/reference/workloads/RefWorkloads.hpp2
28 files changed, 1184 insertions, 2 deletions
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 92c1023583..543591091b 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -313,13 +313,30 @@ bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo&, // in
}
bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo&, // input
- const TensorInfo&, // output
+ const TensorInfo&, // output
const L2NormalizationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsLogicalBinarySupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
+ const LogicalBinaryDescriptor&, // descriptor
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsLogicalUnarySupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const ElementwiseUnaryDescriptor&, // descriptor
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo&, // input
const TensorInfo&, // output
const LogSoftmaxDescriptor&, // descriptor
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 13fd39ea2e..7b873e3d6c 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -191,6 +191,17 @@ public:
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsLogicalUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 6d88664728..b39d6b3c4c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -3534,7 +3534,21 @@ void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
DataType::Signed32
};
- ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ std::vector<DataType> logicalSupportedTypes =
+ {
+ DataType::Boolean
+ };
+
+ if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
+ {
+ ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
+ }
+ else
+ {
+ ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ }
+
+
ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
@@ -3567,4 +3581,38 @@ void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
}
+void LogicalBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 2);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo,
+ descriptorName,
+ "input_0",
+ "input_1");
+
+ if (inputTensorInfo0.GetDataType() != DataType::Boolean)
+ {
+ throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
+ }
+
+ if (inputTensorInfo1.GetDataType() != DataType::Boolean)
+ {
+ throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
+ }
+
+ if (outputTensorInfo.GetDataType() != DataType::Boolean)
+ {
+ throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
+ }
+}
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 952ddc323a..dd39d312b7 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -663,4 +663,9 @@ struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters<Elementwi
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct LogicalBinaryQueueDescriptor : QueueDescriptorWithParameters<LogicalBinaryDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 5e3eed086a..3a8a2ae18f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -458,6 +458,21 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::LogicalBinary:
+ {
+ auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
+
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject->IsLogicalBinarySupported(input0,
+ input1,
+ output,
+ cLayer->GetParameters(),
+ reason);
+ break;
+ }
case LayerType::LogSoftmax:
{
auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
@@ -1441,6 +1456,18 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2Norma
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 5096c3ba51..df08b9a81d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -159,6 +159,12 @@ public:
virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
+ virtual std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
virtual std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index dd47d0a31f..7254d21f05 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -64,6 +64,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/GatherTestImpl.cpp \
test/layerTests/InstanceNormalizationTestImpl.cpp \
test/layerTests/L2NormalizationTestImpl.cpp \
+ test/layerTests/LogicalTestImpl.cpp \
test/layerTests/LogSoftmaxTestImpl.cpp \
test/layerTests/LstmTestImpl.cpp \
test/layerTests/MaximumTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index a1271cdd99..7894895c39 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -110,6 +110,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/L2NormalizationTestImpl.cpp
layerTests/L2NormalizationTestImpl.hpp
layerTests/LayerTestResult.hpp
+ layerTests/LogicalTestImpl.cpp
+ layerTests/LogicalTestImpl.hpp
layerTests/LogSoftmaxTestImpl.cpp
layerTests/LogSoftmaxTestImpl.hpp
layerTests/LstmTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index a8465b45f6..7c7ad5f159 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -614,6 +614,8 @@ DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)
DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
+DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)
+
DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)
DECLARE_LAYER_POLICY_2_PARAM(Lstm)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index a4615914d3..e9eb5b9553 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -33,6 +33,7 @@
#include <backendsCommon/test/layerTests/GatherTestImpl.hpp>
#include <backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp>
+#include <backendsCommon/test/layerTests/LogicalTestImpl.hpp>
#include <backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp>
#include <backendsCommon/test/layerTests/LstmTestImpl.hpp>
#include <backendsCommon/test/layerTests/MaximumTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
new file mode 100644
index 0000000000..2225de31e4
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -0,0 +1,572 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogicalTestImpl.hpp"
+
+#include <armnn/utility/Assert.hpp>
+#include <ResolveType.hpp>
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace {
+
+template <std::size_t NumDims>
+LayerTestResult<uint8_t, NumDims> LogicalUnaryTestHelper(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::UnaryOperation op,
+ const armnn::TensorShape& inputShape,
+ std::vector<uint8_t> input,
+ const armnn::TensorShape& outputShape,
+ std::vector<uint8_t> expectedOutput,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ ARMNN_ASSERT(inputShape.GetNumDimensions() == NumDims);
+ armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Boolean);
+
+ ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
+
+ auto inputTensor = MakeTensor<uint8_t, NumDims>(inputTensorInfo, input);
+
+ LayerTestResult <uint8_t, NumDims> ret(outputTensorInfo);
+
+ std::unique_ptr <armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr <armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ElementwiseUnaryDescriptor desc(op);
+ armnn::ElementwiseUnaryQueueDescriptor qDesc;
+ qDesc.m_Parameters = desc;
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+
+ auto workload = workloadFactory.CreateLogicalUnary(qDesc, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+ ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
+ ret.compareBoolean = true;
+ return ret;
+}
+
+template <std::size_t NumDims>
+LayerTestResult<uint8_t, NumDims> LogicalBinaryTestHelper(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::LogicalBinaryOperation op,
+ const armnn::TensorShape& inputShape0,
+ const armnn::TensorShape& inputShape1,
+ std::vector<uint8_t> input0,
+ std::vector<uint8_t> input1,
+ const armnn::TensorShape& outputShape,
+ std::vector<uint8_t> expectedOutput,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ ARMNN_ASSERT(inputShape0.GetNumDimensions() == NumDims);
+ armnn::TensorInfo inputTensorInfo0(inputShape0, armnn::DataType::Boolean);
+
+ ARMNN_ASSERT(inputShape1.GetNumDimensions() == NumDims);
+ armnn::TensorInfo inputTensorInfo1(inputShape1, armnn::DataType::Boolean);
+
+ ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
+
+ auto inputTensor0 = MakeTensor<uint8_t, NumDims>(inputTensorInfo0, input0);
+ auto inputTensor1 = MakeTensor<uint8_t, NumDims>(inputTensorInfo1, input1);
+
+ LayerTestResult <uint8_t, NumDims> ret(outputTensorInfo);
+
+ std::unique_ptr <armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
+ std::unique_ptr <armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+ std::unique_ptr <armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::LogicalBinaryDescriptor desc(op);
+ armnn::LogicalBinaryQueueDescriptor qDesc;
+ qDesc.m_Parameters = desc;
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(qDesc, info, inputTensorInfo0, inputHandle0.get());
+ AddInputToWorkload(qDesc, info, inputTensorInfo1, inputHandle1.get());
+ AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+
+ auto workload = workloadFactory.CreateLogicalBinary(qDesc, info);
+
+ inputHandle0->Allocate();
+ inputHandle1->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle0.get(), inputTensor0.origin());
+ CopyDataToITensorHandle(inputHandle1.get(), inputTensor1.origin());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+ ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
+ ret.compareBoolean = true;
+ return ret;
+}
+
+class UnaryTestData
+{
+public:
+ UnaryTestData() = default;
+ virtual ~UnaryTestData() = default;
+
+ armnn::TensorShape m_InputShape;
+ armnn::TensorShape m_OutputShape;
+
+ std::vector<uint8_t> m_InputData;
+
+ std::vector<uint8_t> m_OutputNot;
+};
+
+class BinaryTestData
+{
+public:
+ BinaryTestData() = default;
+ virtual ~BinaryTestData() = default;
+
+ armnn::TensorShape m_InputShape0;
+ armnn::TensorShape m_InputShape1;
+ armnn::TensorShape m_OutputShape;
+
+ std::vector<uint8_t> m_InputData0;
+ std::vector<uint8_t> m_InputData1;
+
+ std::vector<uint8_t> m_OutputAnd;
+ std::vector<uint8_t> m_OutputOr;
+};
+
+class SimpleUnaryTestData : public UnaryTestData
+{
+public:
+ SimpleUnaryTestData() : UnaryTestData()
+ {
+ m_InputShape = { 1, 1, 1, 4 };
+ m_OutputShape = m_InputShape;
+
+ m_InputData =
+ {
+ true, false, false, true
+ };
+
+ m_OutputNot =
+ {
+ false, true, true, false
+ };
+ }
+};
+
+class SimpleUnaryIntTestData : public UnaryTestData
+{
+public:
+ SimpleUnaryIntTestData() : UnaryTestData()
+ {
+ m_InputShape = { 1, 1, 1, 4 };
+ m_OutputShape = m_InputShape;
+
+ m_InputData =
+ {
+ 1, 11, 111, 0
+ };
+
+ m_OutputNot =
+ {
+ 0, 0, 0, 1
+ };
+ }
+};
+
+class SimpleBinaryTestData : public BinaryTestData
+{
+public:
+ SimpleBinaryTestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = m_InputShape0;
+ m_OutputShape = m_InputShape1;
+
+ m_InputData0 =
+ {
+ true, false, false, true
+ };
+
+ m_InputData1 =
+ {
+ true, false, true, false
+ };
+
+ m_OutputAnd =
+ {
+ true, false, false, false
+ };
+
+ m_OutputOr =
+ {
+ true, false, true, true
+ };
+ }
+};
+
+class SimpleBinaryIntTestData : public BinaryTestData
+{
+public:
+ SimpleBinaryIntTestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = m_InputShape0;
+ m_OutputShape = m_InputShape1;
+
+ m_InputData0 =
+ {
+ 1, 11, 111, 0
+ };
+
+ m_InputData1 =
+ {
+ 0, 111, 111, 0
+ };
+
+ m_OutputAnd =
+ {
+ 0, 1, 1, 0
+ };
+
+ m_OutputOr =
+ {
+ 1, 1, 1, 0
+ };
+ }
+};
+
+class BroadcastBinary1TestData : public BinaryTestData
+{
+public:
+ BroadcastBinary1TestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = { 1, 1, 1, 1 };
+ m_OutputShape = m_InputShape0;
+
+ m_InputData0 =
+ {
+ true, false, false, true
+ };
+
+ m_InputData1 =
+ {
+ true
+ };
+
+ m_OutputAnd =
+ {
+ true, false, false, true
+ };
+
+ m_OutputOr =
+ {
+ true, true, true, true
+ };
+ }
+};
+
+class BroadcastBinary2TestData : public BinaryTestData
+{
+public:
+ BroadcastBinary2TestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 1 };
+ m_InputShape1 = { 1, 1, 1, 4 };
+ m_OutputShape = m_InputShape1;
+
+ m_InputData0 =
+ {
+ true
+ };
+
+ m_InputData1 =
+ {
+ true, false, false, true
+ };
+
+ m_OutputAnd =
+ {
+ true, false, false, true
+ };
+
+ m_OutputOr =
+ {
+ true, true, true, true
+ };
+ }
+};
+
+class BroadcastBinary3TestData : public BinaryTestData
+{
+public:
+ BroadcastBinary3TestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = { 1, 1, 1, 1 };
+ m_OutputShape = m_InputShape0;
+
+ m_InputData0 =
+ {
+ true, false, false, true
+ };
+
+ m_InputData1 =
+ {
+ false
+ };
+
+ m_OutputAnd =
+ {
+ false, false, false, false
+ };
+
+ m_OutputOr =
+ {
+ true, false, false, true
+ };
+ }
+};
+
+static SimpleUnaryTestData s_SimpleUnaryTestData;
+static SimpleBinaryTestData s_SimpleBinaryTestData;
+
+static SimpleUnaryIntTestData s_SimpleUnaryIntTestData;
+static SimpleBinaryIntTestData s_SimpleBinaryIntTestData;
+
+static BroadcastBinary1TestData s_BroadcastBinary1TestData;
+static BroadcastBinary2TestData s_BroadcastBinary2TestData;
+static BroadcastBinary3TestData s_BroadcastBinary3TestData;
+
+
+} // anonymous namespace
+
+// Unary - Not
+LayerTestResult<uint8_t, 4> LogicalNotTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalUnaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::LogicalNot,
+ s_SimpleUnaryTestData.m_InputShape,
+ s_SimpleUnaryTestData.m_InputData,
+ s_SimpleUnaryTestData.m_OutputShape,
+ s_SimpleUnaryTestData.m_OutputNot,
+ tensorHandleFactory);
+}
+
+// Unary - Not with integers
+LayerTestResult<uint8_t, 4> LogicalNotIntTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalUnaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::LogicalNot,
+ s_SimpleUnaryIntTestData.m_InputShape,
+ s_SimpleUnaryIntTestData.m_InputData,
+ s_SimpleUnaryIntTestData.m_OutputShape,
+ s_SimpleUnaryIntTestData.m_OutputNot,
+ tensorHandleFactory);
+}
+
+// Binary - And
+LayerTestResult<uint8_t, 4> LogicalAndTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_SimpleBinaryTestData.m_InputShape0,
+ s_SimpleBinaryTestData.m_InputShape1,
+ s_SimpleBinaryTestData.m_InputData0,
+ s_SimpleBinaryTestData.m_InputData1,
+ s_SimpleBinaryTestData.m_OutputShape,
+ s_SimpleBinaryTestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or
+LayerTestResult<uint8_t, 4> LogicalOrTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_SimpleBinaryTestData.m_InputShape0,
+ s_SimpleBinaryTestData.m_InputShape1,
+ s_SimpleBinaryTestData.m_InputData0,
+ s_SimpleBinaryTestData.m_InputData1,
+ s_SimpleBinaryTestData.m_OutputShape,
+ s_SimpleBinaryTestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And with integers
+LayerTestResult<uint8_t, 4> LogicalAndIntTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_SimpleBinaryIntTestData.m_InputShape0,
+ s_SimpleBinaryIntTestData.m_InputShape1,
+ s_SimpleBinaryIntTestData.m_InputData0,
+ s_SimpleBinaryIntTestData.m_InputData1,
+ s_SimpleBinaryIntTestData.m_OutputShape,
+ s_SimpleBinaryIntTestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or with integers
+LayerTestResult<uint8_t, 4> LogicalOrIntTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_SimpleBinaryIntTestData.m_InputShape0,
+ s_SimpleBinaryIntTestData.m_InputShape1,
+ s_SimpleBinaryIntTestData.m_InputData0,
+ s_SimpleBinaryIntTestData.m_InputData1,
+ s_SimpleBinaryIntTestData.m_OutputShape,
+ s_SimpleBinaryIntTestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_BroadcastBinary1TestData.m_InputShape0,
+ s_BroadcastBinary1TestData.m_InputShape1,
+ s_BroadcastBinary1TestData.m_InputData0,
+ s_BroadcastBinary1TestData.m_InputData1,
+ s_BroadcastBinary1TestData.m_OutputShape,
+ s_BroadcastBinary1TestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_BroadcastBinary1TestData.m_InputShape0,
+ s_BroadcastBinary1TestData.m_InputShape1,
+ s_BroadcastBinary1TestData.m_InputData0,
+ s_BroadcastBinary1TestData.m_InputData1,
+ s_BroadcastBinary1TestData.m_OutputShape,
+ s_BroadcastBinary1TestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_BroadcastBinary2TestData.m_InputShape0,
+ s_BroadcastBinary2TestData.m_InputShape1,
+ s_BroadcastBinary2TestData.m_InputData0,
+ s_BroadcastBinary2TestData.m_InputData1,
+ s_BroadcastBinary2TestData.m_OutputShape,
+ s_BroadcastBinary2TestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_BroadcastBinary2TestData.m_InputShape0,
+ s_BroadcastBinary2TestData.m_InputShape1,
+ s_BroadcastBinary2TestData.m_InputData0,
+ s_BroadcastBinary2TestData.m_InputData1,
+ s_BroadcastBinary2TestData.m_OutputShape,
+ s_BroadcastBinary2TestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_BroadcastBinary3TestData.m_InputShape0,
+ s_BroadcastBinary3TestData.m_InputShape1,
+ s_BroadcastBinary3TestData.m_InputData0,
+ s_BroadcastBinary3TestData.m_InputData1,
+ s_BroadcastBinary3TestData.m_OutputShape,
+ s_BroadcastBinary3TestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_BroadcastBinary3TestData.m_InputShape0,
+ s_BroadcastBinary3TestData.m_InputShape1,
+ s_BroadcastBinary3TestData.m_InputData0,
+ s_BroadcastBinary3TestData.m_InputData1,
+ s_BroadcastBinary3TestData.m_OutputShape,
+ s_BroadcastBinary3TestData.m_OutputOr,
+ tensorHandleFactory);
+} \ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp
new file mode 100644
index 0000000000..1711d90d5a
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp
@@ -0,0 +1,83 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+// Unary - Logical Not
+LayerTestResult<uint8_t, 4> LogicalNotTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Unary - Logical Not with integers
+LayerTestResult<uint8_t, 4> LogicalNotIntTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And
+LayerTestResult<uint8_t, 4> LogicalAndTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or
+LayerTestResult<uint8_t, 4> LogicalOrTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And with integers
+LayerTestResult<uint8_t, 4> LogicalAndIntTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or with integers
+LayerTestResult<uint8_t, 4> LogicalOrIntTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory); \ No newline at end of file
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 52c079fae4..f48c120203 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1105,6 +1105,53 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
return supported;
}
+bool RefLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(descriptor);
+
+ std::array<DataType, 1> supportedTypes =
+ {
+ DataType::Boolean
+ };
+
+ bool supported = true;
+ supported &= CheckSupportRule(TypeAnyOf(input0, supportedTypes), reasonIfUnsupported,
+ "Reference LogicalBinary: input 0 type not supported");
+ supported &= CheckSupportRule(TypeAnyOf(input1, supportedTypes), reasonIfUnsupported,
+ "Reference LogicalBinary: input 1 type not supported");
+
+ supported &= CheckSupportRule(TypesAreEqual(input0, output), reasonIfUnsupported,
+ "Reference LogicalBinary: input and output types do not match");
+
+ return supported;
+}
+
+bool RefLayerSupport::IsLogicalUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ IgnoreUnused(descriptor);
+
+ std::array<DataType, 1> supportedTypes =
+ {
+ DataType::Boolean
+ };
+
+ bool supported = true;
+ supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+ "Reference LogicalUnary: input type not supported");
+
+ supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+ "Reference LogicalUnary: input and output types do not match");
+
+ return supported;
+}
+
bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index a233082aaa..318eb4064b 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -182,6 +182,17 @@ public:
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
+ bool IsLogicalUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported) const override;
+
bool IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index e7e57b15d1..9080028e72 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -401,6 +401,19 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateL2Normalization(const L2Nor
return std::make_unique<RefL2NormalizationWorkload>(descriptor, info);
}
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<RefLogicalBinaryWorkload>(descriptor, info);
+}
+
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::make_unique<RefLogicalUnaryWorkload>(descriptor, info);
+}
+
+
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 5f22c9eac2..8c3d719ae0 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -162,6 +162,12 @@ public:
std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
+ std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
+ std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const override;
+
std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index bf5f340afc..b4aa3a0953 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -69,6 +69,8 @@ BACKEND_SOURCES := \
workloads/RefGatherWorkload.cpp \
workloads/RefInstanceNormalizationWorkload.cpp \
workloads/RefL2NormalizationWorkload.cpp \
+ workloads/RefLogicalBinaryWorkload.cpp \
+ workloads/RefLogicalUnaryWorkload.cpp \
workloads/RefLogSoftmaxWorkload.cpp \
workloads/RefLstmWorkload.cpp \
workloads/RefMeanWorkload.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 7542a64711..60400c514e 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -2215,4 +2215,17 @@ ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dQuantisedAsymm8, Exp3dTest<DataType::QAsymmU8
ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2dQuantisedSymm16, Exp2dTest<DataType::QSymmS16>)
ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dQuantisedSymm16, Exp3dTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNot, LogicalNotTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNotInt, LogicalNotIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAnd, LogicalAndTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOr, LogicalOrTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndInt, LogicalAndIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrInt, LogicalOrIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast1, LogicalAndBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast1, LogicalOrBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast2, LogicalAndBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast2, LogicalOrBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast3, LogicalAndBroadcast3Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast3, LogicalOrBroadcast3Test)
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index a10f383e90..73e24691d9 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -515,6 +515,40 @@ public:
}
};
+class BooleanDecoderBool : public TypedIterator<const uint8_t, Decoder<bool>>
+{
+public:
+ BooleanDecoderBool(const uint8_t* data)
+ : TypedIterator(data) {}
+
+ BooleanDecoderBool()
+ : BooleanDecoderBool(nullptr) {}
+
+ bool Get() const override
+ {
+ return *m_Iterator;
+ }
+
+ std::vector<float> DecodeTensor(const TensorShape& tensorShape,
+ const unsigned int channelMultiplier,
+ const bool isDepthwise) override
+ {
+ IgnoreUnused(channelMultiplier, isDepthwise);
+
+ const unsigned int size = tensorShape.GetNumElements();
+ std::vector<float> decodedTensor;
+ decodedTensor.reserve(size);
+
+ for (uint32_t i = 0; i < size; ++i)
+ {
+ this->operator[](i);
+ decodedTensor.emplace_back(*m_Iterator);
+ }
+
+ return decodedTensor;
+ }
+};
+
class QASymm8Encoder : public TypedIterator<uint8_t, Encoder<float>>
{
public:
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index cd9efc96af..1b20e5bf2d 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -107,6 +107,10 @@ list(APPEND armnnRefBackendWorkloads_sources
RefInstanceNormalizationWorkload.hpp
RefL2NormalizationWorkload.cpp
RefL2NormalizationWorkload.hpp
+ RefLogicalBinaryWorkload.cpp
+ RefLogicalBinaryWorkload.hpp
+ RefLogicalUnaryWorkload.cpp
+ RefLogicalUnaryWorkload.hpp
RefLogSoftmaxWorkload.cpp
RefLogSoftmaxWorkload.hpp
RefLstmWorkload.cpp
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 08e0140fad..0b3f36047d 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -150,6 +150,24 @@ inline std::unique_ptr<Decoder<float>> MakeDecoder(const TensorInfo& info, const
}
template<>
+inline std::unique_ptr<Decoder<bool>> MakeDecoder(const TensorInfo& info, const void* data)
+{
+ switch(info.GetDataType())
+ {
+ case DataType::Boolean:
+ {
+ return std::make_unique<BooleanDecoderBool>(static_cast<const uint8_t*>(data));
+ }
+ default:
+ {
+ ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
+ break;
+ }
+ }
+ return nullptr;
+}
+
+template<>
inline std::unique_ptr<Decoder<int32_t>> MakeDecoder(const TensorInfo& info, const void* data)
{
switch(info.GetDataType())
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index afae188bd6..d6f3f42478 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -37,6 +37,26 @@ ElementwiseUnaryFunction<Functor>::ElementwiseUnaryFunction(const TensorShape& i
BroadcastLoop(inShape, outShape).Unroll(Functor(), 0, inData, outData);
}
+template <typename Functor>
+LogicalBinaryFunction<Functor>::LogicalBinaryFunction(const TensorShape& inShape0,
+ const TensorShape& inShape1,
+ const TensorShape& outShape,
+ Decoder<InType>& inData0,
+ Decoder<InType>& inData1,
+ Encoder<OutType>& outData)
+{
+ BroadcastLoop(inShape0, inShape1, outShape).Unroll(Functor(), 0, inData0, inData1, outData);
+}
+
+template <typename Functor>
+LogicalUnaryFunction<Functor>::LogicalUnaryFunction(const TensorShape& inShape,
+ const TensorShape& outShape,
+ Decoder<InType>& inData,
+ Encoder<OutType>& outData)
+{
+ BroadcastLoop(inShape, outShape).Unroll(Functor(), 0, inData, outData);
+}
+
} //namespace armnn
template struct armnn::ElementwiseBinaryFunction<std::plus<float>>;
@@ -67,3 +87,8 @@ template struct armnn::ElementwiseUnaryFunction<armnn::exp<float>>;
template struct armnn::ElementwiseUnaryFunction<std::negate<float>>;
template struct armnn::ElementwiseUnaryFunction<armnn::rsqrt<float>>;
template struct armnn::ElementwiseUnaryFunction<armnn::sqrt<float>>;
+
+// Logical Unary
+template struct armnn::LogicalUnaryFunction<std::logical_not<bool>>;
+template struct armnn::LogicalBinaryFunction<std::logical_and<bool>>;
+template struct armnn::LogicalBinaryFunction<std::logical_or<bool>>;
diff --git a/src/backends/reference/workloads/ElementwiseFunction.hpp b/src/backends/reference/workloads/ElementwiseFunction.hpp
index 8259ba5ac7..ef4a2dc7d5 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.hpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.hpp
@@ -37,4 +37,30 @@ struct ElementwiseUnaryFunction
Encoder<OutType>& outData);
};
+template <typename Functor>
+struct LogicalBinaryFunction
+{
+ using OutType = bool;
+ using InType = bool;
+
+ LogicalBinaryFunction(const TensorShape& inShape0,
+ const TensorShape& inShape1,
+ const TensorShape& outShape,
+ Decoder<InType>& inData0,
+ Decoder<InType>& inData1,
+ Encoder<OutType>& outData);
+};
+
+template <typename Functor>
+struct LogicalUnaryFunction
+{
+ using OutType = bool;
+ using InType = bool;
+
+ LogicalUnaryFunction(const TensorShape& inShape,
+ const TensorShape& outShape,
+ Decoder<InType>& inData,
+ Encoder<OutType>& outData);
+};
+
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
new file mode 100644
index 0000000000..1b4e8f9aa0
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.cpp
@@ -0,0 +1,75 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefLogicalBinaryWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "ElementwiseFunction.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <Profiling.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+RefLogicalBinaryWorkload::RefLogicalBinaryWorkload(const LogicalBinaryQueueDescriptor& desc,
+ const WorkloadInfo& info)
+ : BaseWorkload<LogicalBinaryQueueDescriptor>(desc, info)
+{}
+
+void RefLogicalBinaryWorkload::PostAllocationConfigure()
+{
+ const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ m_Input0 = MakeDecoder<InType>(inputInfo0);
+ m_Input1 = MakeDecoder<InType>(inputInfo1);
+ m_Output = MakeEncoder<OutType>(outputInfo);
+}
+
+void RefLogicalBinaryWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalBinaryWorkload_Execute");
+
+ const TensorInfo& inputInfo0 = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& inputInfo1 = GetTensorInfo(m_Data.m_Inputs[1]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ const TensorShape& inShape0 = inputInfo0.GetShape();
+ const TensorShape& inShape1 = inputInfo1.GetShape();
+ const TensorShape& outShape = outputInfo.GetShape();
+
+ m_Input0->Reset(m_Data.m_Inputs[0]->Map());
+ m_Input1->Reset(m_Data.m_Inputs[1]->Map());
+ m_Output->Reset(m_Data.m_Outputs[0]->Map());
+
+ using AndFunction = LogicalBinaryFunction<std::logical_and<bool>>;
+ using OrFunction = LogicalBinaryFunction<std::logical_or<bool>>;
+
+ switch (m_Data.m_Parameters.m_Operation)
+ {
+ case LogicalBinaryOperation::LogicalAnd:
+ {
+ AndFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+ break;
+ }
+ case LogicalBinaryOperation::LogicalOr:
+ {
+ OrFunction(inShape0, inShape1, outShape, *m_Input0, *m_Input1, *m_Output);
+ break;
+ }
+ default:
+ {
+ throw InvalidArgumentException(std::string("Unsupported Logical Binary operation ") +
+ GetLogicalBinaryOperationAsCString(m_Data.m_Parameters.m_Operation), CHECK_LOCATION());
+ }
+ }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp b/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
new file mode 100644
index 0000000000..4d6baf5fa4
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalBinaryWorkload.hpp
@@ -0,0 +1,34 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefLogicalBinaryWorkload : public BaseWorkload<LogicalBinaryQueueDescriptor>
+{
+public:
+ using BaseWorkload<LogicalBinaryQueueDescriptor>::m_Data;
+
+ RefLogicalBinaryWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ void PostAllocationConfigure() override;
+ virtual void Execute() const override;
+
+private:
+ using InType = bool;
+ using OutType = bool;
+
+ std::unique_ptr<Decoder<InType>> m_Input0;
+ std::unique_ptr<Decoder<InType>> m_Input1;
+ std::unique_ptr<Encoder<OutType>> m_Output;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
new file mode 100644
index 0000000000..76eb5ac39f
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.cpp
@@ -0,0 +1,64 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefLogicalUnaryWorkload.hpp"
+
+#include "Decoders.hpp"
+#include "ElementwiseFunction.hpp"
+#include "Encoders.hpp"
+#include "RefWorkloadUtils.hpp"
+
+#include <Profiling.hpp>
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnn
+{
+
+RefLogicalUnaryWorkload::RefLogicalUnaryWorkload(const ElementwiseUnaryQueueDescriptor& desc,
+ const WorkloadInfo& info)
+ : BaseWorkload<ElementwiseUnaryQueueDescriptor>(desc, info)
+{}
+
+void RefLogicalUnaryWorkload::PostAllocationConfigure()
+{
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ m_Input = MakeDecoder<InType>(inputInfo);
+ m_Output = MakeEncoder<OutType>(outputInfo);
+}
+
+void RefLogicalUnaryWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefLogicalUnaryWorkload_Execute");
+
+ const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+ const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+ const TensorShape& inShape = inputInfo.GetShape();
+ const TensorShape& outShape = outputInfo.GetShape();
+
+ m_Input->Reset(m_Data.m_Inputs[0]->Map());
+ m_Output->Reset(m_Data.m_Outputs[0]->Map());
+
+ using NotFunction = LogicalUnaryFunction<std::logical_not<bool>>;
+
+ switch (m_Data.m_Parameters.m_Operation)
+ {
+ case UnaryOperation::LogicalNot:
+ {
+ NotFunction(inShape, outShape, *m_Input, *m_Output);
+ break;
+ }
+ default:
+ {
+ throw InvalidArgumentException(std::string("Unsupported Logical Unary operation ") +
+ GetUnaryOperationAsCString(m_Data.m_Parameters.m_Operation), CHECK_LOCATION());
+ }
+ }
+}
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp b/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp
new file mode 100644
index 0000000000..0d8b35495c
--- /dev/null
+++ b/src/backends/reference/workloads/RefLogicalUnaryWorkload.hpp
@@ -0,0 +1,33 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefLogicalUnaryWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+ using BaseWorkload<ElementwiseUnaryQueueDescriptor>::m_Data;
+
+ RefLogicalUnaryWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+ void PostAllocationConfigure() override;
+ virtual void Execute() const override;
+
+private:
+ using InType = bool;
+ using OutType = bool;
+
+ std::unique_ptr<Decoder<InType>> m_Input;
+ std::unique_ptr<Encoder<OutType>> m_Output;
+};
+
+} // namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index fc47cff84f..390b2a8d55 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -41,6 +41,8 @@
#include "RefGatherWorkload.hpp"
#include "RefInstanceNormalizationWorkload.hpp"
#include "RefL2NormalizationWorkload.hpp"
+#include "RefLogicalBinaryWorkload.hpp"
+#include "RefLogicalUnaryWorkload.hpp"
#include "RefLogSoftmaxWorkload.hpp"
#include "RefLstmWorkload.hpp"
#include "RefMeanWorkload.hpp"