Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp                |  19
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp                |  11
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                    |  50
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                    |   5
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp                 |  27
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp                 |   6
-rw-r--r--  src/backends/backendsCommon/common.mk                           |   1
-rw-r--r--  src/backends/backendsCommon/test/CMakeLists.txt                 |   2
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp   |   2
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp                 |   1
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp | 572
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp |  83
12 files changed, 777 insertions(+), 2 deletions(-)
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 92c1023583..543591091b 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -313,13 +313,30 @@ bool LayerSupportBase::IsInstanceNormalizationSupported(const TensorInfo&, // in
}
bool LayerSupportBase::IsL2NormalizationSupported(const TensorInfo&, // input
- const TensorInfo&, // output
+ const TensorInfo&, // output
const L2NormalizationDescriptor&, // descriptor
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsLogicalBinarySupported(const TensorInfo&, // input0
+ const TensorInfo&, // input1
+ const TensorInfo&, // output
+ const LogicalBinaryDescriptor&, // descriptor
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
+bool LayerSupportBase::IsLogicalUnarySupported(const TensorInfo&, // input
+ const TensorInfo&, // output
+ const ElementwiseUnaryDescriptor&, // descriptor
+ Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsLogSoftmaxSupported(const TensorInfo&, // input
const TensorInfo&, // output
const LogSoftmaxDescriptor&, // descriptor
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 13fd39ea2e..7b873e3d6c 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -191,6 +191,17 @@ public:
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsLogicalBinarySupported(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ const LogicalBinaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
+ bool IsLogicalUnarySupported(const TensorInfo& input,
+ const TensorInfo& output,
+ const ElementwiseUnaryDescriptor& descriptor,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsLogSoftmaxSupported(const TensorInfo& input,
const TensorInfo& output,
const LogSoftmaxDescriptor& descriptor,
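Note: the two base-class methods added above fall through to DefaultLayerSupport, i.e. they report the layer as unsupported and fill in a reason string. A backend opts in by overriding them in its own layer-support class. The sketch below is illustrative only; "SketchLayerSupport" and the Boolean-only check are assumptions, not part of this patch.

// Illustrative only: a minimal backend-side override of the new entry point.
// Real backends add an equivalent override in their own LayerSupport class.
#include <backendsCommon/LayerSupportBase.hpp>
#include <armnn/Tensor.hpp>

class SketchLayerSupport : public armnn::LayerSupportBase
{
public:
    bool IsLogicalBinarySupported(const armnn::TensorInfo& input0,
                                  const armnn::TensorInfo& input1,
                                  const armnn::TensorInfo& output,
                                  const armnn::LogicalBinaryDescriptor& /*descriptor*/,
                                  armnn::Optional<std::string&> reasonIfUnsupported) const override
    {
        // Accept only Boolean tensors, mirroring the validation added in WorkloadData.cpp below.
        bool supported = input0.GetDataType() == armnn::DataType::Boolean &&
                         input1.GetDataType() == armnn::DataType::Boolean &&
                         output.GetDataType() == armnn::DataType::Boolean;
        if (!supported && reasonIfUnsupported.has_value())
        {
            reasonIfUnsupported.value() = "Logical binary: all tensors must be Boolean";
        }
        return supported;
    }
};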
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 6d88664728..b39d6b3c4c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -3534,7 +3534,21 @@ void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
DataType::Signed32
};
- ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ std::vector<DataType> logicalSupportedTypes =
+ {
+ DataType::Boolean
+ };
+
+ if (m_Parameters.m_Operation == UnaryOperation::LogicalNot)
+ {
+ ValidateDataTypes(inputTensorInfo, logicalSupportedTypes, descriptorName);
+ }
+ else
+ {
+ ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+ }
+
+
ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
}
@@ -3567,4 +3581,38 @@ void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
}
+void LogicalBinaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"LogicalBinaryQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 2);
+ ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+ const TensorInfo& inputTensorInfo0 = workloadInfo.m_InputTensorInfos[0];
+ const TensorInfo& inputTensorInfo1 = workloadInfo.m_InputTensorInfos[1];
+ const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+ ValidateBroadcastTensorShapesMatch(inputTensorInfo0,
+ inputTensorInfo1,
+ outputTensorInfo,
+ descriptorName,
+ "input_0",
+ "input_1");
+
+ if (inputTensorInfo0.GetDataType() != DataType::Boolean)
+ {
+ throw InvalidArgumentException(descriptorName + ": Input tensor 0 type must be Boolean.");
+ }
+
+ if (inputTensorInfo1.GetDataType() != DataType::Boolean)
+ {
+ throw InvalidArgumentException(descriptorName + ": Input tensor 1 type must be Boolean.");
+ }
+
+ if (outputTensorInfo.GetDataType() != DataType::Boolean)
+ {
+ throw InvalidArgumentException(descriptorName + ": Output tensor type must be Boolean.");
+ }
+}
+
} // namespace armnn
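Note: a minimal sketch of what the new LogicalBinaryQueueDescriptor::Validate accepts and rejects. Shapes, variable names, and the wrapper function are illustrative only; the includes mirror those used by the test file added later in this patch.

// Illustrative only: exercising the validation added above.
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>

void ValidateLogicalAndSketch()
{
    armnn::TensorInfo boolInfo({ 1, 1, 1, 4 }, armnn::DataType::Boolean);
    armnn::TensorInfo floatInfo({ 1, 1, 1, 4 }, armnn::DataType::Float32);

    armnn::WorkloadInfo info;
    info.m_InputTensorInfos  = { boolInfo, boolInfo };
    info.m_OutputTensorInfos = { boolInfo };

    armnn::LogicalBinaryQueueDescriptor descriptor;
    descriptor.m_Parameters = armnn::LogicalBinaryDescriptor(armnn::LogicalBinaryOperation::LogicalAnd);

    descriptor.Validate(info); // passes: two Boolean inputs, Boolean output, matching shapes

    info.m_InputTensorInfos[1] = floatInfo;
    // descriptor.Validate(info); // would now throw InvalidArgumentException:
    //                            // "LogicalBinaryQueueDescriptor: Input tensor 1 type must be Boolean."
}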
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 952ddc323a..dd39d312b7 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -663,4 +663,9 @@ struct ElementwiseUnaryQueueDescriptor : QueueDescriptorWithParameters<Elementwi
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct LogicalBinaryQueueDescriptor : QueueDescriptorWithParameters<LogicalBinaryDescriptor>
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 5e3eed086a..3a8a2ae18f 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -458,6 +458,21 @@ bool IWorkloadFactory::IsLayerConfigurationSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::LogicalBinary:
+ {
+ auto cLayer = PolymorphicDowncast<const LogicalBinaryLayer*>(&layer);
+
+ const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& input1 = layer.GetInputSlot(1).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+ result = layerSupportObject->IsLogicalBinarySupported(input0,
+ input1,
+ output,
+ cLayer->GetParameters(),
+ reason);
+ break;
+ }
case LayerType::LogSoftmax:
{
auto cLayer = PolymorphicDowncast<const LogSoftmaxLayer*>(&layer);
@@ -1441,6 +1456,18 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2Norma
return std::unique_ptr<IWorkload>();
}
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& /*desc*/,
+ const WorkloadInfo& /*info*/) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
std::unique_ptr<IWorkload> IWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& /*descriptor*/,
const WorkloadInfo& /*info*/) const
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 5096c3ba51..df08b9a81d 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -159,6 +159,12 @@ public:
virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
+ virtual std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
+ virtual std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
+
virtual std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const;
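Note: the base-class Create functions added above return an empty unique_ptr, which is how an unimplemented workload is signalled; a backend overrides them to return a concrete workload. A hedged sketch of the calling side follows. The wrapper name is a placeholder, and real callers also attach ITensorHandle pointers to the descriptor's m_Inputs and m_Outputs.

// Illustrative only: how the new descriptor and factory entry point fit together.
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

std::unique_ptr<armnn::IWorkload> MakeLogicalAndWorkloadSketch(armnn::IWorkloadFactory& factory,
                                                               const armnn::WorkloadInfo& info)
{
    armnn::LogicalBinaryQueueDescriptor descriptor;
    descriptor.m_Parameters = armnn::LogicalBinaryDescriptor(armnn::LogicalBinaryOperation::LogicalAnd);

    descriptor.Validate(info); // throws unless both inputs and the output are Boolean

    // Returns an empty unique_ptr for factories that keep the default implementation above.
    return factory.CreateLogicalBinary(descriptor, info);
}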
diff --git a/src/backends/backendsCommon/common.mk b/src/backends/backendsCommon/common.mk
index dd47d0a31f..7254d21f05 100644
--- a/src/backends/backendsCommon/common.mk
+++ b/src/backends/backendsCommon/common.mk
@@ -64,6 +64,7 @@ COMMON_TEST_SOURCES := \
test/layerTests/GatherTestImpl.cpp \
test/layerTests/InstanceNormalizationTestImpl.cpp \
test/layerTests/L2NormalizationTestImpl.cpp \
+ test/layerTests/LogicalTestImpl.cpp \
test/layerTests/LogSoftmaxTestImpl.cpp \
test/layerTests/LstmTestImpl.cpp \
test/layerTests/MaximumTestImpl.cpp \
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index a1271cdd99..7894895c39 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -110,6 +110,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
layerTests/L2NormalizationTestImpl.cpp
layerTests/L2NormalizationTestImpl.hpp
layerTests/LayerTestResult.hpp
+ layerTests/LogicalTestImpl.cpp
+ layerTests/LogicalTestImpl.hpp
layerTests/LogSoftmaxTestImpl.cpp
layerTests/LogSoftmaxTestImpl.hpp
layerTests/LstmTestImpl.cpp
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index a8465b45f6..7c7ad5f159 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -614,6 +614,8 @@ DECLARE_LAYER_POLICY_2_PARAM(InstanceNormalization)
DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
+DECLARE_LAYER_POLICY_2_PARAM(LogicalBinary)
+
DECLARE_LAYER_POLICY_2_PARAM(LogSoftmax)
DECLARE_LAYER_POLICY_2_PARAM(Lstm)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index a4615914d3..e9eb5b9553 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -33,6 +33,7 @@
#include <backendsCommon/test/layerTests/GatherTestImpl.hpp>
#include <backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp>
#include <backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp>
+#include <backendsCommon/test/layerTests/LogicalTestImpl.hpp>
#include <backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp>
#include <backendsCommon/test/layerTests/LstmTestImpl.hpp>
#include <backendsCommon/test/layerTests/MaximumTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
new file mode 100644
index 0000000000..2225de31e4
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -0,0 +1,572 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LogicalTestImpl.hpp"
+
+#include <armnn/utility/Assert.hpp>
+#include <ResolveType.hpp>
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+namespace {
+
+template <std::size_t NumDims>
+LayerTestResult<uint8_t, NumDims> LogicalUnaryTestHelper(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::UnaryOperation op,
+ const armnn::TensorShape& inputShape,
+ std::vector<uint8_t> input,
+ const armnn::TensorShape& outputShape,
+ std::vector<uint8_t> expectedOutput,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ ARMNN_ASSERT(inputShape.GetNumDimensions() == NumDims);
+ armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Boolean);
+
+ ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
+
+ auto inputTensor = MakeTensor<uint8_t, NumDims>(inputTensorInfo, input);
+
+ LayerTestResult <uint8_t, NumDims> ret(outputTensorInfo);
+
+ std::unique_ptr <armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr <armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::ElementwiseUnaryDescriptor desc(op);
+ armnn::ElementwiseUnaryQueueDescriptor qDesc;
+ qDesc.m_Parameters = desc;
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(qDesc, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+
+ auto workload = workloadFactory.CreateLogicalUnary(qDesc, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+ ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
+ ret.compareBoolean = true;
+ return ret;
+}
+
+template <std::size_t NumDims>
+LayerTestResult<uint8_t, NumDims> LogicalBinaryTestHelper(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::LogicalBinaryOperation op,
+ const armnn::TensorShape& inputShape0,
+ const armnn::TensorShape& inputShape1,
+ std::vector<uint8_t> input0,
+ std::vector<uint8_t> input1,
+ const armnn::TensorShape& outputShape,
+ std::vector<uint8_t> expectedOutput,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ ARMNN_ASSERT(inputShape0.GetNumDimensions() == NumDims);
+ armnn::TensorInfo inputTensorInfo0(inputShape0, armnn::DataType::Boolean);
+
+ ARMNN_ASSERT(inputShape1.GetNumDimensions() == NumDims);
+ armnn::TensorInfo inputTensorInfo1(inputShape1, armnn::DataType::Boolean);
+
+ ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
+
+ auto inputTensor0 = MakeTensor<uint8_t, NumDims>(inputTensorInfo0, input0);
+ auto inputTensor1 = MakeTensor<uint8_t, NumDims>(inputTensorInfo1, input1);
+
+ LayerTestResult <uint8_t, NumDims> ret(outputTensorInfo);
+
+ std::unique_ptr <armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
+ std::unique_ptr <armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+ std::unique_ptr <armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::LogicalBinaryDescriptor desc(op);
+ armnn::LogicalBinaryQueueDescriptor qDesc;
+ qDesc.m_Parameters = desc;
+
+ armnn::WorkloadInfo info;
+ AddInputToWorkload(qDesc, info, inputTensorInfo0, inputHandle0.get());
+ AddInputToWorkload(qDesc, info, inputTensorInfo1, inputHandle1.get());
+ AddOutputToWorkload(qDesc, info, outputTensorInfo, outputHandle.get());
+
+ auto workload = workloadFactory.CreateLogicalBinary(qDesc, info);
+
+ inputHandle0->Allocate();
+ inputHandle1->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle0.get(), inputTensor0.origin());
+ CopyDataToITensorHandle(inputHandle1.get(), inputTensor1.origin());
+
+ workload->PostAllocationConfigure();
+ ExecuteWorkload(*workload, memoryManager);
+
+ CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
+
+ ret.outputExpected = MakeTensor<uint8_t, NumDims>(outputTensorInfo, expectedOutput);
+ ret.compareBoolean = true;
+ return ret;
+}
+
+class UnaryTestData
+{
+public:
+ UnaryTestData() = default;
+ virtual ~UnaryTestData() = default;
+
+ armnn::TensorShape m_InputShape;
+ armnn::TensorShape m_OutputShape;
+
+ std::vector<uint8_t> m_InputData;
+
+ std::vector<uint8_t> m_OutputNot;
+};
+
+class BinaryTestData
+{
+public:
+ BinaryTestData() = default;
+ virtual ~BinaryTestData() = default;
+
+ armnn::TensorShape m_InputShape0;
+ armnn::TensorShape m_InputShape1;
+ armnn::TensorShape m_OutputShape;
+
+ std::vector<uint8_t> m_InputData0;
+ std::vector<uint8_t> m_InputData1;
+
+ std::vector<uint8_t> m_OutputAnd;
+ std::vector<uint8_t> m_OutputOr;
+};
+
+class SimpleUnaryTestData : public UnaryTestData
+{
+public:
+ SimpleUnaryTestData() : UnaryTestData()
+ {
+ m_InputShape = { 1, 1, 1, 4 };
+ m_OutputShape = m_InputShape;
+
+ m_InputData =
+ {
+ true, false, false, true
+ };
+
+ m_OutputNot =
+ {
+ false, true, true, false
+ };
+ }
+};
+
+class SimpleUnaryIntTestData : public UnaryTestData
+{
+public:
+ SimpleUnaryIntTestData() : UnaryTestData()
+ {
+ m_InputShape = { 1, 1, 1, 4 };
+ m_OutputShape = m_InputShape;
+
+ m_InputData =
+ {
+ 1, 11, 111, 0
+ };
+
+ m_OutputNot =
+ {
+ 0, 0, 0, 1
+ };
+ }
+};
+
+class SimpleBinaryTestData : public BinaryTestData
+{
+public:
+ SimpleBinaryTestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = m_InputShape0;
+ m_OutputShape = m_InputShape1;
+
+ m_InputData0 =
+ {
+ true, false, false, true
+ };
+
+ m_InputData1 =
+ {
+ true, false, true, false
+ };
+
+ m_OutputAnd =
+ {
+ true, false, false, false
+ };
+
+ m_OutputOr =
+ {
+ true, false, true, true
+ };
+ }
+};
+
+class SimpleBinaryIntTestData : public BinaryTestData
+{
+public:
+ SimpleBinaryIntTestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = m_InputShape0;
+ m_OutputShape = m_InputShape1;
+
+ m_InputData0 =
+ {
+ 1, 11, 111, 0
+ };
+
+ m_InputData1 =
+ {
+ 0, 111, 111, 0
+ };
+
+ m_OutputAnd =
+ {
+ 0, 1, 1, 0
+ };
+
+ m_OutputOr =
+ {
+ 1, 1, 1, 0
+ };
+ }
+};
+
+class BroadcastBinary1TestData : public BinaryTestData
+{
+public:
+ BroadcastBinary1TestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = { 1, 1, 1, 1 };
+ m_OutputShape = m_InputShape0;
+
+ m_InputData0 =
+ {
+ true, false, false, true
+ };
+
+ m_InputData1 =
+ {
+ true
+ };
+
+ m_OutputAnd =
+ {
+ true, false, false, true
+ };
+
+ m_OutputOr =
+ {
+ true, true, true, true
+ };
+ }
+};
+
+class BroadcastBinary2TestData : public BinaryTestData
+{
+public:
+ BroadcastBinary2TestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 1 };
+ m_InputShape1 = { 1, 1, 1, 4 };
+ m_OutputShape = m_InputShape1;
+
+ m_InputData0 =
+ {
+ true
+ };
+
+ m_InputData1 =
+ {
+ true, false, false, true
+ };
+
+ m_OutputAnd =
+ {
+ true, false, false, true
+ };
+
+ m_OutputOr =
+ {
+ true, true, true, true
+ };
+ }
+};
+
+class BroadcastBinary3TestData : public BinaryTestData
+{
+public:
+ BroadcastBinary3TestData() : BinaryTestData()
+ {
+ m_InputShape0 = { 1, 1, 1, 4 };
+ m_InputShape1 = { 1, 1, 1, 1 };
+ m_OutputShape = m_InputShape0;
+
+ m_InputData0 =
+ {
+ true, false, false, true
+ };
+
+ m_InputData1 =
+ {
+ false
+ };
+
+ m_OutputAnd =
+ {
+ false, false, false, false
+ };
+
+ m_OutputOr =
+ {
+ true, false, false, true
+ };
+ }
+};
+
+static SimpleUnaryTestData s_SimpleUnaryTestData;
+static SimpleBinaryTestData s_SimpleBinaryTestData;
+
+static SimpleUnaryIntTestData s_SimpleUnaryIntTestData;
+static SimpleBinaryIntTestData s_SimpleBinaryIntTestData;
+
+static BroadcastBinary1TestData s_BroadcastBinary1TestData;
+static BroadcastBinary2TestData s_BroadcastBinary2TestData;
+static BroadcastBinary3TestData s_BroadcastBinary3TestData;
+
+
+} // anonymous namespace
+
+// Unary - Not
+LayerTestResult<uint8_t, 4> LogicalNotTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalUnaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::LogicalNot,
+ s_SimpleUnaryTestData.m_InputShape,
+ s_SimpleUnaryTestData.m_InputData,
+ s_SimpleUnaryTestData.m_OutputShape,
+ s_SimpleUnaryTestData.m_OutputNot,
+ tensorHandleFactory);
+}
+
+// Unary - Not with integers
+LayerTestResult<uint8_t, 4> LogicalNotIntTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalUnaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::UnaryOperation::LogicalNot,
+ s_SimpleUnaryIntTestData.m_InputShape,
+ s_SimpleUnaryIntTestData.m_InputData,
+ s_SimpleUnaryIntTestData.m_OutputShape,
+ s_SimpleUnaryIntTestData.m_OutputNot,
+ tensorHandleFactory);
+}
+
+// Binary - And
+LayerTestResult<uint8_t, 4> LogicalAndTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_SimpleBinaryTestData.m_InputShape0,
+ s_SimpleBinaryTestData.m_InputShape1,
+ s_SimpleBinaryTestData.m_InputData0,
+ s_SimpleBinaryTestData.m_InputData1,
+ s_SimpleBinaryTestData.m_OutputShape,
+ s_SimpleBinaryTestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or
+LayerTestResult<uint8_t, 4> LogicalOrTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_SimpleBinaryTestData.m_InputShape0,
+ s_SimpleBinaryTestData.m_InputShape1,
+ s_SimpleBinaryTestData.m_InputData0,
+ s_SimpleBinaryTestData.m_InputData1,
+ s_SimpleBinaryTestData.m_OutputShape,
+ s_SimpleBinaryTestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And with integers
+LayerTestResult<uint8_t, 4> LogicalAndIntTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_SimpleBinaryIntTestData.m_InputShape0,
+ s_SimpleBinaryIntTestData.m_InputShape1,
+ s_SimpleBinaryIntTestData.m_InputData0,
+ s_SimpleBinaryIntTestData.m_InputData1,
+ s_SimpleBinaryIntTestData.m_OutputShape,
+ s_SimpleBinaryIntTestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or with integers
+LayerTestResult<uint8_t, 4> LogicalOrIntTest(armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_SimpleBinaryIntTestData.m_InputShape0,
+ s_SimpleBinaryIntTestData.m_InputShape1,
+ s_SimpleBinaryIntTestData.m_InputData0,
+ s_SimpleBinaryIntTestData.m_InputData1,
+ s_SimpleBinaryIntTestData.m_OutputShape,
+ s_SimpleBinaryIntTestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_BroadcastBinary1TestData.m_InputShape0,
+ s_BroadcastBinary1TestData.m_InputShape1,
+ s_BroadcastBinary1TestData.m_InputData0,
+ s_BroadcastBinary1TestData.m_InputData1,
+ s_BroadcastBinary1TestData.m_OutputShape,
+ s_BroadcastBinary1TestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_BroadcastBinary1TestData.m_InputShape0,
+ s_BroadcastBinary1TestData.m_InputShape1,
+ s_BroadcastBinary1TestData.m_InputData0,
+ s_BroadcastBinary1TestData.m_InputData1,
+ s_BroadcastBinary1TestData.m_OutputShape,
+ s_BroadcastBinary1TestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_BroadcastBinary2TestData.m_InputShape0,
+ s_BroadcastBinary2TestData.m_InputShape1,
+ s_BroadcastBinary2TestData.m_InputData0,
+ s_BroadcastBinary2TestData.m_InputData1,
+ s_BroadcastBinary2TestData.m_OutputShape,
+ s_BroadcastBinary2TestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_BroadcastBinary2TestData.m_InputShape0,
+ s_BroadcastBinary2TestData.m_InputShape1,
+ s_BroadcastBinary2TestData.m_InputData0,
+ s_BroadcastBinary2TestData.m_InputData1,
+ s_BroadcastBinary2TestData.m_OutputShape,
+ s_BroadcastBinary2TestData.m_OutputOr,
+ tensorHandleFactory);
+}
+
+// Binary - And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalAnd,
+ s_BroadcastBinary3TestData.m_InputShape0,
+ s_BroadcastBinary3TestData.m_InputShape1,
+ s_BroadcastBinary3TestData.m_InputData0,
+ s_BroadcastBinary3TestData.m_InputData1,
+ s_BroadcastBinary3TestData.m_OutputShape,
+ s_BroadcastBinary3TestData.m_OutputAnd,
+ tensorHandleFactory);
+}
+
+// Binary - Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory)
+{
+ return LogicalBinaryTestHelper<4>(workloadFactory,
+ memoryManager,
+ armnn::LogicalBinaryOperation::LogicalOr,
+ s_BroadcastBinary3TestData.m_InputShape0,
+ s_BroadcastBinary3TestData.m_InputShape1,
+ s_BroadcastBinary3TestData.m_InputData0,
+ s_BroadcastBinary3TestData.m_InputData1,
+ s_BroadcastBinary3TestData.m_OutputShape,
+ s_BroadcastBinary3TestData.m_OutputOr,
+ tensorHandleFactory);
+}
\ No newline at end of file
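Note: the helpers above are shared by all backends; a backend's unit-test suite calls the functions declared in LogicalTestImpl.hpp with its own workload factory and tensor-handle factory, typically through its existing test-registration macros. A minimal hedged sketch of a direct call (the wrapper name is illustrative):

// Illustrative only: invoking one of the shared tests from backend-specific test code.
#include <backendsCommon/test/layerTests/LogicalTestImpl.hpp>
#include <armnn/utility/Assert.hpp>

void RunLogicalNotSketch(armnn::IWorkloadFactory& factory,
                         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                         const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // result.output holds the computed values, result.outputExpected the reference values;
    // compareBoolean tells the comparison code to treat any non-zero byte as true.
    LayerTestResult<uint8_t, 4> result = LogicalNotTest(factory, memoryManager, tensorHandleFactory);
    ARMNN_ASSERT(result.compareBoolean);
}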
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp
new file mode 100644
index 0000000000..1711d90d5a
--- /dev/null
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.hpp
@@ -0,0 +1,83 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+// Unary - Logical Not
+LayerTestResult<uint8_t, 4> LogicalNotTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Unary - Logical Not with integers
+LayerTestResult<uint8_t, 4> LogicalNotIntTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And
+LayerTestResult<uint8_t, 4> LogicalAndTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or
+LayerTestResult<uint8_t, 4> LogicalOrTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And with integers
+LayerTestResult<uint8_t, 4> LogicalAndIntTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or with integers
+LayerTestResult<uint8_t, 4> LogicalOrIntTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast1Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast2Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical And Broadcast
+LayerTestResult<uint8_t, 4> LogicalAndBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::ITensorHandleFactory& tensorHandleFactory);
+
+// Binary - Logical Or Broadcast
+LayerTestResult<uint8_t, 4> LogicalOrBroadcast3Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file