about summary refs log tree commit diff
path: root/src/backends/backendsCommon
diff options
context:
space:
mode:
Diffstat (limited to 'src/backends/backendsCommon')
-rw-r--r--  src/backends/backendsCommon/ILayerSupport.cpp               |   7
-rw-r--r--  src/backends/backendsCommon/WorkloadData.cpp                |  11
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp                |   5
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp             |   9
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.hpp             |   3
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp |   2
-rwxr-xr-x  src/backends/backendsCommon/test/LayerTests.cpp             | 164
-rw-r--r--  src/backends/backendsCommon/test/LayerTests.hpp             |  24
8 files changed, 225 insertions(+), 0 deletions(-)
diff --git a/src/backends/backendsCommon/ILayerSupport.cpp b/src/backends/backendsCommon/ILayerSupport.cpp
index 0b78913b99..aa1bb5042d 100644
--- a/src/backends/backendsCommon/ILayerSupport.cpp
+++ b/src/backends/backendsCommon/ILayerSupport.cpp
@@ -336,4 +336,11 @@ bool ILayerSupport::IsGreaterSupported(const TensorInfo& input0,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool ILayerSupport::IsRsqrtSupported(const TensorInfo &input,
+ const TensorInfo &output,
+ Optional<std::string &> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 1dac498c11..a5db088be7 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1040,4 +1040,15 @@ void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
"second input");
}
+void RsqrtQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateSingleInput(workloadInfo, "RsqrtQueueDescriptor");
+ ValidateSingleOutput(workloadInfo, "RsqrtQueueDescriptor");
+ ValidateTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+ workloadInfo.m_OutputTensorInfos[0],
+ "RsqrtQueueDescriptor",
+ "input",
+ "output");
+}
+
} //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 9142d87bfd..59e3dfbf5c 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -373,4 +373,9 @@ struct DebugQueueDescriptor : QueueDescriptorWithParameters<DebugDescriptor>
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct RsqrtQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
} //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 3b8a7d8f7f..1dc96a5ec3 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -600,6 +600,15 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
result = layerSupportObject->IsResizeBilinearSupported(OverrideDataType(input, dataType), reason);
break;
}
+ case LayerType::Rsqrt:
+ {
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsRsqrtSupported(OverrideDataType(input, dataType),
+ OverrideDataType(output, dataType),
+ reason);
+ break;
+ }
case LayerType::Softmax:
{
auto cLayer = boost::polymorphic_downcast<const SoftmaxLayer*>(&layer);
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index e72987fdcb..aee9f91b56 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -156,6 +156,9 @@ public:
virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
const WorkloadInfo& info) const = 0;
+
+ virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const = 0;
};
} //namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index d4c5fe43c6..d6528bb0ae 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -378,6 +378,8 @@ DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
DECLARE_LAYER_POLICY_2_PARAM(Reshape)
+DECLARE_LAYER_POLICY_1_PARAM(Rsqrt)
+
DECLARE_LAYER_POLICY_2_PARAM(Softmax)
DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 52150075ce..8e4596b703 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -6734,6 +6734,170 @@ LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
return result;
}
+LayerTestResult<float, 2> Rsqrt2dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::TensorInfo inputTensorInfo,
+ const armnn::TensorInfo outputTensorInfo,
+ std::vector<float> inputValues,
+ std::vector<float> expectedOutputValues)
+{
+ auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, std::vector<float>(inputValues));
+
+ LayerTestResult<float, 2> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(expectedOutputValues));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::RsqrtQueueDescriptor descriptor;
+
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+ return result;
+}
+LayerTestResult<float, 2> Rsqrt2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 2, 2 };
+ const armnn::TensorShape outputShape{ 2, 2 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputValues
+ {
+ 1.f, 4.f,
+ 16.f, 25.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ 1.f, 0.5f,
+ 0.25f, 0.2f
+ };
+
+ return Rsqrt2dTestCommon(workloadFactory, memoryManager,
+ inputTensorInfo, outputTensorInfo,
+ inputValues, expectedOutputValues);
+}
+
+LayerTestResult<float, 3> Rsqrt3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 3, 1, 2 };
+ const armnn::TensorShape outputShape{ 3, 1, 2 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputValues
+ {
+ 1.f, 4.f, 16.f,
+ 25.f, 64.f, 100.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ 1.f, 0.5f, 0.25f,
+ 0.2f, 0.125f, 0.1f
+ };
+
+ auto inputTensor = MakeTensor<float, 3>(inputTensorInfo, std::vector<float>(inputValues));
+
+ LayerTestResult<float, 3> result(outputTensorInfo);
+ result.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float >(expectedOutputValues));
+
+ std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+ std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+ armnn::RsqrtQueueDescriptor descriptor;
+
+ armnn::WorkloadInfo info;
+
+ AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+ AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+ std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRsqrt(descriptor, info);
+
+ inputHandle->Allocate();
+ outputHandle->Allocate();
+
+ CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
+
+ workload->Execute();
+
+ CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+ return result;
+}
+
+LayerTestResult<float, 2> RsqrtZeroTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 1, 2 };
+ const armnn::TensorShape outputShape{ 1, 2 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputValues
+ {
+ 0.f, -0.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ INFINITY, -INFINITY
+ };
+
+ return Rsqrt2dTestCommon(workloadFactory, memoryManager,
+ inputTensorInfo, outputTensorInfo,
+ inputValues, expectedOutputValues);
+}
+
+LayerTestResult<float, 2> RsqrtNegativeTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+ const armnn::TensorShape inputShape{ 1, 2 };
+ const armnn::TensorShape outputShape{ 1, 2 };
+
+ const armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+ const armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputValues
+ {
+ -25.f, -16.f
+ };
+
+ std::vector<float> expectedOutputValues
+ {
+ -NAN, -NAN
+ };
+
+ return Rsqrt2dTestCommon(workloadFactory, memoryManager,
+ inputTensorInfo, outputTensorInfo,
+ inputValues, expectedOutputValues);
+}
+
LayerTestResult<float, 4> BatchNormTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index a871594900..98c0806ddf 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -639,6 +639,30 @@ LayerTestResult<float, 4> ResizeBilinearMagTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout);
+LayerTestResult<float, 2> Rsqrt2dTestCommon(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ const armnn::TensorInfo inputTensorInfo,
+ const armnn::TensorInfo outputTensorInfo,
+ std::vector<float> inputValues,
+ std::vector<float> expectedOutputValues);
+
+LayerTestResult<float, 2> Rsqrt2dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 3> Rsqrt3dTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 2> RsqrtZeroTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 2> RsqrtNegativeTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
LayerTestResult<float, 4> BatchNormTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);