aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDerek Lamberti <derek.lamberti@arm.com>2019-03-25 15:41:58 +0000
committerderek.lamberti <derek.lamberti@arm.com>2019-03-27 16:06:46 +0000
commita9cca6aa935b3e290181a05fdb2c5f5557a49c09 (patch)
tree60493eeada1357fa2334f7f952879561480e192f
parent045ea78ab73b1c88a38d3ecb501583a38161ca0e (diff)
downloadarmnn-a9cca6aa935b3e290181a05fdb2c5f5557a49c09.tar.gz
IVGCVSW-2870 Support QuantizeLayer on frontend
Change-Id: I2014a8d801f1f222d27a80dddf4f188ddcb3a5c9 Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
-rw-r--r--Android.mk1
-rw-r--r--CMakeLists.txt2
-rw-r--r--include/armnn/ILayerSupport.hpp4
-rw-r--r--include/armnn/ILayerVisitor.hpp6
-rw-r--r--include/armnn/INetwork.hpp5
-rw-r--r--include/armnn/LayerVisitorBase.hpp3
-rw-r--r--src/armnn/InternalTypes.hpp1
-rw-r--r--src/armnn/LayerSupport.cpp9
-rw-r--r--src/armnn/LayersFwd.hpp2
-rw-r--r--src/armnn/Network.cpp5
-rw-r--r--src/armnn/Network.hpp2
-rw-r--r--src/armnn/layers/QuantizeLayer.cpp48
-rw-r--r--src/armnn/layers/QuantizeLayer.hpp35
-rw-r--r--src/armnn/test/NetworkTests.cpp51
-rw-r--r--src/armnn/test/TestLayerVisitor.hpp145
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.cpp7
-rw-r--r--src/backends/backendsCommon/LayerSupportBase.hpp4
-rw-r--r--src/backends/backendsCommon/WorkloadData.cpp18
-rw-r--r--src/backends/backendsCommon/WorkloadData.hpp5
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.cpp256
-rw-r--r--src/backends/backendsCommon/WorkloadFactory.hpp157
-rw-r--r--src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp2
22 files changed, 548 insertions, 220 deletions
diff --git a/Android.mk b/Android.mk
index 1b0ffea024..c65fc5ef93 100644
--- a/Android.mk
+++ b/Android.mk
@@ -116,6 +116,7 @@ LOCAL_SRC_FILES := \
src/armnn/layers/PermuteLayer.cpp \
src/armnn/layers/Pooling2dLayer.cpp \
src/armnn/layers/PreCompiledLayer.cpp \
+ src/armnn/layers/QuantizeLayer.cpp \
src/armnn/layers/ReshapeLayer.cpp \
src/armnn/layers/ResizeBilinearLayer.cpp \
src/armnn/layers/RsqrtLayer.cpp \
diff --git a/CMakeLists.txt b/CMakeLists.txt
index fe1c1a71c9..dcdc4bd9de 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -253,6 +253,8 @@ list(APPEND armnn_sources
src/armnn/layers/PermuteLayer.cpp
src/armnn/layers/Pooling2dLayer.hpp
src/armnn/layers/Pooling2dLayer.cpp
+ src/armnn/layers/QuantizeLayer.cpp
+ src/armnn/layers/QuantizeLayer.hpp
src/armnn/layers/DivisionLayer.cpp
src/armnn/layers/DivisionLayer.hpp
src/armnn/layers/PreCompiledLayer.hpp
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 7896a80a8f..76d9c10d7f 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -209,6 +209,10 @@ public:
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+ virtual bool IsQuantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
virtual bool IsReshapeSupported(const TensorInfo& input,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
diff --git a/include/armnn/ILayerVisitor.hpp b/include/armnn/ILayerVisitor.hpp
index ce09246d07..76027f6eed 100644
--- a/include/armnn/ILayerVisitor.hpp
+++ b/include/armnn/ILayerVisitor.hpp
@@ -281,6 +281,12 @@ public:
const PadDescriptor& padDescriptor,
const char* name = nullptr) = 0;
+ /// Function a quantize layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+ /// @param layer - pointer to the layer which is calling back to this visit function.
+ /// @param name - Optional name for the layer.
+ virtual void VisitQuantizeLayer(const IConnectableLayer* layer,
+ const char* name = nullptr) = 0;
+
/// Function a strided slice layer should call back to when its Accept(ILayerVisitor&) function is invoked.
/// @param layer - pointer to the layer which is calling back to this visit function.
/// @param StridedSliceDescriptor - Parameters for the strided slice operation.
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index b61206fae1..7c1fbdbd49 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -353,6 +353,11 @@ public:
virtual IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
const char* name = nullptr) = 0;
+ /// Adds a quantize layer to the network.
+ /// @param name - Optional name for the layer.
+ /// @return - Interface for configuring the layer.
+ virtual IConnectableLayer* AddQuantizeLayer(const char* name = nullptr) = 0;
+
/// Adds a strided slice layer to the network.
/// @param StridedSliceDescriptor - Parameters for the strided slice operation.
/// @param name - Optional name for the layer.
diff --git a/include/armnn/LayerVisitorBase.hpp b/include/armnn/LayerVisitorBase.hpp
index 2c37a21786..d30005f0ed 100644
--- a/include/armnn/LayerVisitorBase.hpp
+++ b/include/armnn/LayerVisitorBase.hpp
@@ -151,6 +151,9 @@ public:
const PadDescriptor&,
const char*) override { DefaultPolicy::Apply(); }
+ void VisitQuantizeLayer(const IConnectableLayer*,
+ const char*) override { DefaultPolicy::Apply(); }
+
void VisitStridedSliceLayer(const IConnectableLayer*,
const StridedSliceDescriptor&,
const char*) override { DefaultPolicy::Apply(); }
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index a61c7b8147..d6d66031a7 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -47,6 +47,7 @@ enum class LayerType
Permute,
Pooling2d,
PreCompiled,
+ Quantize,
Reshape,
ResizeBilinear,
Rsqrt,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 5916488fb5..0e3d2522e4 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -406,6 +406,15 @@ bool IsPadSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}
+bool IsQuantizeSupported(const BackendId& backend,
+ const TensorInfo& input,
+ const TensorInfo& output,
+ char* reasonIfUnsupported,
+ size_t reasonIfUnsupportedMaxLength)
+{
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
+}
+
bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 497b517f28..9300a75a07 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -39,6 +39,7 @@
#include "layers/PermuteLayer.hpp"
#include "layers/Pooling2dLayer.hpp"
#include "layers/PreCompiledLayer.hpp"
+#include "layers/QuantizeLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeBilinearLayer.hpp"
#include "layers/RsqrtLayer.hpp"
@@ -108,6 +109,7 @@ DECLARE_LAYER(Pad)
DECLARE_LAYER(Permute)
DECLARE_LAYER(Pooling2d)
DECLARE_LAYER(PreCompiled)
+DECLARE_LAYER(Quantize)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(ResizeBilinear)
DECLARE_LAYER(Rsqrt)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index bd5adccf86..c5dfbd75ec 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -930,6 +930,11 @@ IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, cons
return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
}
+IConnectableLayer *Network::AddQuantizeLayer(const char *name)
+{
+ return m_Graph->AddLayer<QuantizeLayer>(name);
+}
+
IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index e50ce79052..5ed8cca2f2 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -159,6 +159,8 @@ public:
IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor, const char* name = nullptr) override;
+ IConnectableLayer* AddQuantizeLayer(const char* name = nullptr) override;
+
IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
new file mode 100644
index 0000000000..fbf8b322ab
--- /dev/null
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "QuantizeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/ILayerVisitor.hpp>
+
+namespace armnn
+{
+
+QuantizeLayer::QuantizeLayer(const char* name)
+: Layer(1, 1, LayerType::Quantize, name)
+{}
+
+std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ return nullptr;
+}
+
+Layer* QuantizeLayer::Clone(Graph& graph) const
+{
+ QuantizeLayer* clone = CloneBase<QuantizeLayer>(graph, GetName());
+ return clone;
+}
+
+void QuantizeLayer::ValidateTensorShapesFromInputs()
+{
+ VerifyLayerConnections(1, CHECK_LOCATION());
+
+ auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "QuantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ inferredShapes[0]);
+}
+
+void QuantizeLayer::Accept(ILayerVisitor& visitor) const
+{
+ visitor.VisitQuantizeLayer(this, GetName());
+}
+
+} //namespace armnn \ No newline at end of file
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
new file mode 100644
index 0000000000..fabb4492c5
--- /dev/null
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn {
+
+//Forward
+class IWorkload;
+class IWorkloadFactory;
+class ILayerVisitor;
+
+class QuantizeLayer : public Layer
+{
+public:
+ virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const override;
+
+ Layer* Clone(Graph& graph) const override;
+
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ QuantizeLayer(const char* name);
+ ~QuantizeLayer() = default;
+
+};
+
+} //namespace armnn
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 0963df6222..4de09a2804 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -6,6 +6,7 @@
#include "GraphUtils.hpp"
#include <armnn/ArmNN.hpp>
+#include <armnn/LayerVisitorBase.hpp>
#include <Network.hpp>
#include <boost/test/unit_test.hpp>
@@ -366,4 +367,54 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
}
+BOOST_AUTO_TEST_CASE(Network_AddQuantize)
+{
+ struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+ {
+ void VisitQuantizeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+ {
+ m_Visited = true;
+
+ BOOST_TEST(layer);
+
+ std::string expectedName = std::string("quantize");
+ BOOST_TEST(std::string(layer->GetName()) == expectedName);
+ BOOST_TEST(std::string(name) == expectedName);
+
+ BOOST_TEST(layer->GetNumInputSlots() == 1);
+ BOOST_TEST(layer->GetNumOutputSlots() == 1);
+
+ const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+ BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
+
+ const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+ BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QuantisedAsymm8));
+ }
+
+ bool m_Visited = false;
+ };
+
+
+ auto graph = armnn::INetwork::Create();
+
+ auto input = graph->AddInputLayer(0, "input");
+ auto quantize = graph->AddQuantizeLayer("quantize");
+ auto output = graph->AddOutputLayer(1, "output");
+
+ input->GetOutputSlot(0).Connect(quantize->GetInputSlot(0));
+ quantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ armnn::TensorInfo infoIn({3,1}, armnn::DataType::Float32);
+ input->GetOutputSlot(0).SetTensorInfo(infoIn);
+
+ armnn::TensorInfo infoOut({3,1}, armnn::DataType::QuantisedAsymm8);
+ quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
+
+ Test testQuantize;
+ graph->Accept(testQuantize);
+
+ BOOST_TEST(testQuantize.m_Visited == true);
+
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index 8c0da50be3..e43227f520 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -4,13 +4,13 @@
//
#pragma once
-#include <armnn/ILayerVisitor.hpp>
+#include <armnn/LayerVisitorBase.hpp>
#include <armnn/Descriptors.hpp>
namespace armnn
{
// Abstract base class with do nothing implementations for all layer visit methods
-class TestLayerVisitor : public ILayerVisitor
+class TestLayerVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
{
protected:
virtual ~TestLayerVisitor() {}
@@ -34,147 +34,6 @@ public:
m_LayerName = "";
}
}
-
- void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override {}
-
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override {}
-
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override {}
-
- void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
- const DetectionPostProcessDescriptor& descriptor,
- const ConstTensor& anchors,
- const char* name = nullptr) override {}
-
- void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override {}
-
- void VisitPermuteLayer(const IConnectableLayer* layer,
- const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) override {}
-
- void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
- const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) override {}
-
- void VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) override {}
-
- void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) override {}
-
- void VisitNormalizationLayer(const IConnectableLayer* layer,
- const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) override {}
-
- void VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) override {}
-
- void VisitSplitterLayer(const IConnectableLayer* layer,
- const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) override {}
-
- void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name = nullptr) override {}
-
- void VisitAdditionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitMultiplicationLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& desc,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr) override {}
-
- void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) override {}
-
- void VisitL2NormalizationLayer(const IConnectableLayer* layer,
- const L2NormalizationDescriptor& desc,
- const char* name = nullptr) override {}
-
- void VisitConstantLayer(const IConnectableLayer* layer,
- const ConstTensor& input,
- const char* name = nullptr) override {}
-
- void VisitReshapeLayer(const IConnectableLayer* layer,
- const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) override {}
-
- void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
- const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) override {}
-
- void VisitFloorLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override {}
-
- void VisitLstmLayer(const IConnectableLayer* layer,
- const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr) override {}
-
- void VisitDivisionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitSubtractionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitMaximumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitMeanLayer(const IConnectableLayer* layer,
- const MeanDescriptor& meanDescriptor,
- const char* name = nullptr) override {}
-
- void VisitPadLayer(const IConnectableLayer* layer,
- const PadDescriptor& padDescriptor,
- const char* name = nullptr) override {}
-
- void VisitStridedSliceLayer(const IConnectableLayer* layer,
- const StridedSliceDescriptor& stridedSliceDescriptor,
- const char* name = nullptr) override {}
-
- void VisitMinimumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitGreaterLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitEqualLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitRsqrtLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitGatherLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
};
} //namespace armnn
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index c170ac05e2..137e77eebe 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -315,6 +315,13 @@ bool LayerSupportBase::IsPreCompiledSupported(const TensorInfo& input,
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}
+bool LayerSupportBase::IsQuantizeSupported(const armnn::TensorInfo& input,
+ const armnn::TensorInfo& output,
+ armnn::Optional<std::string&> reasonIfUnsupported) const
+{
+ return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
bool LayerSupportBase::IsReshapeSupported(const TensorInfo& input,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index 75c366cf3b..ceb3b2768e 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -198,6 +198,10 @@ public:
const PreCompiledDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+ bool IsQuantizeSupported(const TensorInfo& input,
+ const TensorInfo& output,
+ Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
bool IsReshapeSupported(const TensorInfo& input,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 7474b9bc9a..e30a3f36b7 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -946,6 +946,24 @@ void PadQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
}
}
+void QuantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ ValidateSingleInput(workloadInfo, "QuantizeQueueDescriptor");
+ ValidateSingleOutput(workloadInfo, "QuantizeQueueDescriptor");
+
+
+ if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::Float32)
+ {
+ throw InvalidArgumentException("Quantize only accepts Float32 inputs.");
+ }
+
+ if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
+ workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
+ {
+ throw InvalidArgumentException("Output of quantized layer must be quantized type.");
+ }
+}
+
void BatchToSpaceNdQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
{
ValidateSingleInput(workloadInfo, "BatchToSpaceNdQueueDescriptor");
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 18bd921d5e..9250ceac43 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -229,6 +229,11 @@ struct PadQueueDescriptor : QueueDescriptorWithParameters<PadDescriptor>
void Validate(const WorkloadInfo& workloadInfo) const;
};
+struct QuantizeQueueDescriptor : QueueDescriptor
+{
+ void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
// Equal layer workload data
struct EqualQueueDescriptor : QueueDescriptor
{
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 0996a8aaee..833f3b894e 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -4,6 +4,8 @@
//
#include "CpuTensorHandle.hpp"
+#include "WorkloadFactory.hpp"
+
#include <Layer.hpp>
#include <LayersFwd.hpp>
@@ -605,6 +607,13 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
reason);
break;
}
+ case LayerType::Quantize:
+ {
+ const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+ const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+ result = layerSupportObject->IsQuantizeSupported(input, output, reason);
+ break;
+ }
case LayerType::Division:
{
const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -751,4 +760,251 @@ bool IWorkloadFactory::IsLayerSupported(const IConnectableLayer& connectableLaye
return IsLayerSupported(layer->GetBackendId(), connectableLayer, dataType, outReasonIfUnsupported);
}
+// Default Implementations
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateActivation(const ActivationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
+ const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConstant(const ConstantQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
+ const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
+ const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDivision(const DivisionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGather(const GatherQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateLstm(const LstmQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMaximum(const MaximumQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMean(const MeanQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMerger(const MergerQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMinimum(const MinimumQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateNormalization(const NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateOutput(const OutputQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSplitter(const SplitterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const
+{
+ return std::unique_ptr<IWorkload>();
+}
+
}
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 88833491b2..2aa3854c4a 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -49,125 +49,128 @@ public:
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const = 0;
- virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
-
virtual std::unique_ptr<IWorkload> CreateActivation(const ActivationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
- virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateConvolution2d(const Convolution2dQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ const WorkloadInfo& info) const;
+
+ virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(
- const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const = 0;
+ const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateDetectionPostProcess(
- const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const = 0;
+ const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
-
- virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
- virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateFullyConnected(const FullyConnectedQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
-
- virtual std::unique_ptr<IWorkload> CreateConstant(const ConstantQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
- virtual std::unique_ptr<IWorkload> CreateLstm(const LstmQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateConvertFp16ToFp32(const ConvertFp16ToFp32QueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateMerger(const MergerQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateConvertFp32ToFp16(const ConvertFp32ToFp16QueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateDivision(const DivisionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateMultiplication(const MultiplicationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateNormalization(const NormalizationQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateMaximum(const MaximumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateOutput(const OutputQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateMean(const MeanQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
- virtual std::unique_ptr<IWorkload> CreateMinimum(const MinimumQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreatePermute(const PermuteQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreatePad(const PadQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
- const WorkloadInfo& Info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
- virtual std::unique_ptr<IWorkload> CreateGreater(const GreaterQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateDebug(const DebugQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
virtual std::unique_ptr<IWorkload> CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreatePreCompiled(const PreCompiledQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
- virtual std::unique_ptr<IWorkload> CreateGather(const GatherQueueDescriptor& descriptor,
- const WorkloadInfo& info) const = 0;
+ virtual std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
+ virtual std::unique_ptr<IWorkload> CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
+ virtual std::unique_ptr<IWorkload> CreateSplitter(const SplitterQueueDescriptor& descriptor,
+ const WorkloadInfo& info) const;
+
+ virtual std::unique_ptr<IWorkload> CreateStridedSlice(const StridedSliceQueueDescriptor& descriptor,
+ const WorkloadInfo& Info) const;
};
} //namespace armnn
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 79213c18e0..8f86132274 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -372,6 +372,8 @@ DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)
DECLARE_LAYER_POLICY_2_PARAM(Pad)
+DECLARE_LAYER_POLICY_1_PARAM(Quantize)
+
DECLARE_LAYER_POLICY_2_PARAM(Permute)
DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)