author:    Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>  2019-03-28 09:56:53 +0000
committer: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>  2019-03-28 11:18:00 +0000
commit:    e4294fd7363fd2f5a7be25d1722f20ed935d3445
tree:      b1ebf691cc66e447706081c956a50ce1b4c18e99 /src
parent:    61f54634701d070dbf48e082993fa58104d6f329
download:  armnn-e4294fd7363fd2f5a7be25d1722f20ed935d3445.tar.gz
IVGCVSW-2874 Add DequantizeLayer and no-op factory method
* Add Dequantize layer to the frontend
* Add Serializer and Deserializer for Dequantize
Change-Id: Ide2647b9e0348d599deb97e61ca4bf66e2f17fc0
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
Diffstat (limited to 'src')
23 files changed, 258 insertions(+), 2 deletions(-)
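Before the per-file diffs, a minimal sketch of how the new frontend entry point is meant to be used. It mirrors the SerializeDequantize test further down; shapes and quantization parameters are illustrative, and a backend must still provide a real Dequantize workload, since the factory method added by this commit is deliberately a no-op:

```cpp
// Minimal sketch: build a graph that dequantizes a QAsymm8 tensor to Float32.
#include <armnn/ArmNN.hpp>

int main()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();

    armnn::IConnectableLayer* input      = network->AddInputLayer(0);
    armnn::IConnectableLayer* dequantize = network->AddDequantizeLayer("dequantize");
    armnn::IConnectableLayer* output     = network->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(dequantize->GetInputSlot(0));
    dequantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Input is quantized (scale 0.5, offset 1); output is plain Float32.
    input->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1));
    dequantize->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32));

    return 0;
}
```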
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index e4a6ac82d5..fe1542b162 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -24,6 +24,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Convolution2d: return "Convolution2d";
         case LayerType::Debug: return "Debug";
         case LayerType::DepthwiseConvolution2d: return "DepthwiseConvolution2d";
+        case LayerType::Dequantize: return "Dequantize";
         case LayerType::DetectionPostProcess: return "DetectionPostProcess";
         case LayerType::Division: return "Division";
         case LayerType::Equal: return "Equal";
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index d6d66031a7..1972e9c1b5 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -24,6 +24,7 @@ enum class LayerType
     Convolution2d,
     Debug,
     DepthwiseConvolution2d,
+    Dequantize,
     DetectionPostProcess,
     Division,
     Equal,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 0e3d2522e4..030973306f 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -189,6 +189,15 @@ bool IsDepthwiseConvolutionSupported(const BackendId& backend,
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsDepthwiseConvolutionSupported, input, output, descriptor, weights, biases);
 }
 
+bool IsDequantizeSupported(const BackendId& backend,
+                           const TensorInfo& input,
+                           const TensorInfo& output,
+                           char* reasonIfUnsupported,
+                           size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsDequantizeSupported, input, output);
+}
+
 bool IsDetectionPostProcessSupported(const BackendId& backend,
                                      const TensorInfo& input0,
                                      const TensorInfo& input1,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 9300a75a07..9d87aeeee3 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -16,6 +16,7 @@
 #include "layers/Convolution2dLayer.hpp"
 #include "layers/DebugLayer.hpp"
 #include "layers/DepthwiseConvolution2dLayer.hpp"
+#include "layers/DequantizeLayer.hpp"
 #include "layers/DetectionPostProcessLayer.hpp"
 #include "layers/DivisionLayer.hpp"
 #include "layers/EqualLayer.hpp"
@@ -86,6 +87,7 @@ DECLARE_LAYER(ConvertFp32ToFp16)
 DECLARE_LAYER(Convolution2d)
 DECLARE_LAYER(Debug)
 DECLARE_LAYER(DepthwiseConvolution2d)
+DECLARE_LAYER(Dequantize)
 DECLARE_LAYER(DetectionPostProcess)
 DECLARE_LAYER(Division)
 DECLARE_LAYER(Equal)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index c5dfbd75ec..6dbd4611df 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -935,6 +935,11 @@ IConnectableLayer *Network::AddQuantizeLayer(const char *name)
     return m_Graph->AddLayer<QuantizeLayer>(name);
 }
 
+IConnectableLayer* Network::AddDequantizeLayer(const char* name)
+{
+    return m_Graph->AddLayer<DequantizeLayer>(name);
+}
+
 IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                  const char* name)
 {
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 5ed8cca2f2..782531acde 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -72,6 +72,8 @@ public:
                                           const ConstTensor& biases,
                                           const char* name = nullptr) override;
 
+    IConnectableLayer* AddDequantizeLayer(const char* name = nullptr) override;
+
     IConnectableLayer* AddDetectionPostProcessLayer(
         const DetectionPostProcessDescriptor& descriptor,
         const ConstTensor& anchors,
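A hedged usage sketch of the support query added in LayerSupport.cpp above. The matching declaration presumably lands in include/armnn/LayerSupport.hpp, which falls outside this 'src'-limited diffstat; the backend id and buffer size below are illustrative:

```cpp
// Sketch: ask a backend whether it can run Dequantize before adding the layer.
#include <armnn/ArmNN.hpp>
#include <armnn/LayerSupport.hpp>

bool CanDequantizeOnCpuRef(const armnn::TensorInfo& input, const armnn::TensorInfo& output)
{
    char reason[1024] = "";
    return armnn::IsDequantizeSupported(armnn::BackendId("CpuRef"),
                                        input,
                                        output,
                                        reason,
                                        sizeof(reason));
}
```

Note that at this commit every backend still inherits the LayerSupportBase default, so the query returns false until a backend overrides IsDequantizeSupported.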
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
new file mode 100644
index 0000000000..4dd30de77b
--- /dev/null
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "DequantizeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+DequantizeLayer::DequantizeLayer(const char* name)
+    : Layer(1, 1, LayerType::Dequantize, name)
+{}
+
+std::unique_ptr<IWorkload> DequantizeLayer::CreateWorkload(const Graph& graph,
+                                                           const IWorkloadFactory& factory) const
+{
+    DequantizeQueueDescriptor descriptor;
+
+    return factory.CreateDequantize(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+DequantizeLayer* DequantizeLayer::Clone(Graph& graph) const
+{
+    return CloneBase<DequantizeLayer>(graph, GetName());
+}
+
+void DequantizeLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    std::vector<TensorShape> inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "DequantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+void DequantizeLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitDequantizeLayer(this, GetName());
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/DequantizeLayer.hpp b/src/armnn/layers/DequantizeLayer.hpp
new file mode 100644
index 0000000000..1340f96a27
--- /dev/null
+++ b/src/armnn/layers/DequantizeLayer.hpp
@@ -0,0 +1,42 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Layer.hpp"
+
+namespace armnn
+{
+
+/// This layer dequantizes the input tensor.
+class DequantizeLayer : public Layer
+{
+public:
+    /// Makes a workload for the Dequantize type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    DequantizeLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref DequantizeLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a DequantizeLayer.
+    /// @param [in] name Optional name for the layer.
+    DequantizeLayer(const char* name);
+
+    /// Default destructor
+    ~DequantizeLayer() = default;
+};
+
+} // namespace armnn
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index ff5bf8ba4e..943c6a7fed 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -193,6 +193,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
     m_ParserFunctions[Layer_ConstantLayer] = &Deserializer::ParseConstant;
     m_ParserFunctions[Layer_Convolution2dLayer] = &Deserializer::ParseConvolution2d;
     m_ParserFunctions[Layer_DepthwiseConvolution2dLayer] = &Deserializer::ParseDepthwiseConvolution2d;
+    m_ParserFunctions[Layer_DequantizeLayer] = &Deserializer::ParseDequantize;
     m_ParserFunctions[Layer_DetectionPostProcessLayer] = &Deserializer::ParseDetectionPostProcess;
     m_ParserFunctions[Layer_DivisionLayer] = &Deserializer::ParseDivision;
     m_ParserFunctions[Layer_EqualLayer] = &Deserializer::ParseEqual;
@@ -242,6 +243,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
             return graphPtr->layers()->Get(layerIndex)->layer_as_Convolution2dLayer()->base();
         case Layer::Layer_DepthwiseConvolution2dLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_DepthwiseConvolution2dLayer()->base();
+        case Layer::Layer_DequantizeLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_DequantizeLayer()->base();
         case Layer::Layer_DetectionPostProcessLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_DetectionPostProcessLayer()->base();
         case Layer::Layer_DivisionLayer:
@@ -2062,4 +2065,24 @@ void Deserializer::ParseLstm(GraphPtr graph, unsigned int layerIndex)
     RegisterOutputSlots(graph, layerIndex, layer);
 }
 
+void Deserializer::ParseDequantize(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+
+    Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    const std::string layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 } // namespace armnnDeserializer
diff --git a/src/armnnDeserializer/Deserializer.hpp b/src/armnnDeserializer/Deserializer.hpp
index 5d57dfc02d..f18c163035 100644
--- a/src/armnnDeserializer/Deserializer.hpp
+++ b/src/armnnDeserializer/Deserializer.hpp
@@ -85,6 +85,7 @@ private:
     void ParseConstant(GraphPtr graph, unsigned int layerIndex);
     void ParseConvolution2d(GraphPtr graph, unsigned int layerIndex);
     void ParseDepthwiseConvolution2d(GraphPtr graph, unsigned int layerIndex);
+    void ParseDequantize(GraphPtr graph, unsigned int layerIndex);
     void ParseDetectionPostProcess(GraphPtr graph, unsigned int layerIndex);
     void ParseDivision(GraphPtr graph, unsigned int layerIndex);
     void ParseEqual(GraphPtr graph, unsigned int layerIndex);
diff --git a/src/armnnDeserializer/DeserializerSupport.md b/src/armnnDeserializer/DeserializerSupport.md
index d53252ec00..77856cf389 100644
--- a/src/armnnDeserializer/DeserializerSupport.md
+++ b/src/armnnDeserializer/DeserializerSupport.md
@@ -13,6 +13,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
 * Constant
 * Convolution2d
 * DepthwiseConvolution2d
+* Dequantize
 * DetectionPostProcess
 * Division
 * Equal
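With the parser wired up, loading a serialized model containing Dequantize is just the normal deserializer flow. A sketch, assuming the IDeserializer interface of this era (Create / CreateNetworkFromBinary):

```cpp
// Sketch: deserialize a network; DequantizeLayer records in the stream are
// routed through ParseDequantize and recreated via INetwork::AddDequantizeLayer.
#include <armnnDeserializer/IDeserializer.hpp>
#include <cstdint>
#include <vector>

armnn::INetworkPtr LoadNetwork(const std::vector<uint8_t>& binaryContent)
{
    auto parser = armnnDeserializer::IDeserializer::Create();
    return parser->CreateNetworkFromBinary(binaryContent);
}
```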
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 7ac83598e1..3aa644dbe5 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -117,7 +117,8 @@ enum LayerType : uint {
     Splitter = 32,
     DetectionPostProcess = 33,
     Lstm = 34,
-    Quantize = 35
+    Quantize = 35,
+    Dequantize = 36
 }
 
 // Base layer table to be used as part of other layers
@@ -519,6 +520,10 @@ table LstmLayer {
     inputParams:LstmInputParams;
 }
 
+table DequantizeLayer {
+    base:LayerBase;
+}
+
 union Layer {
     ActivationLayer,
     AdditionLayer,
@@ -555,7 +560,8 @@ union Layer {
     SplitterLayer,
     DetectionPostProcessLayer,
     LstmLayer,
-    QuantizeLayer
+    QuantizeLayer,
+    DequantizeLayer
 }
 
 table AnyLayer {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 83777c9849..7181f01e6b 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -289,6 +289,15 @@ void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectab
     CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
 }
 
+void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
+                                             const char* name)
+{
+    auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
+    auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
+
+    CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
+}
+
 void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
                                                        const armnn::DetectionPostProcessDescriptor& descriptor,
                                                        const armnn::ConstTensor& anchors,
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index 82e19316e9..5c3e48a695 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -77,6 +77,9 @@ public:
                                           const armnn::Optional<armnn::ConstTensor>& biases,
                                           const char* name = nullptr) override;
 
+    void VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
+                              const char* name = nullptr) override;
+
     void VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
                                         const armnn::DetectionPostProcessDescriptor& descriptor,
                                         const armnn::ConstTensor& anchors,
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 7686d5c24c..a3c5852bd2 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -13,6 +13,7 @@ The Arm NN SDK Serializer currently supports the following layers:
 * Constant
 * Convolution2d
 * DepthwiseConvolution2d
+* Dequantize
 * DetectionPostProcess
 * Division
 * Equal
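On the writing side nothing new is required of callers; the visitor picks up Dequantize layers automatically. A sketch, assuming the ISerializer interface of this era (Serialize / SaveSerializedToStream):

```cpp
// Sketch: serialize a network that contains a Dequantize layer.
#include <armnnSerializer/ISerializer.hpp>
#include <sstream>
#include <string>

std::string SaveNetwork(const armnn::INetwork& network)
{
    auto serializer = armnnSerializer::ISerializer::Create();
    serializer->Serialize(network); // visits each layer, incl. VisitDequantizeLayer

    std::stringstream stream;
    serializer->SaveSerializedToStream(stream);
    return stream.str();
}
```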
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 0345e53bcb..0979076476 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -59,6 +59,9 @@ protected:
         BOOST_TEST(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
         BOOST_TEST(
             GetDataTypeName(connectedInfo.GetDataType()) == GetDataTypeName(m_InputTensorInfos[i].GetDataType()));
+
+        BOOST_TEST(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
+        BOOST_TEST(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
     }
 
     for (unsigned int i = 0; i < m_OutputTensorInfos.size(); i++)
@@ -67,6 +70,9 @@ protected:
         BOOST_TEST(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
         BOOST_TEST(
             GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
+
+        BOOST_TEST(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
+        BOOST_TEST(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
     }
 }
 
@@ -590,6 +596,44 @@ BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2d)
     deserializedNetwork->Accept(verifier);
 }
 
+BOOST_AUTO_TEST_CASE(SerializeDequantize)
+{
+    class DequantizeLayerVerifier : public LayerVerifierBase
+    {
+    public:
+        DequantizeLayerVerifier(const std::string& layerName,
+                                const std::vector<armnn::TensorInfo>& inputInfos,
+                                const std::vector<armnn::TensorInfo>& outputInfos)
+        : LayerVerifierBase(layerName, inputInfos, outputInfos) {}
+
+        void VisitDequantizeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+        {
+            VerifyNameAndConnections(layer, name);
+        }
+    };
+
+    const std::string layerName("dequantize");
+    const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1);
+    const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const dequantizeLayer = network->AddDequantizeLayer(layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(dequantizeLayer->GetInputSlot(0));
+    dequantizeLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    dequantizeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    DequantizeLayerVerifier verifier(layerName, {inputInfo}, {outputInfo});
+    deserializedNetwork->Accept(verifier);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeDeserializeDetectionPostProcess)
 {
     class DetectionPostProcessLayerVerifier : public LayerVerifierBase
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 137e77eebe..04f822cea9 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -115,6 +115,13 @@ bool LayerSupportBase::IsDepthwiseConvolutionSupported(const TensorInfo& input,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsDequantizeSupported(const TensorInfo& input,
+                                             const TensorInfo& output,
+                                             Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsDetectionPostProcessSupported(const armnn::TensorInfo& input0,
                                                        const armnn::TensorInfo& input1,
                                                        const armnn::DetectionPostProcessDescriptor& descriptor,
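A backend advertises support by overriding the new hook declared in the LayerSupportBase header diff that follows. A sketch with a hypothetical MyLayerSupport class, mirroring the data-type rules that DequantizeQueueDescriptor::Validate enforces further down:

```cpp
// Sketch: a hypothetical backend opting in to Dequantize support.
#include <backendsCommon/LayerSupportBase.hpp>
#include <string>

class MyLayerSupport : public armnn::LayerSupportBase
{
public:
    bool IsDequantizeSupported(const armnn::TensorInfo& input,
                               const armnn::TensorInfo& output,
                               armnn::Optional<std::string&> reasonIfUnsupported) const override
    {
        // Accept the quantized input types and Float32 output that
        // DequantizeQueueDescriptor::Validate requires.
        const bool supported =
            (input.GetDataType() == armnn::DataType::QuantisedAsymm8 ||
             input.GetDataType() == armnn::DataType::QuantisedSymm16) &&
            output.GetDataType() == armnn::DataType::Float32;

        if (!supported && reasonIfUnsupported)
        {
            reasonIfUnsupported.value() = "Dequantize: unsupported tensor data types";
        }
        return supported;
    }
};
```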
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index ceb3b2768e..7d64095667 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -67,6 +67,10 @@ public:
                                const Optional<TensorInfo>& biases,
                                Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsDequantizeSupported(const TensorInfo& input,
+                               const TensorInfo& output,
+                               Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsDetectionPostProcessSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const DetectionPostProcessDescriptor& descriptor,
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index e30a3f36b7..91b1c5790b 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1153,6 +1153,23 @@ void DetectionPostProcessQueueDescriptor::Validate(const WorkloadInfo& workloadI
     }
 }
 
+void DequantizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateSingleInput(workloadInfo, "DequantizeQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "DequantizeQueueDescriptor");
+
+    if (workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedAsymm8 &&
+        workloadInfo.m_InputTensorInfos[0].GetDataType() != DataType::QuantisedSymm16)
+    {
+        throw InvalidArgumentException("Input to dequantize layer must be quantized type.");
+    }
+
+    if (workloadInfo.m_OutputTensorInfos[0].GetDataType() != DataType::Float32)
+    {
+        throw InvalidArgumentException("Output of dequantize layer must be Float32 type.");
+    }
+}
+
 void PreCompiledQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     // This is internally generated so it should not need validation.
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 9250ceac43..5640701d82 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -416,4 +416,9 @@ struct PreCompiledQueueDescriptor : QueueDescriptorWithParameters<PreCompiledDes
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct DequantizeQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 833f3b894e..6534a00343 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -229,6 +229,16 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                                               reason);
             break;
         }
+        case LayerType::Dequantize:
+        {
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+
+            result = layerSupportObject->IsDequantizeSupported(OverrideDataType(input, dataType),
+                                                               OverrideDataType(output, DataType::Float32),
+                                                               reason);
+            break;
+        }
         case LayerType::DetectionPostProcess:
         {
             const TensorInfo& input0 = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
@@ -821,6 +831,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateDepthwiseConvolution2d(
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateDequantize(
+    const DequantizeQueueDescriptor& descriptor, const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateDetectionPostProcess(
     const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
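For reference, the value transformation the layer performs is the standard affine dequantization, real = scale * (quantized - offset); a minimal sketch:

```cpp
// Sketch of the per-element math behind Dequantize.
#include <cstdint>

float DequantizeValue(uint8_t quantized, float scale, int32_t offset)
{
    return scale * (static_cast<float>(quantized) - static_cast<float>(offset));
}

// e.g. with scale 0.5 and offset 1 (as in the SerializeDequantize test above),
// a raw value of 3 dequantizes to 0.5f * (3 - 1) = 1.0f.
```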
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 2aa3854c4a..ed7303cf33 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -79,6 +79,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateDepthwiseConvolution2d(
         const DepthwiseConvolution2dQueueDescriptor& descriptor, const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
+                                                        const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateDetectionPostProcess(
         const DetectionPostProcessQueueDescriptor& descriptor, const WorkloadInfo& info) const;
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 8f86132274..26fb03f55d 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -336,6 +336,8 @@ DECLARE_LAYER_POLICY_1_PARAM(Debug)
 
 DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
 
+DECLARE_LAYER_POLICY_1_PARAM(Dequantize)
+
 DECLARE_LAYER_POLICY_2_PARAM(DetectionPostProcess)
 
 DECLARE_LAYER_POLICY_1_PARAM(Equal)
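Finally, the "no-op factory method" of the commit title is visible above: IWorkloadFactory::CreateDequantize returns an empty unique_ptr until a backend overrides it. A sketch of the opt-in pattern, with a hypothetical MyDequantizeWorkload name and assuming the BaseWorkload helper in backendsCommon/Workload.hpp:

```cpp
// Sketch: a hypothetical backend workload replacing the no-op default.
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <memory>

class MyDequantizeWorkload : public armnn::BaseWorkload<armnn::DequantizeQueueDescriptor>
{
public:
    MyDequantizeWorkload(const armnn::DequantizeQueueDescriptor& descriptor,
                         const armnn::WorkloadInfo& info)
        : armnn::BaseWorkload<armnn::DequantizeQueueDescriptor>(descriptor, info) {}

    void Execute() const override
    {
        // A real implementation reads the quantized input tensor and writes
        // scale * (value - offset) into the Float32 output tensor.
    }
};

// The backend's factory would then override IWorkloadFactory::CreateDequantize:
//
//     std::unique_ptr<IWorkload> CreateDequantize(const DequantizeQueueDescriptor& descriptor,
//                                                 const WorkloadInfo& info) const override
//     {
//         return std::make_unique<MyDequantizeWorkload>(descriptor, info);
//     }
```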