aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/InternalTypes.hpp          1
-rw-r--r--  src/armnn/LayerSupport.cpp           9
-rw-r--r--  src/armnn/LayersFwd.hpp              2
-rw-r--r--  src/armnn/Network.cpp                5
-rw-r--r--  src/armnn/Network.hpp                2
-rw-r--r--  src/armnn/layers/QuantizeLayer.cpp   48
-rw-r--r--  src/armnn/layers/QuantizeLayer.hpp   35
-rw-r--r--  src/armnn/test/NetworkTests.cpp      51
-rw-r--r--  src/armnn/test/TestLayerVisitor.hpp  145
9 files changed, 155 insertions, 143 deletions
diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp
index a61c7b8147..d6d66031a7 100644
--- a/src/armnn/InternalTypes.hpp
+++ b/src/armnn/InternalTypes.hpp
@@ -47,6 +47,7 @@ enum class LayerType
Permute,
Pooling2d,
PreCompiled,
+ Quantize,
Reshape,
ResizeBilinear,
Rsqrt,
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 5916488fb5..0e3d2522e4 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -406,6 +406,15 @@ bool IsPadSupported(const BackendId& backend,
FORWARD_LAYER_SUPPORT_FUNC(backend, IsPadSupported, input, output, descriptor);
}
+// Per-backend capability check for the Quantize layer, following the same
+// pattern as the sibling Is*Supported functions (e.g. IsPadSupported above).
+// NOTE(review): reasonIfUnsupported / reasonIfUnsupportedMaxLength are not
+// passed explicitly — presumably FORWARD_LAYER_SUPPORT_FUNC picks them up by
+// name, as it does for the other checks here; confirm against the macro.
+bool IsQuantizeSupported(const BackendId& backend,
+                         const TensorInfo& input,
+                         const TensorInfo& output,
+                         char* reasonIfUnsupported,
+                         size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsQuantizeSupported, input, output);
+}
+
bool IsPermuteSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index 497b517f28..9300a75a07 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -39,6 +39,7 @@
#include "layers/PermuteLayer.hpp"
#include "layers/Pooling2dLayer.hpp"
#include "layers/PreCompiledLayer.hpp"
+#include "layers/QuantizeLayer.hpp"
#include "layers/ReshapeLayer.hpp"
#include "layers/ResizeBilinearLayer.hpp"
#include "layers/RsqrtLayer.hpp"
@@ -108,6 +109,7 @@ DECLARE_LAYER(Pad)
DECLARE_LAYER(Permute)
DECLARE_LAYER(Pooling2d)
DECLARE_LAYER(PreCompiled)
+DECLARE_LAYER(Quantize)
DECLARE_LAYER(Reshape)
DECLARE_LAYER(ResizeBilinear)
DECLARE_LAYER(Rsqrt)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index bd5adccf86..c5dfbd75ec 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -930,6 +930,11 @@ IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, cons
return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
}
+// Appends a QuantizeLayer with the given (optional) name to the graph.
+// Pointer-declarator placement matches the surrounding Add*Layer overloads
+// (e.g. AddPadLayer / AddStridedSliceLayer): "T* name", not "T *name".
+IConnectableLayer* Network::AddQuantizeLayer(const char* name)
+{
+    return m_Graph->AddLayer<QuantizeLayer>(name);
+}
+
IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index e50ce79052..5ed8cca2f2 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -159,6 +159,8 @@ public:
IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor, const char* name = nullptr) override;
+ IConnectableLayer* AddQuantizeLayer(const char* name = nullptr) override;
+
IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name = nullptr) override;
diff --git a/src/armnn/layers/QuantizeLayer.cpp b/src/armnn/layers/QuantizeLayer.cpp
new file mode 100644
index 0000000000..fbf8b322ab
--- /dev/null
+++ b/src/armnn/layers/QuantizeLayer.cpp
@@ -0,0 +1,48 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "QuantizeLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/ILayerVisitor.hpp>
+
+namespace armnn
+{
+
+// Quantize is a 1-input / 1-output layer, registered as LayerType::Quantize.
+QuantizeLayer::QuantizeLayer(const char* name)
+: Layer(1, 1, LayerType::Quantize, name)
+{}
+
+// NOTE(review): stub — returns nullptr and ignores both arguments, so no
+// backend workload is created for this layer yet. Confirm that downstream
+// code tolerates a null workload until a real factory hook is added.
+std::unique_ptr<IWorkload> QuantizeLayer::CreateWorkload(const Graph& graph,
+                                                         const IWorkloadFactory& factory) const
+{
+    return nullptr;
+}
+
+// Clones this layer into the given graph; only the name is carried over,
+// as Quantize has no descriptor or constant data to copy.
+Layer* QuantizeLayer::Clone(Graph& graph) const
+{
+    QuantizeLayer* clone = CloneBase<QuantizeLayer>(graph, GetName());
+    return clone;
+}
+
+// Infers the output shape from the single input connection and throws
+// LayerValidationException if it does not match the shape already set on
+// OutputSlot[0].
+void QuantizeLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "QuantizeLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShapes[0]);
+}
+
+// Visitor entry point for this layer type.
+void QuantizeLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitQuantizeLayer(this, GetName());
+}
+
+} //namespace armnn
diff --git a/src/armnn/layers/QuantizeLayer.hpp b/src/armnn/layers/QuantizeLayer.hpp
new file mode 100644
index 0000000000..fabb4492c5
--- /dev/null
+++ b/src/armnn/layers/QuantizeLayer.hpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn {
+
+// Forward declarations — keep this header light.
+class IWorkload;
+class IWorkloadFactory;
+class ILayerVisitor;
+
+/// This layer represents a quantize operation.
+class QuantizeLayer : public Layer
+{
+public:
+    /// Makes a workload for the Quantize type. Currently implemented as a
+    /// stub in the .cpp, which returns nullptr.
+    /// (Redundant "virtual" removed: "override" already implies it.)
+    std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                              const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer in the given graph.
+    Layer* Clone(Graph& graph) const override;
+
+    /// Checks that the shape inferred from the input connection matches the
+    /// shape already set on OutputSlot[0]; throws LayerValidationException
+    /// otherwise.
+    void ValidateTensorShapesFromInputs() override;
+
+    /// Dispatches to ILayerVisitor::VisitQuantizeLayer.
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create a QuantizeLayer; construction is restricted so
+    /// instances are created through the graph/clone machinery.
+    QuantizeLayer(const char* name);
+
+    /// Default destructor.
+    ~QuantizeLayer() = default;
+};
+
+} //namespace armnn
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 0963df6222..4de09a2804 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -6,6 +6,7 @@
#include "GraphUtils.hpp"
#include <armnn/ArmNN.hpp>
+#include <armnn/LayerVisitorBase.hpp>
#include <Network.hpp>
#include <boost/test/unit_test.hpp>
@@ -366,4 +367,54 @@ BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
}
+// Checks that INetwork::AddQuantizeLayer creates a visitable Quantize layer
+// wired between an input and an output layer, carrying the expected name,
+// slot counts, and tensor data types (Float32 in, QuantisedAsymm8 out).
+BOOST_AUTO_TEST_CASE(Network_AddQuantize)
+{
+    // Visitor that records the visit and validates the quantize layer.
+    struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
+    {
+        void VisitQuantizeLayer(const armnn::IConnectableLayer* layer, const char* name) override
+        {
+            m_Visited = true;
+
+            BOOST_TEST(layer);
+
+            std::string expectedName = std::string("quantize");
+            BOOST_TEST(std::string(layer->GetName()) == expectedName);
+            BOOST_TEST(std::string(name) == expectedName);
+
+            BOOST_TEST(layer->GetNumInputSlots() == 1);
+            BOOST_TEST(layer->GetNumOutputSlots() == 1);
+
+            // Input side: the connection carries the Float32 info set below.
+            const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
+            BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
+
+            // Output side: the quantized (QuantisedAsymm8) info set below.
+            const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
+            BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QuantisedAsymm8));
+        }
+
+        // Set by VisitQuantizeLayer; asserted at the end of the test.
+        bool m_Visited = false;
+    };
+
+
+    // Build the graph: input -> quantize -> output.
+    auto graph = armnn::INetwork::Create();
+
+    auto input = graph->AddInputLayer(0, "input");
+    auto quantize = graph->AddQuantizeLayer("quantize");
+    auto output = graph->AddOutputLayer(1, "output");
+
+    input->GetOutputSlot(0).Connect(quantize->GetInputSlot(0));
+    quantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    armnn::TensorInfo infoIn({3,1}, armnn::DataType::Float32);
+    input->GetOutputSlot(0).SetTensorInfo(infoIn);
+
+    armnn::TensorInfo infoOut({3,1}, armnn::DataType::QuantisedAsymm8);
+    quantize->GetOutputSlot(0).SetTensorInfo(infoOut);
+
+    // Walk the network and confirm the quantize layer was actually visited.
+    Test testQuantize;
+    graph->Accept(testQuantize);
+
+    BOOST_TEST(testQuantize.m_Visited == true);
+
+}
+
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/TestLayerVisitor.hpp b/src/armnn/test/TestLayerVisitor.hpp
index 8c0da50be3..e43227f520 100644
--- a/src/armnn/test/TestLayerVisitor.hpp
+++ b/src/armnn/test/TestLayerVisitor.hpp
@@ -4,13 +4,13 @@
//
#pragma once
-#include <armnn/ILayerVisitor.hpp>
+#include <armnn/LayerVisitorBase.hpp>
#include <armnn/Descriptors.hpp>
namespace armnn
{
// Abstract base class with do nothing implementations for all layer visit methods
-class TestLayerVisitor : public ILayerVisitor
+class TestLayerVisitor : public LayerVisitorBase<VisitorNoThrowPolicy>
{
protected:
virtual ~TestLayerVisitor() {}
@@ -34,147 +34,6 @@ public:
m_LayerName = "";
}
}
-
- void VisitInputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override {}
-
- void VisitConvolution2dLayer(const IConnectableLayer* layer,
- const Convolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override {}
-
- void VisitDepthwiseConvolution2dLayer(const IConnectableLayer* layer,
- const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override {}
-
- void VisitDetectionPostProcessLayer(const IConnectableLayer* layer,
- const DetectionPostProcessDescriptor& descriptor,
- const ConstTensor& anchors,
- const char* name = nullptr) override {}
-
- void VisitFullyConnectedLayer(const IConnectableLayer* layer,
- const FullyConnectedDescriptor& fullyConnectedDescriptor,
- const ConstTensor& weights,
- const Optional<ConstTensor>& biases,
- const char* name = nullptr) override {}
-
- void VisitPermuteLayer(const IConnectableLayer* layer,
- const PermuteDescriptor& permuteDescriptor,
- const char* name = nullptr) override {}
-
- void VisitBatchToSpaceNdLayer(const IConnectableLayer* layer,
- const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
- const char* name = nullptr) override {}
-
- void VisitPooling2dLayer(const IConnectableLayer* layer,
- const Pooling2dDescriptor& pooling2dDescriptor,
- const char* name = nullptr) override {}
-
- void VisitActivationLayer(const IConnectableLayer* layer,
- const ActivationDescriptor& activationDescriptor,
- const char* name = nullptr) override {}
-
- void VisitNormalizationLayer(const IConnectableLayer* layer,
- const NormalizationDescriptor& normalizationDescriptor,
- const char* name = nullptr) override {}
-
- void VisitSoftmaxLayer(const IConnectableLayer* layer,
- const SoftmaxDescriptor& softmaxDescriptor,
- const char* name = nullptr) override {}
-
- void VisitSplitterLayer(const IConnectableLayer* layer,
- const ViewsDescriptor& splitterDescriptor,
- const char* name = nullptr) override {}
-
- void VisitMergerLayer(const IConnectableLayer* layer,
- const OriginsDescriptor& mergerDescriptor,
- const char* name = nullptr) override {}
-
- void VisitAdditionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitMultiplicationLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitBatchNormalizationLayer(const IConnectableLayer* layer,
- const BatchNormalizationDescriptor& desc,
- const ConstTensor& mean,
- const ConstTensor& variance,
- const ConstTensor& beta,
- const ConstTensor& gamma,
- const char* name = nullptr) override {}
-
- void VisitResizeBilinearLayer(const IConnectableLayer* layer,
- const ResizeBilinearDescriptor& resizeDesc,
- const char* name = nullptr) override {}
-
- void VisitL2NormalizationLayer(const IConnectableLayer* layer,
- const L2NormalizationDescriptor& desc,
- const char* name = nullptr) override {}
-
- void VisitConstantLayer(const IConnectableLayer* layer,
- const ConstTensor& input,
- const char* name = nullptr) override {}
-
- void VisitReshapeLayer(const IConnectableLayer* layer,
- const ReshapeDescriptor& reshapeDescriptor,
- const char* name = nullptr) override {}
-
- void VisitSpaceToBatchNdLayer(const IConnectableLayer* layer,
- const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
- const char* name = nullptr) override {}
-
- void VisitFloorLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitOutputLayer(const IConnectableLayer* layer,
- LayerBindingId id,
- const char* name = nullptr) override {}
-
- void VisitLstmLayer(const IConnectableLayer* layer,
- const LstmDescriptor& descriptor,
- const LstmInputParams& params,
- const char* name = nullptr) override {}
-
- void VisitDivisionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitSubtractionLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitMaximumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitMeanLayer(const IConnectableLayer* layer,
- const MeanDescriptor& meanDescriptor,
- const char* name = nullptr) override {}
-
- void VisitPadLayer(const IConnectableLayer* layer,
- const PadDescriptor& padDescriptor,
- const char* name = nullptr) override {}
-
- void VisitStridedSliceLayer(const IConnectableLayer* layer,
- const StridedSliceDescriptor& stridedSliceDescriptor,
- const char* name = nullptr) override {}
-
- void VisitMinimumLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitGreaterLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitEqualLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitRsqrtLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
-
- void VisitGatherLayer(const IConnectableLayer* layer,
- const char* name = nullptr) override {}
};
} //namespace armnn