path: root/src/armnn/layers/FakeQuantizationLayer.cpp
author      surmeh01 <surabhi.mehta@arm.com>    2018-05-18 16:31:43 +0100
committer   telsoa01 <telmo.soares@arm.com>     2018-05-23 13:09:07 +0100
commit      3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0 (patch)
tree        5950603ad78ec3fe56fb31ddc7f4d52a19f5bc60 /src/armnn/layers/FakeQuantizationLayer.cpp
parent      bceff2fb3fc68bb0aa88b886900c34b77340c826 (diff)
download    armnn-3537c2ca7ebf31c1673b9ec2bb0c17b0406bbae0.tar.gz
Release 18.05
Diffstat (limited to 'src/armnn/layers/FakeQuantizationLayer.cpp')
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp | 51
1 file changed, 51 insertions, 0 deletions
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
new file mode 100644
index 0000000000..24b53b2e37
--- /dev/null
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -0,0 +1,51 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include "FakeQuantizationLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backends/WorkloadData.hpp>
+#include <backends/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+FakeQuantizationLayer::FakeQuantizationLayer(const FakeQuantizationDescriptor& param, const char* name)
+: LayerWithParameters(1, 1, LayerType::FakeQuantization, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> FakeQuantizationLayer::CreateWorkload(const Graph& graph,
+ const IWorkloadFactory& factory) const
+{
+ FakeQuantizationQueueDescriptor descriptor;
+ return factory.CreateFakeQuantization(descriptor, PrepInfoAndDesc(descriptor, graph) );
+}
+
+FakeQuantizationLayer* FakeQuantizationLayer::Clone(Graph& graph) const
+{
+ return CloneBase<FakeQuantizationLayer>(graph, m_Param, GetName());
+}
+
+void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
+{
+ ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection() != nullptr,
+ "FakeQuantizationLayer: InputSlot must be connected to an OutputSlot");
+ ConditionalThrow<LayerValidationException>(GetInputSlot(0).GetConnection()->IsTensorInfoSet(),
+ "FakeQuantizationLayer: TensorInfo must be set on connected OutputSlot.");
+
+
+ IOutputSlot* input = GetInputSlot(0).GetConnection();
+
+ // input and output shapes are the same
+ TensorShape const& outShape = input->GetTensorInfo().GetShape();
+ ConditionalThrowIfNotEqual<LayerValidationException>(
+ "FakeQuantizationLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+ GetOutputSlot(0).GetTensorInfo().GetShape(),
+ outShape);
+}
+
+} // namespace armnn
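
Note: the new layer only wires fake quantization into the graph machinery (workload creation via the backend's IWorkloadFactory, cloning, and input/output shape validation); the arithmetic itself is performed by the FakeQuantization workload that CreateFakeQuantization returns. As a point of reference, the following is a minimal, standalone sketch of the usual quantize-then-dequantize formulation over a [min, max] range, which is what the descriptor's m_Min/m_Max parameters describe. FakeQuantize() and the values used here are illustrative assumptions, not part of the ArmNN API.

// Hypothetical, standalone sketch of fake quantization: values are clamped to
// [min, max], quantized to one of 256 8-bit levels, then dequantized back to
// float, so the output keeps the input's shape and data type.
#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

std::vector<float> FakeQuantize(const std::vector<float>& input, float min, float max)
{
    // Map the [min, max] range onto the 256 representable 8-bit levels.
    const float scale = (max - min) / 255.0f;

    std::vector<float> output;
    output.reserve(input.size());
    for (float value : input)
    {
        // Clamp, quantize to the nearest level, then dequantize back to float.
        const float clamped = std::min(std::max(value, min), max);
        const int   level   = static_cast<int>(std::round((clamped - min) / scale));
        output.push_back(min + static_cast<float>(level) * scale);
    }
    return output;
}

int main()
{
    // Example range of [-6, 6]; in ArmNN the range comes from
    // FakeQuantizationDescriptor's m_Min and m_Max.
    const std::vector<float> result = FakeQuantize({-7.5f, -1.0f, 0.0f, 2.5f, 9.0f}, -6.0f, 6.0f);
    for (float v : result)
    {
        std::cout << v << " ";
    }
    std::cout << std::endl;
    return 0;
}

Because the output shape always equals the input shape, ValidateTensorShapesFromInputs above only has to check that the shape already set on OutputSlot[0] matches the connected input's shape.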