author     Isabella Gottardi <isabella.gottardi@arm.com>   2019-11-05 17:50:27 +0000
committer  Georgios Pinitas <georgios.pinitas@arm.com>     2019-11-14 13:32:30 +0000
commit     cd4e9abf7a165f15ccd10ac4541365d4f8a6db19 (patch)
tree       442cb14065b866065c1e2837490ae36731a38eef
parent     ee8cf3287265436d0d12effa36e876dc251eee8c (diff)
download   ComputeLibrary-cd4e9abf7a165f15ccd10ac4541365d4f8a6db19.tar.gz
COMPMID-2452: Add mnist example
* Add small-mnist example
* Add PrintAccessor
* Add DequantizationLayer graph node

Change-Id: I7bc8011e5a602f40fa3c47b231a2a69c804e78c2
Signed-off-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2274
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--  arm_compute/graph/GraphBuilder.h                    |   9
-rw-r--r--  arm_compute/graph/INodeVisitor.h                    |   9
-rw-r--r--  arm_compute/graph/TypePrinter.h                     |   3
-rw-r--r--  arm_compute/graph/Types.h                           |   1
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h        |  38
-rw-r--r--  arm_compute/graph/backends/ValidateHelpers.h        |  20
-rw-r--r--  arm_compute/graph/frontend/Layers.h                 |  21
-rw-r--r--  arm_compute/graph/nodes/DequantizationLayerNode.h   |  53
-rw-r--r--  arm_compute/graph/nodes/Nodes.h                     |   1
-rw-r--r--  arm_compute/graph/nodes/NodesFwd.h                  |   1
-rw-r--r--  examples/graph_mnist.cpp                            | 170
-rw-r--r--  src/graph/GraphBuilder.cpp                          |   5
-rw-r--r--  src/graph/backends/CL/CLFunctionsFactory.cpp        |   2
-rw-r--r--  src/graph/backends/CL/CLNodeValidator.cpp           |   2
-rw-r--r--  src/graph/backends/GLES/GCNodeValidator.cpp         |   2
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp       |   2
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp         |   2
-rw-r--r--  src/graph/nodes/DequantizationLayerNode.cpp         |  77
-rw-r--r--  utils/GraphUtils.cpp                                |  13
-rw-r--r--  utils/GraphUtils.h                                  |  38
20 files changed, 468 insertions(+), 1 deletion(-)
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index dc41ed5367..c1c56c3dd2 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -205,6 +205,15 @@ public:
* @return Node ID of the created node, EmptyNodeID in case of error
*/
static NodeID add_elementwise_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, EltwiseOperation operation);
+ /** Adds a dequantization node to the graph
+ *
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input Input to the dequantization node as a NodeID-Index pair
+ *
+ * @return Node ID of the created node, EmptyNodeID in case of error
+ */
+ static NodeID add_dequantization_node(Graph &g, NodeParams params, NodeIdxPair input);
/** Adds a detection output layer node to the graph
*
* @param[in] g Graph to add the node to
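For illustration (not part of this patch), a minimal sketch of calling the new builder entry point directly, assuming a Graph g and a previously created node prev_id whose output 0 carries a quantized tensor:

    // Sketch only (assumes #include "arm_compute/graph.h"); g and prev_id are placeholders.
    using namespace arm_compute::graph;
    NodeParams  params{ "dequant", Target::NEON };   // common node parameters: name and target hint (illustrative values)
    NodeIdxPair input{ prev_id, 0 };                 // output 0 of the previously created node
    NodeID      nid = GraphBuilder::add_dequantization_node(g, params, input);
    // nid equals EmptyNodeID if the node could not be created, as documented above.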
diff --git a/arm_compute/graph/INodeVisitor.h b/arm_compute/graph/INodeVisitor.h
index c9f9d2172c..c17b7753d6 100644
--- a/arm_compute/graph/INodeVisitor.h
+++ b/arm_compute/graph/INodeVisitor.h
@@ -71,6 +71,11 @@ public:
* @param[in] n Node to visit.
*/
virtual void visit(DepthwiseConvolutionLayerNode &n) = 0;
+ /** Visit DequantizationLayerNode.
+ *
+ * @param[in] n Node to visit.
+ */
+ virtual void visit(DequantizationLayerNode &n) = 0;
/** Visit DetectionOutputLayerNode.
*
* @param[in] n Node to visit.
@@ -200,6 +205,10 @@ public:
{
default_visit();
}
+ virtual void visit(DequantizationLayerNode &) override
+ {
+ default_visit();
+ }
virtual void visit(DetectionOutputLayerNode &) override
{
default_visit();
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index 131fd39277..7610852aaf 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -83,6 +83,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
case NodeType::DeconvolutionLayer:
os << "DeconvolutionLayer";
break;
+ case NodeType::DequantizationLayer:
+ os << "DequantizationLayer";
+ break;
case NodeType::DetectionOutputLayer:
os << "DetectionOutputLayer";
break;
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 5b82f93686..c01f9a8f4e 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -138,6 +138,7 @@ enum class NodeType
ConvolutionLayer,
DeconvolutionLayer,
DepthwiseConvolutionLayer,
+ DequantizationLayer,
DetectionOutputLayer,
DetectionPostProcessLayer,
EltwiseLayer,
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 02bfe9dc22..960011c1e2 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -607,6 +607,44 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
return func;
}
+/** Create a backend dequantize layer function
+ *
+ * @tparam DequantizationLayerFunction Backend dequantize function
+ * @tparam TargetInfo Target-specific information
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend dequantize layer function
+ */
+template <typename DequantizationLayerFunction, typename TargetInfo>
+std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
+{
+ validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
+
+ // Extract IO and info
+ typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
+ typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
+
+ ARM_COMPUTE_ERROR_ON(input == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+ // Create and configure function
+ auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
+ func->configure(input, output);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input->info()->data_type()
+ << " Input shape: " << input->info()->tensor_shape()
+ << " Input quantization info: " << output->info()->quantization_info()
+ << " Output shape: " << output->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(func);
+}
/** Create a backend detection output layer function
*
* @tparam DetectionOutputLayer Function Backend detection output function
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 9170006d9c..090e2d6b7c 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -199,7 +199,27 @@ Status validate_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
return status;
}
+/** Validates a dequantize layer node
+ *
+ * @tparam DequantizationLayer Dequantize layer type
+ *
+ * @param[in] node Node to validate
+ *
+ * @return Status
+ */
+template <typename DequantizationLayer>
+Status validate_dequantization_layer(DequantizationLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionOutputLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 1);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
+ // Extract IO and info
+ arm_compute::ITensorInfo *input = get_backing_tensor_info(node.input(0));
+ arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
+
+ return DequantizationLayer::validate(input, output);
+}
/** Validates a detection output layer node
*
* @tparam DetectionOutputLayer DetectionOutput layer type
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index 120997a8b4..61cd83c107 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -489,6 +489,25 @@ private:
const QuantizationInfo _weights_quant_info;
const QuantizationInfo _out_quant_info;
};
+/** Dequantization Layer */
+class DequantizationLayer final : public ILayer
+{
+public:
+ /** Construct a dequantization layer.
+ *
+ */
+ DequantizationLayer()
+ {
+ }
+
+ NodeID create_layer(IStream &s) override
+ {
+ NodeParams common_params = { name(), s.hints().target_hint };
+ NodeIdxPair input = { s.tail_node(), 0 };
+ return GraphBuilder::add_dequantization_node(s.graph(), common_params, input);
+ }
+};
+
/** DetectionOutput Layer */
class DetectionOutputLayer final : public ILayer
{
@@ -555,7 +574,7 @@ private:
class DummyLayer final : public ILayer
{
public:
- /** Construct an input layer.
+ /** Construct a dummy layer.
*
* @param[in] shape Output shape
*/
diff --git a/arm_compute/graph/nodes/DequantizationLayerNode.h b/arm_compute/graph/nodes/DequantizationLayerNode.h
new file mode 100644
index 0000000000..8b3d4add65
--- /dev/null
+++ b/arm_compute/graph/nodes/DequantizationLayerNode.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_GRAPH_DEQUANTIZATION_NODE_H
+#define ARM_COMPUTE_GRAPH_DEQUANTIZATION_NODE_H
+
+#include "arm_compute/graph/INode.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Dequantize Layer node
+ *
+ * Dequantize layer transforms a given input to a dequantized output.
+ *
+ */
+class DequantizationLayerNode final : public INode
+{
+public:
+ /** Constructor
+ */
+ DequantizationLayerNode();
+
+ // Inherited overridden methods:
+ NodeType type() const override;
+ bool forward_descriptors() override;
+ TensorDescriptor configure_output(size_t idx) const override;
+ void accept(INodeVisitor &v) override;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_GRAPH_DEQUANTIZATION_NODE_H */
\ No newline at end of file
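For background (not from this patch): for QASYMM8 inputs the backend dequantization functions apply the standard affine mapping real = scale * (quantized - zero_point), which is why the node's configure_output() in the .cpp below forces an F32 output descriptor. A minimal scalar sketch:

    // Illustrative helper, not library code: affine dequantization of one QASYMM8 value.
    inline float dequantize_qasymm8_value(uint8_t q, float scale, int32_t zero_point)
    {
        return scale * (static_cast<int32_t>(q) - zero_point);
    }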
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index 1586270093..7b586b855f 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -33,6 +33,7 @@
#include "arm_compute/graph/nodes/ConvolutionLayerNode.h"
#include "arm_compute/graph/nodes/DeconvolutionLayerNode.h"
#include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"
+#include "arm_compute/graph/nodes/DequantizationLayerNode.h"
#include "arm_compute/graph/nodes/DetectionOutputLayerNode.h"
#include "arm_compute/graph/nodes/DetectionPostProcessLayerNode.h"
#include "arm_compute/graph/nodes/DummyNode.h"
diff --git a/arm_compute/graph/nodes/NodesFwd.h b/arm_compute/graph/nodes/NodesFwd.h
index 53f2a6a1b5..42fe0d0baf 100644
--- a/arm_compute/graph/nodes/NodesFwd.h
+++ b/arm_compute/graph/nodes/NodesFwd.h
@@ -39,6 +39,7 @@ class ConstNode;
class ConvolutionLayerNode;
class DeconvolutionLayerNode;
class DepthwiseConvolutionLayerNode;
+class DequantizationLayerNode;
class DetectionOutputLayerNode;
class DetectionPostProcessLayerNode;
class DummyNode;
diff --git a/examples/graph_mnist.cpp b/examples/graph_mnist.cpp
new file mode 100644
index 0000000000..eb66138df4
--- /dev/null
+++ b/examples/graph_mnist.cpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/graph.h"
+#include "support/ToolchainSupport.h"
+#include "utils/CommonGraphOptions.h"
+#include "utils/GraphUtils.h"
+#include "utils/Utils.h"
+
+using namespace arm_compute;
+using namespace arm_compute::utils;
+using namespace arm_compute::graph::frontend;
+using namespace arm_compute::graph_utils;
+
+/** Example demonstrating how to implement the MNIST network using the Compute Library's graph API */
+class GraphMnistExample : public Example
+{
+public:
+ GraphMnistExample()
+ : cmd_parser(), common_opts(cmd_parser), common_params(), graph(0, "LeNet")
+ {
+ }
+ bool do_setup(int argc, char **argv) override
+ {
+ // Parse arguments
+ cmd_parser.parse(argc, argv);
+ cmd_parser.validate();
+
+ // Consume common parameters
+ common_params = consume_common_graph_parameters(common_opts);
+
+ // Return when help menu is requested
+ if(common_params.help)
+ {
+ cmd_parser.print_help(argv[0]);
+ return false;
+ }
+
+ // Print parameter values
+ std::cout << common_params << std::endl;
+
+ // Get trainable parameters data path
+ std::string data_path = common_params.data_path;
+
+ // Add model path to data path
+ if(!data_path.empty() && arm_compute::is_data_type_quantized_asymmetric(common_params.data_type))
+ {
+ data_path += "/cnn_data/mnist_qasymm8_model/";
+ }
+
+ // Create input descriptor
+ const TensorShape tensor_shape = permute_shape(TensorShape(28U, 28U, 1U), DataLayout::NCHW, common_params.data_layout);
+ TensorDescriptor input_descriptor = TensorDescriptor(tensor_shape, common_params.data_type).set_layout(common_params.data_layout);
+
+ const QuantizationInfo in_quant_info = QuantizationInfo(0.003921568859368563f, 0);
+
+ const std::vector<std::pair<QuantizationInfo, QuantizationInfo>> conv_quant_info =
+ {
+ { QuantizationInfo(0.004083447158336639f, 138), QuantizationInfo(0.0046257381327450275f, 0) }, // conv0
+ { QuantizationInfo(0.0048590428195893764f, 149), QuantizationInfo(0.03558270260691643f, 0) }, // conv1
+ { QuantizationInfo(0.004008443560451269f, 146), QuantizationInfo(0.09117382764816284f, 0) }, // conv2
+ { QuantizationInfo(0.004344311077147722f, 160), QuantizationInfo(0.5494495034217834f, 167) }, // fc
+ };
+
+ // Set weights trained layout
+ const DataLayout weights_layout = DataLayout::NHWC;
+ FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo();
+ fc_info.set_weights_trained_layout(weights_layout);
+
+ graph << common_params.target
+ << common_params.fast_math_hint
+ << InputLayer(input_descriptor.set_quantization_info(in_quant_info),
+ get_input_accessor(common_params))
+ << ConvolutionLayer(
+ 3U, 3U, 32U,
+ get_weights_accessor(data_path, "conv2d_weights_quant_FakeQuantWithMinMaxVars.npy", weights_layout),
+ get_weights_accessor(data_path, "conv2d_Conv2D_bias.npy"),
+ PadStrideInfo(1U, 1U, 1U, 1U), 1, conv_quant_info.at(0).first, conv_quant_info.at(0).second)
+ .set_name("Conv0")
+
+ << ConvolutionLayer(
+ 3U, 3U, 32U,
+ get_weights_accessor(data_path, "conv2d_1_weights_quant_FakeQuantWithMinMaxVars.npy", weights_layout),
+ get_weights_accessor(data_path, "conv2d_1_Conv2D_bias.npy"),
+ PadStrideInfo(1U, 1U, 1U, 1U), 1, conv_quant_info.at(1).first, conv_quant_info.at(1).second)
+ .set_name("conv1")
+
+ << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0))).set_name("maxpool1")
+
+ << ConvolutionLayer(
+ 3U, 3U, 32U,
+ get_weights_accessor(data_path, "conv2d_2_weights_quant_FakeQuantWithMinMaxVars.npy", weights_layout),
+ get_weights_accessor(data_path, "conv2d_2_Conv2D_bias.npy"),
+ PadStrideInfo(1U, 1U, 1U, 1U), 1, conv_quant_info.at(2).first, conv_quant_info.at(2).second)
+ .set_name("conv2")
+
+ << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0))).set_name("maxpool2")
+
+ << FullyConnectedLayer(
+ 10U,
+ get_weights_accessor(data_path, "dense_weights_quant_FakeQuantWithMinMaxVars_transpose.npy", weights_layout),
+ get_weights_accessor(data_path, "dense_MatMul_bias.npy"),
+ fc_info, conv_quant_info.at(3).first, conv_quant_info.at(3).second)
+ .set_name("fc")
+
+ << SoftmaxLayer().set_name("prob");
+
+ if(arm_compute::is_data_type_quantized_asymmetric(common_params.data_type))
+ {
+ graph << DequantizationLayer().set_name("dequantize");
+ }
+
+ graph << OutputLayer(get_output_accessor(common_params, 5));
+
+ // Finalize graph
+ GraphConfig config;
+ config.num_threads = common_params.threads;
+ config.use_tuner = common_params.enable_tuner;
+ config.tuner_mode = common_params.tuner_mode;
+ config.tuner_file = common_params.tuner_file;
+
+ graph.finalize(common_params.target, config);
+
+ return true;
+ }
+ void do_run() override
+ {
+ // Run graph
+ graph.run();
+ }
+
+private:
+ CommandLineParser cmd_parser;
+ CommonGraphOptions common_opts;
+ CommonGraphParams common_params;
+ Stream graph;
+};
+
+/** Main program for Mnist Example
+ *
+ * @note To list all the possible arguments execute the binary appended with the --help option
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments
+ */
+int main(int argc, char **argv)
+{
+ return arm_compute::utils::run_example<GraphMnistExample>(argc, argv);
+}
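As a usage note, the example is driven by CommonGraphOptions, so an invocation along the lines of ./graph_mnist --target=NEON --type=QASYMM8 --data=<assets_dir> (flag values illustrative; <assets_dir> is assumed to contain cnn_data/mnist_qasymm8_model/) selects the quantized data type and therefore the DequantizationLayer branch above.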
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 228f2d211a..89c8c20de8 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -376,6 +376,11 @@ NodeID GraphBuilder::add_depthwise_convolution_node(Graph &g, NodeParams params,
return conv_nid;
}
+NodeID GraphBuilder::add_dequantization_node(Graph &g, NodeParams params, NodeIdxPair input)
+{
+ return create_simple_single_input_output_node<DequantizationLayerNode>(g, params, input);
+}
+
NodeID GraphBuilder::add_detection_output_node(Graph &g, NodeParams params, NodeIdxPair input_loc, NodeIdxPair input_conf, NodeIdxPair input_priorbox, const DetectionOutputLayerInfo &detect_info)
{
check_nodeidx_pair(input_loc, g);
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index ca6c837ab8..57b48b0c67 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -244,6 +244,8 @@ std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &
return detail::create_concatenate_layer<CLConcatenateLayer, CLTargetInfo>(*polymorphic_downcast<ConcatenateLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
return detail::create_depthwise_convolution_layer<CLDepthwiseConvolutionLayer, CLTargetInfo>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+ case NodeType::DequantizationLayer:
+ return detail::create_dequantization_layer<CLDequantizationLayer, CLTargetInfo>(*polymorphic_downcast<DequantizationLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::create_detection_output_layer<CPPDetectionOutputLayer, CLTargetInfo>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
case NodeType::DetectionPostProcessLayer:
diff --git a/src/graph/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
index a2786187a2..8ca58bc6e8 100644
--- a/src/graph/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -59,6 +59,8 @@ Status CLNodeValidator::validate(INode *node)
CLWinogradConvolutionLayer>(*polymorphic_downcast<ConvolutionLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
return detail::validate_depthwise_convolution_layer<CLDepthwiseConvolutionLayer>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+ case NodeType::DequantizationLayer:
+ return detail::validate_dequantization_layer<CLDequantizationLayer>(*polymorphic_downcast<DequantizationLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::validate_detection_output_layer<CPPDetectionOutputLayer>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
case NodeType::DetectionPostProcessLayer:
diff --git a/src/graph/backends/GLES/GCNodeValidator.cpp b/src/graph/backends/GLES/GCNodeValidator.cpp
index 9d848ab3b1..15a66f4163 100644
--- a/src/graph/backends/GLES/GCNodeValidator.cpp
+++ b/src/graph/backends/GLES/GCNodeValidator.cpp
@@ -110,6 +110,8 @@ Status GCNodeValidator::validate(INode *node)
return validate_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
return validate_depthwise_convolution_layer(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+ case NodeType::DequantizationLayer:
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : DequantizationLayer");
case NodeType::DetectionOutputLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : DetectionOutputLayer");
case NodeType::DetectionPostProcessLayer:
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index e0fd32f67d..7ff68b5a40 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -207,6 +207,8 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
return detail::create_concatenate_layer<NEConcatenateLayer, NETargetInfo>(*polymorphic_downcast<ConcatenateLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
return detail::create_depthwise_convolution_layer<NEDepthwiseConvolutionLayer, NETargetInfo>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+ case NodeType::DequantizationLayer:
+ return detail::create_dequantization_layer<NEDequantizationLayer, NETargetInfo>(*polymorphic_downcast<DequantizationLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::create_detection_output_layer<CPPDetectionOutputLayer, NETargetInfo>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
case NodeType::DetectionPostProcessLayer:
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index f17b116892..fc849595ff 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -59,6 +59,8 @@ Status NENodeValidator::validate(INode *node)
NEWinogradConvolutionLayer>(*polymorphic_downcast<ConvolutionLayerNode *>(node));
case NodeType::DepthwiseConvolutionLayer:
return detail::validate_depthwise_convolution_layer<NEDepthwiseConvolutionLayer>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
+ case NodeType::DequantizationLayer:
+ return detail::validate_dequantization_layer<NEDequantizationLayer>(*polymorphic_downcast<DequantizationLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::validate_detection_output_layer<CPPDetectionOutputLayer>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
case NodeType::DetectionPostProcessLayer:
diff --git a/src/graph/nodes/DequantizationLayerNode.cpp b/src/graph/nodes/DequantizationLayerNode.cpp
new file mode 100644
index 0000000000..27134b4167
--- /dev/null
+++ b/src/graph/nodes/DequantizationLayerNode.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DequantizationLayerNode.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Tensor.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+DequantizationLayerNode::DequantizationLayerNode()
+{
+ _input_edges.resize(1, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+bool DequantizationLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor DequantizationLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *src = input(0);
+ ARM_COMPUTE_ERROR_ON(src == nullptr);
+
+ TensorDescriptor output_desc = src->desc();
+ output_desc.data_type = DataType::F32;
+
+ return output_desc;
+}
+
+NodeType DequantizationLayerNode::type() const
+{
+ return NodeType::DequantizationLayer;
+}
+
+void DequantizationLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index e07e26f2fd..71bfc372fe 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -223,6 +223,19 @@ bool NumPyAccessor::access_tensor(ITensor &tensor)
return false;
}
+#ifdef ARM_COMPUTE_ASSERTS_ENABLED
+PrintAccessor::PrintAccessor(std::ostream &output_stream, IOFormatInfo io_fmt)
+ : _output_stream(output_stream), _io_fmt(io_fmt)
+{
+}
+
+bool PrintAccessor::access_tensor(ITensor &tensor)
+{
+ tensor.print(_output_stream, _io_fmt);
+ return false;
+}
+#endif /* ARM_COMPUTE_ASSERTS_ENABLED */
+
SaveNumPyAccessor::SaveNumPyAccessor(std::string npy_name, const bool is_fortran)
: _npy_name(std::move(npy_name)), _is_fortran(is_fortran)
{
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index bc0822cc89..d6bae3ea3f 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -199,6 +199,33 @@ private:
const bool _is_fortran;
};
+/** Print accessor class
+ * @note The print accessor will print only when asserts are enabled.
+ */
+class PrintAccessor final : public graph::ITensorAccessor
+{
+public:
+ /** Constructor
+ *
+ * @param[out] output_stream (Optional) Output stream
+ * @param[in] io_fmt (Optional) Format information
+ */
+ PrintAccessor(std::ostream &output_stream = std::cout, IOFormatInfo io_fmt = IOFormatInfo());
+ /** Allow instances of this class to be move constructed */
+ PrintAccessor(PrintAccessor &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ PrintAccessor(const PrintAccessor &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ PrintAccessor &operator=(const PrintAccessor &) = delete;
+
+ // Inherited methods overridden:
+ bool access_tensor(ITensor &tensor) override;
+
+private:
+ std::ostream &_output_stream;
+ IOFormatInfo _io_fmt;
+};
+
/** Image accessor class */
class ImageAccessor final : public graph::ITensorAccessor
{
@@ -615,6 +642,17 @@ inline std::unique_ptr<graph::ITensorAccessor> get_save_npy_output_accessor(cons
}
}
+/** Generates print tensor accessor
+ *
+ * @param[out] output_stream (Optional) Output stream
+ *
+ * @return A print tensor accessor
+ */
+inline std::unique_ptr<graph::ITensorAccessor> get_print_output_accessor(std::ostream &output_stream = std::cout)
+{
+ return arm_compute::support::cpp14::make_unique<PrintAccessor>(output_stream);
+}
+
/** Permutes a given tensor shape given the input and output data layout
*
* @param[in] tensor_shape Tensor shape to permute
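A minimal usage sketch (an assumption, not code from this patch): the new helper can feed a frontend output layer so the final tensor is dumped to a stream, bearing in mind that PrintAccessor only prints in builds with asserts enabled.

    // Illustrative: attach the print accessor to a graph::frontend::Stream's output layer.
    graph << OutputLayer(arm_compute::graph_utils::get_print_output_accessor(std::cout));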