aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichalis Spyrou <michalis.spyrou@arm.com>2017-10-09 15:46:30 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:35:24 +0000
commit27c9efb922832e5e6785a492e84a46934d9a47f8 (patch)
tree031e45ce8229c4801a8f8263a258cecbb2403763
parent63e5041e6ea0f7fb57a1cc349f1325785fa800fa (diff)
downloadComputeLibrary-27c9efb922832e5e6785a492e84a46934d9a47f8.tar.gz
COMPMID-554 Add Nodes
- BatchNormalization - DepthConvert - Dequantization - Flatten - Quantization - Reshape Change-Id: Ie01a04b7a6cc8e2b5481cf2345268e6871580d7f Reviewed-on: http://mpd-gerrit.cambridge.arm.com/91618 Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
-rw-r--r--arm_compute/graph/Nodes.h5
-rw-r--r--arm_compute/graph/Types.h6
-rw-r--r--arm_compute/graph/nodes/DepthConvertLayer.h58
-rw-r--r--arm_compute/graph/nodes/DequantizationLayer.h59
-rw-r--r--arm_compute/graph/nodes/FlattenLayer.h45
-rw-r--r--arm_compute/graph/nodes/FloorLayer.h2
-rw-r--r--arm_compute/graph/nodes/QuantizationLayer.h45
-rw-r--r--arm_compute/graph/nodes/ReshapeLayer.h54
-rw-r--r--src/graph/nodes/BatchNormalizationLayer.cpp23
-rw-r--r--src/graph/nodes/DeQuantizationLayer.cpp68
-rw-r--r--src/graph/nodes/DepthConvertLayer.cpp58
-rw-r--r--src/graph/nodes/FlattenLayer.cpp52
-rw-r--r--src/graph/nodes/QuantizationLayer.cpp48
-rw-r--r--src/graph/nodes/ReshapeLayer.cpp57
-rw-r--r--src/graph/operations/CLSimpleOperations.cpp136
-rw-r--r--src/graph/operations/NESimpleOperations.cpp136
16 files changed, 851 insertions, 1 deletions
diff --git a/arm_compute/graph/Nodes.h b/arm_compute/graph/Nodes.h
index d1ed715ae8..79407f9ab3 100644
--- a/arm_compute/graph/Nodes.h
+++ b/arm_compute/graph/Nodes.h
@@ -28,11 +28,16 @@
#include "arm_compute/graph/nodes/BatchNormalizationLayer.h"
#include "arm_compute/graph/nodes/BranchLayer.h"
#include "arm_compute/graph/nodes/ConvolutionLayer.h"
+#include "arm_compute/graph/nodes/DepthConvertLayer.h"
+#include "arm_compute/graph/nodes/DequantizationLayer.h"
+#include "arm_compute/graph/nodes/FlattenLayer.h"
#include "arm_compute/graph/nodes/FloorLayer.h"
#include "arm_compute/graph/nodes/FullyConnectedLayer.h"
#include "arm_compute/graph/nodes/L2NormalizeLayer.h"
#include "arm_compute/graph/nodes/NormalizationLayer.h"
#include "arm_compute/graph/nodes/PoolingLayer.h"
+#include "arm_compute/graph/nodes/QuantizationLayer.h"
+#include "arm_compute/graph/nodes/ReshapeLayer.h"
#include "arm_compute/graph/nodes/SoftmaxLayer.h"
#endif /* __ARM_COMPUTE_GRAPH_NODES_H__ */
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 662a7404ef..b3aa034477 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -55,6 +55,7 @@ using arm_compute::TensorShape;
using arm_compute::WeightsInfo;
using arm_compute::logging::LogLevel;
+using arm_compute::ConvertPolicy;
/**< Execution hint to the graph executor */
enum class TargetHint
@@ -77,11 +78,16 @@ enum class OperationType
ActivationLayer,
BatchNormalizationLayer,
ConvolutionLayer,
+ DepthConvertLayer,
+ DequantizationLayer,
+ FlattenLayer,
FloorLayer,
FullyConnectedLayer,
L2NormalizeLayer,
NormalizationLayer,
PoolingLayer,
+ QuantizationLayer,
+ ReshapeLayer,
SoftmaxLayer
};
diff --git a/arm_compute/graph/nodes/DepthConvertLayer.h b/arm_compute/graph/nodes/DepthConvertLayer.h
new file mode 100644
index 0000000000..03bf9b7ed5
--- /dev/null
+++ b/arm_compute/graph/nodes/DepthConvertLayer.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_DEPTHCONVERT_LAYER_H__
+#define __ARM_COMPUTE_GRAPH_DEPTHCONVERT_LAYER_H__
+
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/ITensorObject.h"
+#include "arm_compute/graph/Types.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** DepthConvertLayer layer node */
+class DepthConvertLayer final : public INode
+{
+public:
+ /** Default constructor
+ *
+ * @param[in] policy Conversion policy
+ * @param[in] shift Shift value
+ * @param[in] output_datatype Output datatype
+ */
+ DepthConvertLayer(const ConvertPolicy policy, uint32_t shift, DataType output_datatype);
+
+ // Inherited methods overriden:
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output) override;
+
+private:
+ const ConvertPolicy _policy;
+ uint32_t _shift;
+ DataType _output_datatype;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_DEPTHCONVERT_LAYER_H__ */
diff --git a/arm_compute/graph/nodes/DequantizationLayer.h b/arm_compute/graph/nodes/DequantizationLayer.h
new file mode 100644
index 0000000000..f9b7e8af87
--- /dev/null
+++ b/arm_compute/graph/nodes/DequantizationLayer.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_DEQUANTIZATION_LAYER_H__
+#define __ARM_COMPUTE_GRAPH_DEQUANTIZATION_LAYER_H__
+
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/ITensorObject.h"
+#include "arm_compute/graph/Tensor.h"
+#include "arm_compute/graph/Types.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** DequantizationLayer layer node */
+class DequantizationLayer final : public INode
+{
+public:
+ /** Default constructor
+ *
+ * @param[in] min_max Min max value tensor
+ */
+ template <typename AccessorType>
+ DequantizationLayer(AccessorType &&min_max)
+ : _min_max(std::move(min_max))
+ {
+ }
+
+ // Inherited methods overriden:
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output) override;
+
+private:
+ Tensor _min_max;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_DEQUANTIZATION_LAYER_H__ */
diff --git a/arm_compute/graph/nodes/FlattenLayer.h b/arm_compute/graph/nodes/FlattenLayer.h
new file mode 100644
index 0000000000..c5f51a2b3e
--- /dev/null
+++ b/arm_compute/graph/nodes/FlattenLayer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_FLATTEN_LAYER_H__
+#define __ARM_COMPUTE_GRAPH_FLATTEN_LAYER_H__
+
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/ITensorObject.h"
+#include "arm_compute/graph/Types.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Flatten layer node */
+class FlattenLayer final : public INode
+{
+public:
+ // Inherited methods overriden:
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output) override;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_FLATTEN_LAYER_H__ */
diff --git a/arm_compute/graph/nodes/FloorLayer.h b/arm_compute/graph/nodes/FloorLayer.h
index f88a5b9d94..146e2c16dd 100644
--- a/arm_compute/graph/nodes/FloorLayer.h
+++ b/arm_compute/graph/nodes/FloorLayer.h
@@ -33,7 +33,7 @@ namespace arm_compute
namespace graph
{
/** Floor layer node */
-class FloorLayer : public INode
+class FloorLayer final : public INode
{
public:
// Inherited methods overriden:
diff --git a/arm_compute/graph/nodes/QuantizationLayer.h b/arm_compute/graph/nodes/QuantizationLayer.h
new file mode 100644
index 0000000000..a3ef02530e
--- /dev/null
+++ b/arm_compute/graph/nodes/QuantizationLayer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_QUANTIZATION_LAYER_H__
+#define __ARM_COMPUTE_GRAPH_QUANTIZATION_LAYER_H__
+
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/ITensorObject.h"
+#include "arm_compute/graph/Types.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Quantization layer node */
+class QuantizationLayer final : public INode
+{
+public:
+ // Inherited methods overriden:
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output) override;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_QUANTIZATION_LAYER_H__ */
diff --git a/arm_compute/graph/nodes/ReshapeLayer.h b/arm_compute/graph/nodes/ReshapeLayer.h
new file mode 100644
index 0000000000..b727d33a2c
--- /dev/null
+++ b/arm_compute/graph/nodes/ReshapeLayer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_RESHAPE_LAYER_H__
+#define __ARM_COMPUTE_GRAPH_RESHAPE_LAYER_H__
+
+#include "arm_compute/graph/GraphContext.h"
+#include "arm_compute/graph/INode.h"
+#include "arm_compute/graph/ITensorObject.h"
+#include "arm_compute/graph/Types.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** Reshape layer node */
+class ReshapeLayer final : public INode
+{
+public:
+ /** Default constructor
+ *
+ * @param[in] shape Output shape
+ */
+ ReshapeLayer(const TensorShape shape);
+
+ // Inherited methods overriden:
+ std::unique_ptr<arm_compute::IFunction> instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output) override;
+
+private:
+ TensorShape _shape;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_RESHAPE_LAYER_H__ */
diff --git a/src/graph/nodes/BatchNormalizationLayer.cpp b/src/graph/nodes/BatchNormalizationLayer.cpp
index bce19016d7..a433f39dc4 100644
--- a/src/graph/nodes/BatchNormalizationLayer.cpp
+++ b/src/graph/nodes/BatchNormalizationLayer.cpp
@@ -56,6 +56,11 @@ std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_nod
_gamma.set_info(TensorInfo(TensorShape(batch_norm_size), in->info()->num_channels(), in->info()->data_type(), in->info()->fixed_point_position()));
}
+ bool mean_is_loaded = _mean.tensor() != nullptr;
+ bool var_is_loaded = _var.tensor() != nullptr;
+ bool gamma_is_loaded = _gamma.tensor() != nullptr;
+ bool beta_is_loaded = _beta.tensor() != nullptr;
+
// Create node context
NodeContext node_ctx(OperationType::BatchNormalizationLayer);
node_ctx.set_target(_target_hint);
@@ -67,6 +72,24 @@ std::unique_ptr<arm_compute::IFunction> BatchNormalizationLayer::instantiate_nod
node_ctx.add_output(out);
node_ctx.add_parameter<float>("epsilon", _epsilon);
+ // Fill tensors
+ if(!mean_is_loaded)
+ {
+ _mean.allocate_and_fill_if_needed();
+ }
+ if(!var_is_loaded)
+ {
+ _var.allocate_and_fill_if_needed();
+ }
+ if(!gamma_is_loaded)
+ {
+ _gamma.allocate_and_fill_if_needed();
+ }
+ if(!beta_is_loaded)
+ {
+ _beta.allocate_and_fill_if_needed();
+ }
+
// Get function
return OperationRegistry::get().find_operation(OperationType::BatchNormalizationLayer, _target_hint)->configure(node_ctx);
} \ No newline at end of file
diff --git a/src/graph/nodes/DeQuantizationLayer.cpp b/src/graph/nodes/DeQuantizationLayer.cpp
new file mode 100644
index 0000000000..3760de6487
--- /dev/null
+++ b/src/graph/nodes/DeQuantizationLayer.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DequantizationLayer.h"
+
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
+
+using namespace arm_compute::graph;
+
+std::unique_ptr<arm_compute::IFunction> DequantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+{
+ ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
+
+ _target_hint = ctx.hints().target_hint();
+ arm_compute::ITensor *in = input->tensor();
+ arm_compute::ITensor *out = output->tensor();
+
+ if(_min_max.tensor() == nullptr)
+ {
+ TensorShape shape = in->info()->tensor_shape();
+ shape.set(Window::DimX, 2);
+ shape.remove_dimension(1);
+ shape.remove_dimension(1);
+
+ _min_max.set_info(TensorInfo(shape, in->info()->num_channels(), DataType::F32));
+ _min_max.set_target(_target_hint);
+ }
+
+ bool minmax_is_loaded = _min_max.tensor() != nullptr;
+
+ // Create node context
+ NodeContext node_ctx(OperationType::DequantizationLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(_min_max.tensor());
+ node_ctx.add_output(out);
+
+ // Fill min max
+ if(!minmax_is_loaded)
+ {
+ _min_max.allocate_and_fill_if_needed();
+ }
+
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::DequantizationLayer, _target_hint)->configure(node_ctx);
+}
diff --git a/src/graph/nodes/DepthConvertLayer.cpp b/src/graph/nodes/DepthConvertLayer.cpp
new file mode 100644
index 0000000000..62f308213e
--- /dev/null
+++ b/src/graph/nodes/DepthConvertLayer.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DepthConvertLayer.h"
+
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
+
+using namespace arm_compute::graph;
+
+DepthConvertLayer::DepthConvertLayer(const ConvertPolicy policy, uint32_t shift, DataType output_datatype)
+ : _policy(policy), _shift(shift), _output_datatype(output_datatype)
+{
+}
+
+std::unique_ptr<arm_compute::IFunction> DepthConvertLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+{
+ ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
+
+ _target_hint = ctx.hints().target_hint();
+ arm_compute::ITensor *in = input->tensor();
+ arm_compute::ITensor *out = output->tensor();
+
+ // Auto configure output
+ arm_compute::auto_init_if_empty(*out->info(), in->info()->tensor_shape(), 1, _output_datatype, in->info()->fixed_point_position());
+
+ // Create node context
+ NodeContext node_ctx(OperationType::DepthConvertLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
+ node_ctx.add_parameter<ConvertPolicy>("ConvertPolicy", _policy);
+ node_ctx.add_parameter<uint32_t>("shift", _shift);
+
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::DepthConvertLayer, _target_hint)->configure(node_ctx);
+}
diff --git a/src/graph/nodes/FlattenLayer.cpp b/src/graph/nodes/FlattenLayer.cpp
new file mode 100644
index 0000000000..1e42bd0cfa
--- /dev/null
+++ b/src/graph/nodes/FlattenLayer.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/FlattenLayer.h"
+
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute::graph;
+
+std::unique_ptr<arm_compute::IFunction> FlattenLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+{
+ ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
+
+ _target_hint = ctx.hints().target_hint();
+ arm_compute::ITensor *in = input->tensor();
+ arm_compute::ITensor *out = output->tensor();
+
+ // Auto configure output
+ arm_compute::auto_init_if_empty(*out->info(), in->info()->tensor_shape(), 1, in->info()->data_type(), in->info()->fixed_point_position());
+
+ // Create node context
+ NodeContext node_ctx(OperationType::FlattenLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
+
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::FlattenLayer, _target_hint)->configure(node_ctx);
+}
diff --git a/src/graph/nodes/QuantizationLayer.cpp b/src/graph/nodes/QuantizationLayer.cpp
new file mode 100644
index 0000000000..8e7dadbc93
--- /dev/null
+++ b/src/graph/nodes/QuantizationLayer.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/QuantizationLayer.h"
+
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
+
+using namespace arm_compute::graph;
+
+std::unique_ptr<arm_compute::IFunction> QuantizationLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+{
+ ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
+
+ _target_hint = ctx.hints().target_hint();
+ arm_compute::ITensor *in = input->tensor();
+ arm_compute::ITensor *out = output->tensor();
+
+ // Create node context
+ NodeContext node_ctx(OperationType::QuantizationLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
+
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::QuantizationLayer, _target_hint)->configure(node_ctx);
+}
diff --git a/src/graph/nodes/ReshapeLayer.cpp b/src/graph/nodes/ReshapeLayer.cpp
new file mode 100644
index 0000000000..3d1a679112
--- /dev/null
+++ b/src/graph/nodes/ReshapeLayer.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/ReshapeLayer.h"
+
+#include "arm_compute/graph/NodeContext.h"
+#include "arm_compute/graph/OperationRegistry.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute::graph;
+
+ReshapeLayer::ReshapeLayer(TensorShape shape)
+ : _shape(shape)
+{
+}
+
+std::unique_ptr<arm_compute::IFunction> ReshapeLayer::instantiate_node(GraphContext &ctx, ITensorObject *input, ITensorObject *output)
+{
+ ARM_COMPUTE_ERROR_ON(input == nullptr || input->tensor() == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr || output->tensor() == nullptr);
+
+ _target_hint = ctx.hints().target_hint();
+ arm_compute::ITensor *in = input->tensor();
+ arm_compute::ITensor *out = output->tensor();
+
+ // Auto configure output
+ arm_compute::auto_init_if_empty(*out->info(), _shape, 1, in->info()->data_type(), in->info()->fixed_point_position());
+
+ // Create node context
+ NodeContext node_ctx(OperationType::ReshapeLayer);
+ node_ctx.set_target(_target_hint);
+ node_ctx.add_input(in);
+ node_ctx.add_output(out);
+
+ // Get function
+ return OperationRegistry::get().find_operation(OperationType::ReshapeLayer, _target_hint)->configure(node_ctx);
+}
diff --git a/src/graph/operations/CLSimpleOperations.cpp b/src/graph/operations/CLSimpleOperations.cpp
index b4c217b1a4..a42fada6f3 100644
--- a/src/graph/operations/CLSimpleOperations.cpp
+++ b/src/graph/operations/CLSimpleOperations.cpp
@@ -106,6 +106,90 @@ REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationT
return std::move(batch_norm);
}
+/* DepthConvert Layer */
+REGISTER_SIMPLE_OPERATION(CLDepthConvertLayerOperation, OPENCL, OperationType::DepthConvertLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+ const auto conv_policy = ctx.parameter<ConvertPolicy>("ConvertPolicy");
+ const auto shift = ctx.parameter<uint32_t>("shift");
+
+ // Create and configure function
+ auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthConvert>();
+ depthconvert->configure(in, out, conv_policy, shift);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDepthConvertLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " shift: " << shift
+ << std::endl);
+
+ return std::move(depthconvert);
+}
+
+/* DeQuantizationLayer Layer */
+REGISTER_SIMPLE_OPERATION(CLDequantizationLayerOperation, OPENCL, OperationType::DequantizationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 2);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+ auto *min_max = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1));
+
+ // Create and configure function
+ auto dequantization = arm_compute::support::cpp14::make_unique<arm_compute::CLDequantizationLayer>();
+ dequantization->configure(in, out, min_max);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDequantizationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Min max shape: " << min_max->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(dequantization);
+}
+
+/* Flatten Layer */
+REGISTER_SIMPLE_OPERATION(CLFlattenLayerOperation, OPENCL, OperationType::FlattenLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto flatten = arm_compute::support::cpp14::make_unique<arm_compute::CLFlattenLayer>();
+ flatten->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFlattenLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(flatten);
+}
+
/* Floor Layer */
REGISTER_SIMPLE_OPERATION(CLFloorLayerOperation, OPENCL, OperationType::FloorLayer)
{
@@ -250,6 +334,58 @@ REGISTER_SIMPLE_OPERATION(CLPoolingLayerOperation, OPENCL, OperationType::Poolin
return std::move(pool);
}
+/* Quantization Layer: wraps CLQuantizationLayer as a single-input/single-output OpenCL graph operation */
+REGISTER_SIMPLE_OPERATION(CLQuantizationLayerOperation, OPENCL, OperationType::QuantizationLayer)
+{
+    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+    // Extract IO and info
+    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+
+    // Create and configure function
+    auto quantization = arm_compute::support::cpp14::make_unique<arm_compute::CLQuantizationLayer>();
+    quantization->configure(in, out);
+
+    // Log info (fixed: previously logged the NEON class name "NEQuantizationLayer" in the CL backend)
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLQuantizationLayer"
+                               << " Data Type: " << in->info()->data_type()
+                               << " Input shape: " << in->info()->tensor_shape()
+                               << " Output shape: " << out->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(quantization);
+}
+
+/* Reshape Layer: wraps CLReshapeLayer as a single-input/single-output OpenCL graph operation */
+REGISTER_SIMPLE_OPERATION(CLReshapeLayerOperation, OPENCL, OperationType::ReshapeLayer)
+{
+    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
+    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
+
+    // Extract IO and info
+    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
+    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
+
+    // Create and configure function
+    auto reshape = arm_compute::support::cpp14::make_unique<arm_compute::CLReshapeLayer>();
+    reshape->configure(in, out);
+
+    // Log info (fixed: previously logged the NEON class name "NEReshapeLayer" in the CL backend)
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLReshapeLayer"
+                               << " Data Type: " << in->info()->data_type()
+                               << " Input shape: " << in->info()->tensor_shape()
+                               << " Output shape: " << out->info()->tensor_shape()
+                               << std::endl);
+
+    return std::move(reshape);
+}
+
/* Softmax Layer */
REGISTER_SIMPLE_OPERATION(CLSoftmaxLayerOperation, OPENCL, OperationType::SoftmaxLayer)
{
diff --git a/src/graph/operations/NESimpleOperations.cpp b/src/graph/operations/NESimpleOperations.cpp
index 59f252ae44..12f8c6c76b 100644
--- a/src/graph/operations/NESimpleOperations.cpp
+++ b/src/graph/operations/NESimpleOperations.cpp
@@ -106,6 +106,90 @@ REGISTER_SIMPLE_OPERATION(NEBatchNormalizationLayerOperation, NEON, OperationTyp
return std::move(batch_norm);
}
+/* DepthConvert Layer: wraps NEDepthConvert, reading "ConvertPolicy" and "shift" from the operation context */
+REGISTER_SIMPLE_OPERATION(NEDepthConvertLayerOperation, NEON, OperationType::DepthConvertLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info; "shift" is forwarded to NEDepthConvert::configure as-is
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+ const auto conv_policy = ctx.parameter<ConvertPolicy>("ConvertPolicy");
+ const auto shift = ctx.parameter<uint32_t>("shift");
+
+ // Create and configure function
+ auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::NEDepthConvert>();
+ depthconvert->configure(in, out, conv_policy, shift);
+
+ // Log info (NOTE(review): conv_policy is not logged here, only shift — consider adding it)
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDepthConvertLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " shift: " << shift
+ << std::endl);
+
+ return std::move(depthconvert);
+}
+
+/* Dequantization Layer: wraps NEDequantizationLayer; output(1) receives the min/max tensor */
+REGISTER_SIMPLE_OPERATION(NEDequantizationLayerOperation, NEON, OperationType::DequantizationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 2);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(1)) == nullptr);
+
+ // Extract IO and info; this operation is unusual in having two outputs (data + min/max)
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+ auto *min_max = dynamic_cast<arm_compute::ITensor *>(ctx.output(1));
+
+ // Create and configure function
+ auto dequantization = arm_compute::support::cpp14::make_unique<arm_compute::NEDequantizationLayer>();
+ dequantization->configure(in, out, min_max);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEDequantizationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << " Min max shape: " << min_max->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(dequantization);
+}
+
+/* Flatten Layer: wraps NEFlattenLayer as a single-input/single-output NEON graph operation */
+REGISTER_SIMPLE_OPERATION(NEFlattenLayerOperation, NEON, OperationType::FlattenLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto flatten = arm_compute::support::cpp14::make_unique<arm_compute::NEFlattenLayer>();
+ flatten->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEFlattenLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(flatten);
+}
+
/* Floor Layer */
REGISTER_SIMPLE_OPERATION(NEFloorLayerOperation, NEON, OperationType::FloorLayer)
{
@@ -250,6 +334,58 @@ REGISTER_SIMPLE_OPERATION(NEPoolingLayerOperation, NEON, OperationType::PoolingL
return std::move(pool);
}
+/* Quantization Layer: wraps NEQuantizationLayer as a single-input/single-output NEON graph operation */
+REGISTER_SIMPLE_OPERATION(NEQuantizationLayerOperation, NEON, OperationType::QuantizationLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto quantization = arm_compute::support::cpp14::make_unique<arm_compute::NEQuantizationLayer>();
+ quantization->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEQuantizationLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(quantization);
+}
+
+/* Reshape Layer: wraps NEReshapeLayer as a single-input/single-output NEON graph operation */
+REGISTER_SIMPLE_OPERATION(NEReshapeLayerOperation, NEON, OperationType::ReshapeLayer)
+{
+ ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
+ ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.input(0)) == nullptr);
+ ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ITensor *>(ctx.output(0)) == nullptr);
+
+ // Extract IO and info
+ auto *in = dynamic_cast<arm_compute::ITensor *>(ctx.input(0));
+ auto *out = dynamic_cast<arm_compute::ITensor *>(ctx.output(0));
+
+ // Create and configure function
+ auto reshape = arm_compute::support::cpp14::make_unique<arm_compute::NEReshapeLayer>();
+ reshape->configure(in, out);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating NEReshapeLayer"
+ << " Data Type: " << in->info()->data_type()
+ << " Input shape: " << in->info()->tensor_shape()
+ << " Output shape: " << out->info()->tensor_shape()
+ << std::endl);
+
+ return std::move(reshape);
+}
+
/* Softmax Layer */
REGISTER_SIMPLE_OPERATION(NESoftmaxLayerOperation, NEON, OperationType::SoftmaxLayer)
{