-rw-r--r--  arm_compute/core/Types.h                        |   3
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h   |   4
-rw-r--r--  arm_compute/graph/GraphBuilder.h                |  11
-rw-r--r--  arm_compute/graph/INodeVisitor.h                |   9
-rw-r--r--  arm_compute/graph/TypePrinter.h                 |   3
-rw-r--r--  arm_compute/graph/Types.h                       |   2
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h    | 184
-rw-r--r--  arm_compute/graph/backends/ValidateHelpers.h    |  23
-rw-r--r--  arm_compute/graph/frontend/Layers.h             |  54
-rw-r--r--  arm_compute/graph/nodes/Nodes.h                 |   1
-rw-r--r--  arm_compute/graph/nodes/NodesFwd.h              |   1
-rw-r--r--  arm_compute/graph/nodes/PriorBoxLayerNode.h     |  67
-rwxr-xr-x  scripts/caffe_data_extractor.py                 |   2
-rw-r--r--  src/graph/GraphBuilder.cpp                      |  15
-rw-r--r--  src/graph/backends/CL/CLFunctionsFactory.cpp    |   2
-rw-r--r--  src/graph/backends/CL/CLNodeValidator.cpp       |   2
-rw-r--r--  src/graph/backends/GLES/GCFunctionsFactory.cpp  |  21
-rw-r--r--  src/graph/backends/GLES/GCNodeValidator.cpp     |   2
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp   |  10
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp     |   2
-rw-r--r--  src/graph/detail/ExecutionHelpers.cpp           |   3
-rw-r--r--  src/graph/nodes/FlattenLayerNode.cpp            |   2
-rw-r--r--  src/graph/nodes/PriorBoxLayerNode.cpp           |  95
-rw-r--r--  utils/GraphUtils.cpp                            |   6
-rw-r--r--  utils/GraphUtils.h                              |   6
-rw-r--r--  utils/Utils.h                                   |   8
26 files changed, 461 insertions(+), 77 deletions(-)
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 38094ee56a..ef25dc4150 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -857,7 +857,8 @@ public:
* @param[in] steps (Optional) Step values.
*/
PriorBoxLayerInfo(const std::vector<float> &min_sizes, const std::vector<float> &variances, float offset, bool flip = true, bool clip = false,
- const std::vector<float> &max_sizes = {}, const std::vector<float> &aspect_ratios = {}, const Coordinates2D &img_size = Coordinates2D{ 0, 0 }, const std::array<float, 2> &steps = { { 0.f, 0.f } })
+ const std::vector<float> &max_sizes = {}, const std::vector<float> &aspect_ratios = {},
+ const Coordinates2D &img_size = Coordinates2D{ 0, 0 }, const std::array<float, 2> &steps = { { 0.f, 0.f } })
: _min_sizes(min_sizes),
_variances(variances),
_offset(offset),
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 5c9457ed6b..da9ff56fd0 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -646,13 +646,13 @@ inline TensorShape compute_space_to_batch_shape(const ITensorInfo *input, const
return output_shape;
}
+
inline TensorShape compute_prior_box_shape(const ITensorInfo &input, const PriorBoxLayerInfo &info)
{
DataLayout data_layout = input.data_layout();
const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
-
- const int num_priors = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();
+ const int num_priors = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();
TensorShape output_shape{};
output_shape.set(0, input.dimension(idx_w) * input.dimension(idx_h) * num_priors * 4);
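
As a quick numeric check of the formula above — a hypothetical single feature map, with sizes chosen purely for illustration and not taken from this patch:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical SSD-style cell configuration (illustrative values).
    std::vector<float> min_sizes{ 60.f };
    std::vector<float> max_sizes{ 111.f };
    std::vector<float> aspect_ratios{ 1.f, 2.f };

    // Same formula as compute_prior_box_shape() above.
    const std::size_t num_priors = aspect_ratios.size() * min_sizes.size() + max_sizes.size(); // 2 * 1 + 1 = 3
    const std::size_t w = 38, h = 38; // feature map width/height
    std::cout << w * h * num_priors * 4 << std::endl; // dimension 0: 17328
    return 0;
}
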
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index 22fc041684..57ce349984 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -308,6 +308,17 @@ public:
* @return Node ID of the created node, EmptyNodeID in case of error
*/
static NodeID add_pooling_node(Graph &g, NodeParams params, NodeIdxPair input, PoolingLayerInfo pool_info);
+ /** Adds a priorbox layer node to the graph
+ *
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input0 First input to the priorbox layer node as a NodeID-Index pair
+ * @param[in] input1 Second input to the priorbox layer node as a NodeID-Index pair
+ * @param[in] prior_info PriorBox parameters
+ *
+ * @return Node ID of the created node, EmptyNodeID in case of error
+ */
+ static NodeID add_priorbox_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, PriorBoxLayerInfo prior_info);
/** Adds a reorg layer node to the graph
*
* @param[in] g Graph to add the node to
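
A minimal sketch of driving the new add_priorbox_node entry point directly — the graph, node IDs and PriorBox parameters are all illustrative, not taken from this patch:

#include "arm_compute/graph.h"

using namespace arm_compute::graph;

// Sketch: connect an existing feature-map node and the image input node
// to a new PriorBox node. feature_nid/image_nid are assumed to already
// exist in g.
NodeID add_example_priorbox(Graph &g, NodeID feature_nid, NodeID image_nid)
{
    NodeParams  params{ "priorbox", Target::NEON };
    NodeIdxPair feature{ feature_nid, 0 };
    NodeIdxPair image{ image_nid, 0 };
    PriorBoxLayerInfo info({ 60.f }, { 0.1f, 0.1f, 0.2f, 0.2f }, 0.5f /* offset */);
    return GraphBuilder::add_priorbox_node(g, params, feature, image, info);
}
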
diff --git a/arm_compute/graph/INodeVisitor.h b/arm_compute/graph/INodeVisitor.h
index a21c9b2d48..2df2574d62 100644
--- a/arm_compute/graph/INodeVisitor.h
+++ b/arm_compute/graph/INodeVisitor.h
@@ -111,6 +111,11 @@ public:
* @param[in] n Node to visit.
*/
virtual void visit(PoolingLayerNode &n) = 0;
+ /** Visit PriorBoxLayerNode.
+ *
+ * @param[in] n Node to visit.
+ */
+ virtual void visit(PriorBoxLayerNode &n) = 0;
/** Visit ReshapeLayerNode.
*
* @param[in] n Node to visit.
@@ -201,6 +206,10 @@ public:
{
default_visit();
}
+ virtual void visit(PriorBoxLayerNode &n) override
+ {
+ default_visit();
+ }
virtual void visit(ReshapeLayerNode &n) override
{
default_visit();
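
The new pure virtual above forces every concrete visitor to handle PriorBox nodes, while visitors deriving from the default implementation fall back to default_visit() as shown. A visitor that singles out PriorBox nodes might look like this sketch (class name and behaviour are illustrative):

#include "arm_compute/graph/INodeVisitor.h"
#include "arm_compute/graph/nodes/PriorBoxLayerNode.h"

#include <iostream>

using namespace arm_compute::graph;

// Sketch: log every PriorBox node; every other node type falls through
// to default_visit().
class PriorBoxLogger final : public DefaultNodeVisitor
{
public:
    void visit(PriorBoxLayerNode &n) override
    {
        std::cout << "PriorBox node ID: " << n.id() << std::endl;
    }
    void default_visit() override
    {
        // Ignore all other node types.
    }
};
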
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index c66f9cb374..d633091d16 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -113,6 +113,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
case NodeType::PoolingLayer:
os << "PoolingLayer";
break;
+ case NodeType::PriorBoxLayer:
+ os << "PriorBoxLayer";
+ break;
case NodeType::ReorgLayer:
os << "ReorgLayer";
break;
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 24c24d328f..b6803c89bc 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -51,6 +51,7 @@ using arm_compute::FullyConnectedLayerInfo;
using arm_compute::PadStrideInfo;
using arm_compute::PoolingLayerInfo;
using arm_compute::PoolingType;
+using arm_compute::PriorBoxLayerInfo;
using arm_compute::DimensionRoundingType;
using arm_compute::InterpolationPolicy;
@@ -141,6 +142,7 @@ enum class NodeType
PadLayer,
PermuteLayer,
PoolingLayer,
+ PriorBoxLayer,
ReorgLayer,
ReshapeLayer,
ResizeLayer,
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index e556e2f284..0d7210f7f8 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -72,9 +72,9 @@ template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
- << " Target : " << TargetInfo::TargetType
- << " ID : " << node.id()
- << " Name: " << node.name()
+ << " Target: " << TargetInfo::TargetType
+ << " ID: " << node.id()
+ << node.name()
<< std::endl);
ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
@@ -105,7 +105,9 @@ std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
auto func = support::cpp14::make_unique<ActivationLayerFunction>();
func->configure(input, output, act_info);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
<< " Target " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Shape: " << input->info()->tensor_shape()
@@ -147,13 +149,15 @@ std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLa
func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Shape: " << input->info()->tensor_shape()
<< " Epsilon: " << epsilon << " "
<< (fused_act.enabled() ? to_string(fused_act.activation()) : "")
- << " InPlace : " << is_in_place_operation(input, output)
+ << " InPlace: " << is_in_place_operation(input, output)
<< std::endl);
return std::move(func);
@@ -218,8 +222,10 @@ std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode
auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
func->configure(input, output, num_groups);
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Shape: " << input->info()->tensor_shape()
<< " Num groups: " << num_groups
@@ -263,8 +269,10 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
func->configure(inputs, output, concat_axis);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << output->info()->data_type()
<< " Shape: " << output->info()->tensor_shape()
<< " Num Inputs: " << inputs.size()
@@ -350,8 +358,10 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
<< " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Output QuantInfo: " << output->info()->quantization_info();
}
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << func_name
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Groups: " << num_groups
<< qss.str()
@@ -396,8 +406,10 @@ std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &no
input, weights, biases, output, deconv_info, inner_border.x(), inner_border.y());
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Weights shape: " << weights->info()->tensor_shape()
@@ -462,8 +474,10 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvoluti
<< " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Output QuantInfo: " << output->info()->quantization_info();
}
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << func_name
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< qss.str()
<< " Input shape: " << input->info()->tensor_shape()
@@ -524,11 +538,13 @@ std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
}
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
- << " Operation " << func_name
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
+ << " Operation: " << func_name
<< " Data Type: " << input1->info()->data_type()
- << " Shape : " << input1->info()->tensor_shape()
+ << " Shape: " << input1->info()->tensor_shape()
<< std::endl);
return func;
@@ -560,8 +576,10 @@ std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
func->configure(input, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -610,8 +628,10 @@ std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode
<< " Weights QuantInfo: " << weights->info()->quantization_info()
<< " Output QuantInfo: " << output->info()->quantization_info();
}
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< qss.str()
<< " Input shape: " << input->info()->tensor_shape()
@@ -700,8 +720,10 @@ std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &no
func->configure(input, output, norm_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -740,8 +762,10 @@ std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVL
func->configure(input, output, mean, std);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Shape: " << input->info()->tensor_shape()
<< std::endl);
@@ -775,8 +799,10 @@ std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
func->configure(input, output, padding);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -811,8 +837,10 @@ std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
func->configure(input, output, perm);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -848,8 +876,10 @@ std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
func->configure(input, output, pool_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -859,6 +889,48 @@ std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
return std::move(func);
}
+/** Create a backend priorbox layer function
+ *
+ * @tparam PriorBoxLayerFunction Backend priorbox function
+ * @tparam TargetInfo Target-specific information
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend priorbox layer function
+ */
+template <typename PriorBoxLayerFunction, typename TargetInfo>
+std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
+{
+ validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
+
+ // Extract IO and info
+ typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
+ typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
+ typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
+ const PriorBoxLayerInfo prior_info = node.priorbox_info();
+ ARM_COMPUTE_ERROR_ON(input0 == nullptr);
+ ARM_COMPUTE_ERROR_ON(input1 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output == nullptr);
+
+ // Create and configure function
+ auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
+ func->configure(input0, input1, output, prior_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input0->info()->data_type()
+ << " Input0 shape: " << input0->info()->tensor_shape()
+ << " Input1 shape: " << input1->info()->tensor_shape()
+ << " Output shape: " << output->info()->tensor_shape()
+ << " PriorBoxLayer info: " << prior_info
+ << std::endl);
+
+ return std::move(func);
+}
+
/** Create a backend reorg layer function
*
* @tparam ReorgLayerFunction Backend reorg function
@@ -884,8 +956,10 @@ std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
func->configure(input, output, node.stride());
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -919,8 +993,10 @@ std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
func->configure(input, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -955,8 +1031,10 @@ std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
func->configure(input, output, policy, BorderMode::CONSTANT);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -1034,8 +1112,10 @@ std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
func->configure(input, output, node.starts(), node.ends());
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -1071,8 +1151,10 @@ std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphCon
func->configure(input, output, beta);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -1110,8 +1192,10 @@ std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphC
func->configure(input, output, info, upsampling_policy);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -1150,8 +1234,10 @@ std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &
func->configure(input, output, act_info, num_classes);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << TargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 7c31a80967..a6864c2286 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -300,6 +300,29 @@ Status validate_permute_layer(PermuteLayerNode &node)
return PermuteLayer::validate(input, output, perm);
}
+/** Validates a priorbox layer node
+ *
+ * @tparam PriorBoxLayer PriorBox layer type
+ *
+ * @param[in] node Node to validate
+ *
+ * @return Status
+ */
+template <typename PriorBoxLayer>
+Status validate_priorbox_layer(PriorBoxLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating PriorBoxLayer node with ID: " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 2);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 1);
+
+ // Extract IO and info
+ arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
+ arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
+ arm_compute::ITensorInfo *output = get_backing_tensor_info(node.output(0));
+ const PriorBoxLayerInfo prior_info = node.priorbox_info();
+
+ return PriorBoxLayer::validate(input0, input1, output, prior_info);
+}
/** Validates a Reorg layer node
*
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index 7ed448e3f2..78a3f20f1f 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -225,7 +225,27 @@ public:
*/
template <typename... Ts>
ConcatLayer(SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
- : _sub_streams()
+ : _sub_streams(), _axis(DataLayoutDimension::CHANNEL)
+ {
+ _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
+ _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
+
+ utility::for_each([&](SubStream && sub_stream)
+ {
+ _sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
+ },
+ std::move(rest_sub_streams)...);
+ }
+ /** Construct a concatenation layer
+ *
+ * @param[in] axis Axis along which the concatenation will be performed
+ * @param[in] sub_stream1 First graph branch
+ * @param[in] sub_stream2 Second graph branch
+ * @param[in] rest_sub_streams Rest sub-graph branches
+ */
+ template <typename... Ts>
+ ConcatLayer(DataLayoutDimension axis, SubStream &&sub_stream1, SubStream &&sub_stream2, Ts &&... rest_sub_streams)
+ : _sub_streams(), _axis(axis)
{
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream1)));
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream2)));
@@ -242,7 +262,7 @@ public:
*/
template <typename... Ts>
ConcatLayer(SubStream &&sub_stream)
- : _sub_streams()
+ : _sub_streams(), _axis(DataLayoutDimension::CHANNEL)
{
_sub_streams.push_back(arm_compute::support::cpp14::make_unique<SubStream>(std::move(sub_stream)));
}
@@ -269,13 +289,14 @@ public:
}
}
}
- nid = GraphBuilder::add_concatenate_node(s.graph(), common_params, nodes, DataLayoutDimension::CHANNEL);
+ nid = GraphBuilder::add_concatenate_node(s.graph(), common_params, nodes, _axis);
}
return nid;
}
private:
std::vector<std::unique_ptr<SubStream>> _sub_streams;
+ DataLayoutDimension _axis;
};
/** Convolution Layer */
@@ -724,6 +745,33 @@ private:
PoolingLayerInfo _pool_info;
};
+/** PriorBox Layer */
+class PriorBoxLayer final : public ILayer
+{
+public:
+ /** Construct a priorbox layer.
+ *
+ * @param[in] sub_stream First graph sub-stream
+ * @param[in] prior_info PriorBox parameters.
+ */
+ PriorBoxLayer(SubStream &&sub_stream, PriorBoxLayerInfo prior_info)
+ : _ss(std::move(sub_stream)), _prior_info(prior_info)
+ {
+ }
+
+ NodeID create_layer(IStream &s) override
+ {
+ NodeParams common_params = { name(), s.hints().target_hint };
+ NodeIdxPair input0 = { s.tail_node(), 0 };
+ NodeIdxPair input1 = { _ss.tail_node(), 0 };
+ return GraphBuilder::add_priorbox_node(s.graph(), common_params, input0, input1, _prior_info);
+ }
+
+private:
+ SubStream _ss;
+ PriorBoxLayerInfo _prior_info;
+};
+
/** Reorg Layer */
class ReorgLayer final : public ILayer
{
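
The new frontend PriorBoxLayer above pairs the stream's current tail (a feature map) with a SubStream that is typically rooted at the network input. A hedged usage sketch with illustrative parameters:

#include "arm_compute/graph.h"

#include <utility>

using namespace arm_compute::graph::frontend;

// Sketch: append a PriorBox over the stream's current feature map.
// input_image is a SubStream assumed to be rooted at the input node.
void append_priorbox(Stream &graph, SubStream &&input_image)
{
    PriorBoxLayerInfo prior_info({ 60.f },                   // min sizes (illustrative)
                                 { 0.1f, 0.1f, 0.2f, 0.2f }, // variances (illustrative)
                                 0.5f);                      // offset
    graph << PriorBoxLayer(std::move(input_image), prior_info).set_name("priorbox");
}
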
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index 342ecbfd3b..5c7599fbfd 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -45,6 +45,7 @@
#include "arm_compute/graph/nodes/PadLayerNode.h"
#include "arm_compute/graph/nodes/PermuteLayerNode.h"
#include "arm_compute/graph/nodes/PoolingLayerNode.h"
+#include "arm_compute/graph/nodes/PriorBoxLayerNode.h"
#include "arm_compute/graph/nodes/ROIAlignLayerNode.h"
#include "arm_compute/graph/nodes/ReorgLayerNode.h"
#include "arm_compute/graph/nodes/ReshapeLayerNode.h"
diff --git a/arm_compute/graph/nodes/NodesFwd.h b/arm_compute/graph/nodes/NodesFwd.h
index 8d9bad3771..f956b54c66 100644
--- a/arm_compute/graph/nodes/NodesFwd.h
+++ b/arm_compute/graph/nodes/NodesFwd.h
@@ -51,6 +51,7 @@ class OutputNode;
class PadLayerNode;
class PermuteLayerNode;
class PoolingLayerNode;
+class PriorBoxLayerNode;
class ReorgLayerNode;
class ReshapeLayerNode;
class ResizeLayerNode;
diff --git a/arm_compute/graph/nodes/PriorBoxLayerNode.h b/arm_compute/graph/nodes/PriorBoxLayerNode.h
new file mode 100644
index 0000000000..901fa0817a
--- /dev/null
+++ b/arm_compute/graph/nodes/PriorBoxLayerNode.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_PRIORBOX_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH_PRIORBOX_LAYER_NODE_H__
+
+#include "arm_compute/graph/INode.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** PriorBox Layer node */
+class PriorBoxLayerNode final : public INode
+{
+public:
+ /** Constructor
+ *
+ * @param[in] prior_info PriorBox Layer information
+ */
+ PriorBoxLayerNode(PriorBoxLayerInfo prior_info);
+ /** PriorBox metadata accessor
+ *
+ * @return PriorBox Layer info
+ */
+ PriorBoxLayerInfo priorbox_info() const;
+ /** Computes priorbox output descriptor
+ *
+ * @param[in] input_descriptor Input descriptor
+ * @param[in] info PriorBox operation attributes
+ *
+ * @return Output descriptor
+ */
+ static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor, const PriorBoxLayerInfo &info);
+
+ // Inherited overridden methods:
+ NodeType type() const override;
+ bool forward_descriptors() override;
+ TensorDescriptor configure_output(size_t idx) const override;
+ void accept(INodeVisitor &v) override;
+
+private:
+ PriorBoxLayerInfo _info;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_PRIORBOX_LAYER_NODE_H__ */
diff --git a/scripts/caffe_data_extractor.py b/scripts/caffe_data_extractor.py
index 65c9938480..47d24b265f 100755
--- a/scripts/caffe_data_extractor.py
+++ b/scripts/caffe_data_extractor.py
@@ -34,7 +34,7 @@ if __name__ == "__main__":
elif i == 1:
outname = name + "_b"
else:
- pass
+ continue
varname = outname
if os.path.sep in varname:
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index b4c58780bd..b2ca28da57 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -494,6 +494,21 @@ NodeID GraphBuilder::add_pooling_node(Graph &g, NodeParams params, NodeIdxPair i
return create_simple_single_input_output_node<PoolingLayerNode>(g, params, input, pool_info);
}
+NodeID GraphBuilder::add_priorbox_node(Graph &g, NodeParams params, NodeIdxPair input0, NodeIdxPair input1, PriorBoxLayerInfo prior_info)
+{
+ CHECK_NODEIDX_PAIR(input0, g);
+ CHECK_NODEIDX_PAIR(input1, g);
+
+ // Create priorbox node and connect
+ NodeID prior_nid = g.add_node<PriorBoxLayerNode>(prior_info);
+ g.add_connection(input0.node_id, input0.index, prior_nid, 0);
+ g.add_connection(input1.node_id, input1.index, prior_nid, 1);
+
+ set_node_params(g, prior_nid, params);
+
+ return prior_nid;
+}
+
NodeID GraphBuilder::add_reorg_node(Graph &g, NodeParams params, NodeIdxPair input, int stride)
{
return create_simple_single_input_output_node<ReorgLayerNode>(g, params, input, stride);
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index f63aba9ec5..c37a137cf7 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -113,6 +113,8 @@ std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &
return detail::create_permute_layer<CLPermute, CLTargetInfo>(*polymorphic_downcast<PermuteLayerNode *>(node));
case NodeType::PoolingLayer:
return detail::create_pooling_layer<CLPoolingLayer, CLTargetInfo>(*polymorphic_downcast<PoolingLayerNode *>(node));
+ case NodeType::PriorBoxLayer:
+ return detail::create_priorbox_layer<CLPriorBoxLayer, CLTargetInfo>(*polymorphic_downcast<PriorBoxLayerNode *>(node));
case NodeType::ReorgLayer:
return detail::create_reorg_layer<CLReorgLayer, CLTargetInfo>(*polymorphic_downcast<ReorgLayerNode *>(node));
case NodeType::ReshapeLayer:
diff --git a/src/graph/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
index 1ea3517467..a070973fd4 100644
--- a/src/graph/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -67,6 +67,8 @@ Status CLNodeValidator::validate(INode *node)
return detail::validate_pad_layer<CLPadLayer>(*polymorphic_downcast<PadLayerNode *>(node));
case NodeType::PermuteLayer:
return detail::validate_permute_layer<CLPermute>(*polymorphic_downcast<PermuteLayerNode *>(node));
+ case NodeType::PriorBoxLayer:
+ return detail::validate_priorbox_layer<CLPriorBoxLayer>(*polymorphic_downcast<PriorBoxLayerNode *>(node));
case NodeType::ReorgLayer:
return detail::validate_reorg_layer<CLReorgLayer>(*polymorphic_downcast<ReorgLayerNode *>(node));
case NodeType::ROIAlignLayer:
diff --git a/src/graph/backends/GLES/GCFunctionsFactory.cpp b/src/graph/backends/GLES/GCFunctionsFactory.cpp
index 7df659e7b3..2ca453ebde 100644
--- a/src/graph/backends/GLES/GCFunctionsFactory.cpp
+++ b/src/graph/backends/GLES/GCFunctionsFactory.cpp
@@ -94,7 +94,8 @@ std::unique_ptr<IFunction> create_concatenate_layer<GCDepthConcatenateLayer, GCT
func->configure(inputs, output);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
<< " Target " << GCTargetInfo::TargetType
<< " Data Type: " << output->info()->data_type()
<< " Shape: " << output->info()->tensor_shape()
@@ -143,7 +144,9 @@ std::unique_ptr<IFunction> create_convolution_layer<GCConvolutionLayerFunctions,
}
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << func_name
<< " Data Type: " << input->info()->data_type()
<< " Input QuantInfo: " << input->info()->quantization_info()
<< " Weights QuantInfo: " << weights->info()->quantization_info()
@@ -191,7 +194,9 @@ std::unique_ptr<IFunction> create_depthwise_convolution_layer<GCDepthwiseConvolu
}
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << func_name
<< " Target " << GCTargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input QuantInfo: " << input->info()->quantization_info()
@@ -246,11 +251,13 @@ std::unique_ptr<IFunction> create_eltwise_layer<GCEltwiseFunctions, GCTargetInfo
}
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << GCTargetInfo::TargetType
- << " Operation " << func_name
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << GCTargetInfo::TargetType
+ << " Operation: " << func_name
<< " Data Type: " << input1->info()->data_type()
- << " Shape : " << input1->info()->tensor_shape()
+ << " Shape: " << input1->info()->tensor_shape()
<< std::endl);
return func;
diff --git a/src/graph/backends/GLES/GCNodeValidator.cpp b/src/graph/backends/GLES/GCNodeValidator.cpp
index 9cf39c6675..fe69c7a9ee 100644
--- a/src/graph/backends/GLES/GCNodeValidator.cpp
+++ b/src/graph/backends/GLES/GCNodeValidator.cpp
@@ -121,6 +121,8 @@ Status GCNodeValidator::validate(INode *node)
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : PadLayer");
case NodeType::PermuteLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : PermuteLayer");
+ case NodeType::PriorBoxLayer:
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : PriorBoxLayer");
case NodeType::ReorgLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : ReorgLayer");
case NodeType::ReshapeLayer:
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index f03cead2b4..ca8d485f8b 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -163,8 +163,10 @@ std::unique_ptr<IFunction> create_normalization_layer<NENormalizationLayer, NETa
func->configure(input, output, norm_info);
// Log info
- ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
- << " Target " << NETargetInfo::TargetType
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << NETargetInfo::TargetType
<< " Data Type: " << input->info()->data_type()
<< " Input shape: " << input->info()->tensor_shape()
<< " Output shape: " << output->info()->tensor_shape()
@@ -211,6 +213,8 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
return detail::create_permute_layer<NEPermute, NETargetInfo>(*polymorphic_downcast<PermuteLayerNode *>(node));
case NodeType::PoolingLayer:
return detail::create_pooling_layer<NEPoolingLayer, NETargetInfo>(*polymorphic_downcast<PoolingLayerNode *>(node));
+ case NodeType::PriorBoxLayer:
+ return detail::create_priorbox_layer<NEPriorBoxLayer, NETargetInfo>(*polymorphic_downcast<PriorBoxLayerNode *>(node));
case NodeType::ReorgLayer:
return detail::create_reorg_layer<NEReorgLayer, NETargetInfo>(*polymorphic_downcast<ReorgLayerNode *>(node));
case NodeType::ReshapeLayer:
@@ -229,4 +233,4 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
}
} // namespace backends
} // namespace graph
-} // namespace arm_compute
\ No newline at end of file
+} // namespace arm_compute
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index f2131586b2..a2abc8330c 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -67,6 +67,8 @@ Status NENodeValidator::validate(INode *node)
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : PadLayer");
case NodeType::PermuteLayer:
return detail::validate_permute_layer<NEPermute>(*polymorphic_downcast<PermuteLayerNode *>(node));
+ case NodeType::PriorBoxLayer:
+ return detail::validate_priorbox_layer<NEPriorBoxLayer>(*polymorphic_downcast<PriorBoxLayerNode *>(node));
case NodeType::ReorgLayer:
return detail::validate_reorg_layer<NEReorgLayer>(*polymorphic_downcast<ReorgLayerNode *>(node));
case NodeType::ROIAlignLayer:
diff --git a/src/graph/detail/ExecutionHelpers.cpp b/src/graph/detail/ExecutionHelpers.cpp
index f479963280..f2c381b7df 100644
--- a/src/graph/detail/ExecutionHelpers.cpp
+++ b/src/graph/detail/ExecutionHelpers.cpp
@@ -254,7 +254,8 @@ bool call_all_output_node_accessors(ExecutionWorkload &workload)
bool is_valid = true;
std::for_each(std::begin(workload.outputs), std::end(workload.outputs), [&](Tensor * output_tensor)
{
- is_valid = is_valid && (output_tensor != nullptr) && output_tensor->call_accessor();
+ bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
+ is_valid = is_valid && valid_output;
});
return is_valid;
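
The rewrite here is subtle but deliberate: with `is_valid && output_tensor->call_accessor()`, the first failing output short-circuits the `&&` and skips call_accessor() for every remaining output. Hoisting the call into a local keeps the side effect unconditional. A standalone illustration (toy code, not from the library):

#include <iostream>

bool noisy_accessor(bool ok)
{
    std::cout << "accessor ran" << std::endl;
    return ok;
}

int main()
{
    // Old form: after the first failure, later accessors never run.
    bool is_valid = true;
    is_valid = is_valid && noisy_accessor(false); // prints
    is_valid = is_valid && noisy_accessor(true);  // skipped: nothing printed

    // Patched form: the accessor always runs; validity is folded after.
    bool valid_output = noisy_accessor(true); // prints
    is_valid          = is_valid && valid_output;
    return is_valid ? 0 : 1;
}
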
diff --git a/src/graph/nodes/FlattenLayerNode.cpp b/src/graph/nodes/FlattenLayerNode.cpp
index 78b45dc305..baae555247 100644
--- a/src/graph/nodes/FlattenLayerNode.cpp
+++ b/src/graph/nodes/FlattenLayerNode.cpp
@@ -57,7 +57,7 @@ TensorDescriptor FlattenLayerNode::configure_output(size_t idx) const
ARM_COMPUTE_ERROR_ON(src == nullptr);
TensorDescriptor output_desc = src->desc();
- output_desc.shape.collapse(src->desc().shape.num_dimensions());
+ output_desc.shape.collapse(3);
return output_desc;
}
diff --git a/src/graph/nodes/PriorBoxLayerNode.cpp b/src/graph/nodes/PriorBoxLayerNode.cpp
new file mode 100644
index 0000000000..edb1fba255
--- /dev/null
+++ b/src/graph/nodes/PriorBoxLayerNode.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/PriorBoxLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+PriorBoxLayerNode::PriorBoxLayerNode(PriorBoxLayerInfo prior_info)
+ : _info(std::move(prior_info))
+{
+ _input_edges.resize(2, EmptyEdgeID);
+ _outputs.resize(1, NullTensorID);
+}
+
+PriorBoxLayerInfo PriorBoxLayerNode::priorbox_info() const
+{
+ return _info;
+}
+
+TensorDescriptor PriorBoxLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
+ const PriorBoxLayerInfo &info)
+{
+ const unsigned int layer_width = get_dimension_size(input_descriptor, DataLayoutDimension::WIDTH);
+ const unsigned int layer_height = get_dimension_size(input_descriptor, DataLayoutDimension::HEIGHT);
+ const unsigned int num_priors = info.aspect_ratios().size() * info.min_sizes().size() + info.max_sizes().size();
+
+ TensorDescriptor output_descriptor = input_descriptor;
+ output_descriptor.shape.set(0, layer_width * layer_height * num_priors * 4);
+ output_descriptor.shape.set(1, 2);
+ output_descriptor.shape.set(2, 1);
+
+ return output_descriptor;
+}
+
+bool PriorBoxLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (output_id(0) != NullTensorID))
+ {
+ Tensor *dst = output(0);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(0);
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor PriorBoxLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ const Tensor *input0 = input(0);
+ ARM_COMPUTE_ERROR_ON(input0 == nullptr);
+
+ return compute_output_descriptor(input0->desc(), _info);
+}
+
+NodeType PriorBoxLayerNode::type() const
+{
+ return NodeType::PriorBoxLayer;
+}
+
+void PriorBoxLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index 19fba4c0bf..2f1df7aef2 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -77,8 +77,8 @@ void TFPreproccessor::preprocess(ITensor &tensor)
});
}
-CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, bool bgr)
- : _mean(mean), _bgr(bgr)
+CaffePreproccessor::CaffePreproccessor(std::array<float, 3> mean, float scale, bool bgr)
+ : _mean(mean), _scale(scale), _bgr(bgr)
{
if(_bgr)
{
@@ -96,7 +96,7 @@ void CaffePreproccessor::preprocess(ITensor &tensor)
execute_window_loop(window, [&](const Coordinates & id)
{
const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id)) - _mean[id[channel_idx]];
- *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = value;
+ *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = value * _scale;
});
}
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index a6d670d761..d7f24afdd8 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -63,14 +63,16 @@ public:
/** Default Constructor
*
* @param mean Mean array in RGB ordering
+ * @param scale Scale factor applied to each element after mean subtraction
* @param bgr Boolean specifying if the preprocessing should assume BGR format
*/
- CaffePreproccessor(std::array<float, 3> mean = std::array<float, 3> { { 0, 0, 0 } }, bool bgr = true);
+ CaffePreproccessor(std::array<float, 3> mean = std::array<float, 3> { { 0, 0, 0 } }, float scale = 1.f, bool bgr = true);
void preprocess(ITensor &tensor) override;
private:
std::array<float, 3> _mean;
- bool _bgr;
+ float _scale;
+ bool _bgr;
};
/** TF preproccessor */
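
With the added parameter, the updated loop in GraphUtils.cpp computes `value = (pixel - mean[c]) * scale`. A sketch of constructing the preprocessor with a scale — the 0.017 factor and BGR means below are common MobileNet-style choices used purely as examples:

#include "utils/GraphUtils.h"

#include <array>
#include <memory>

using namespace arm_compute::graph_utils;

// Sketch: mean subtraction plus rescaling; all values are illustrative.
std::unique_ptr<IPreprocessor> make_caffe_preprocessor()
{
    const std::array<float, 3> mean{ { 103.94f, 116.78f, 123.68f } };
    const float                scale = 0.017f;
    return std::unique_ptr<IPreprocessor>(new CaffePreproccessor(mean, scale, true /* bgr */));
}
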
diff --git a/utils/Utils.h b/utils/Utils.h
index 92ab1a30b9..8cac857178 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -607,7 +607,6 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename)
/** Template helper function to save a tensor image to a NPY file.
*
* @note Only F32 data type supported.
- * @note Only works with 2D tensors.
* @note If the input tensor is a CLTensor, the function maps and unmaps the image
*
* @param[in] tensor The tensor to save as NPY file
@@ -627,9 +626,9 @@ void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
std::vector<npy::ndarray_len_t> shape(tensor.info()->num_dimensions());
- for(unsigned int i = 0; i < tensor.info()->num_dimensions(); ++i)
+ for(unsigned int i = 0, j = tensor.info()->num_dimensions() - 1; i < tensor.info()->num_dimensions(); ++i, --j)
{
- shape[i] = tensor.info()->tensor_shape()[i];
+ shape[i] = tensor.info()->tensor_shape()[!fortran_order ? j : i];
}
// Map buffer if creating a CLTensor
@@ -802,12 +801,13 @@ int compare_tensor(ITensor &tensor1, ITensor &tensor2)
map(tensor1, true);
map(tensor2, true);
+
Iterator itensor1(&tensor1, window);
Iterator itensor2(&tensor2, window);
execute_window_loop(window, [&](const Coordinates & id)
{
- if(std::abs(*reinterpret_cast<T *>(itensor1.ptr()) - *reinterpret_cast<T *>(itensor2.ptr())) > 0.00001)
+ if(std::abs(*reinterpret_cast<T *>(itensor1.ptr()) - *reinterpret_cast<T *>(itensor2.ptr())) > 0.0001)
{
++num_mismatches;
}
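
The reversed index in save_to_npy exists because the library stores dimension 0 innermost, while a C-order (non-Fortran) NPY header lists the outermost dimension first; the Fortran-order path keeps the original ordering. A toy rendering of that mapping, under the same assumption:

#include <cstddef>
#include <iostream>

int main()
{
    const std::size_t dims[]        = { 32, 24, 3 }; // library order: dim 0 innermost
    const std::size_t n             = 3;
    const bool        fortran_order = false;
    for(std::size_t i = 0, j = n - 1; i < n; ++i, --j)
    {
        // C-order output reverses the dimensions; Fortran order keeps them.
        std::cout << dims[!fortran_order ? j : i] << " "; // prints: 3 24 32
    }
    std::cout << std::endl;
    return 0;
}
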