author     Isabella Gottardi <isabella.gottardi@arm.com>  2019-01-08 13:48:44 +0000
committer  Isabella Gottardi <isabella.gottardi@arm.com>  2019-08-06 07:58:16 +0000
commit     a7acb3cbabeb66ce647684466a04c96b2963c9c9 (patch)
tree       7988b75372c8ad1dfa3c8d028ab3a603a5e5a047
parent     4746326ecb075dcfa123aaa8b38de5ec3e534b60 (diff)
download   ComputeLibrary-a7acb3cbabeb66ce647684466a04c96b2963c9c9.tar.gz
COMPMID-1849: Implement CPPDetectionPostProcessLayer
* Add DetectionPostProcessLayer
* Add DetectionPostProcessLayer at the graph

Change-Id: I7e56f6cffc26f112d26dfe74853085bb8ec7d849
Signed-off-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1639
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  arm_compute/core/Types.h | 116
-rw-r--r--  arm_compute/graph/GraphBuilder.h | 15
-rw-r--r--  arm_compute/graph/INodeVisitor.h | 9
-rw-r--r--  arm_compute/graph/TypePrinter.h | 3
-rw-r--r--  arm_compute/graph/Types.h | 2
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h | 56
-rw-r--r--  arm_compute/graph/backends/ValidateHelpers.h | 27
-rw-r--r--  arm_compute/graph/frontend/Layers.h | 33
-rw-r--r--  arm_compute/graph/nodes/DetectionPostProcessLayerNode.h | 62
-rw-r--r--  arm_compute/graph/nodes/Nodes.h | 1
-rw-r--r--  arm_compute/graph/nodes/NodesFwd.h | 1
-rw-r--r--  arm_compute/runtime/CPP/CPPFunctions.h | 1
-rw-r--r--  arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h | 9
-rw-r--r--  arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h | 123
-rw-r--r--  src/graph/GraphBuilder.cpp | 30
-rw-r--r--  src/graph/backends/CL/CLFunctionsFactory.cpp | 58
-rw-r--r--  src/graph/backends/CL/CLNodeValidator.cpp | 2
-rw-r--r--  src/graph/backends/GLES/GCNodeValidator.cpp | 2
-rw-r--r--  src/graph/backends/NEON/NEFunctionFactory.cpp | 2
-rw-r--r--  src/graph/backends/NEON/NENodeValidator.cpp | 2
-rw-r--r--  src/graph/nodes/DetectionPostProcessLayerNode.cpp | 104
-rw-r--r--  src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp | 24
-rw-r--r--  src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp | 388
-rw-r--r--  tests/validation/CPP/DetectionPostProcessLayer.cpp | 390
-rw-r--r--  utils/GraphUtils.cpp | 6
-rw-r--r--  utils/GraphUtils.h | 9
-rw-r--r--  utils/TypePrinter.h | 37
-rw-r--r--  utils/Utils.h | 40
28 files changed, 1503 insertions, 49 deletions
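For orientation, here is a minimal sketch of how the frontend layer added by this patch could be wired into a graph stream. It is not part of the patch: the sub-stream split point, the anchors file name, the accessor helper from utils/GraphUtils.h and all numeric values are illustrative assumptions.

// Sketch only: the stream is assumed to currently end at the box-encoding tensor, while
// 'class_predictions' is a SubStream forked where the class scores are produced.
// Assumes "arm_compute/graph.h" and "utils/GraphUtils.h" are included.
void add_detection_post_process_example(arm_compute::graph::frontend::Stream    &graph_stream,
                                        arm_compute::graph::frontend::SubStream &&class_predictions,
                                        const std::string                       &data_path)
{
    using namespace arm_compute;
    using namespace arm_compute::graph::frontend;

    const DetectionPostProcessLayerInfo detect_info(10U /* max_detections */, 1U /* max_classes_per_detection */,
                                                    0.4f /* nms_score_threshold */, 0.6f /* iou_threshold */,
                                                    90U /* num_classes */, { 10.f, 10.f, 5.f, 5.f } /* scales [y,x,h,w] */);

    graph_stream << DetectionPostProcessLayer(std::move(class_predictions), detect_info,
                                              graph_utils::get_weights_accessor(data_path, "anchors.npy" /* hypothetical file */))
                    .set_name("DetectionPostProcess");
}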
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 2c17f273a5..6df74e7b88 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -34,6 +34,7 @@
#include <cmath>
#include <cstddef>
#include <cstdint>
+#include <map>
#include <string>
#include <utility>
@@ -943,6 +944,11 @@ private:
std::array<float, 2> _steps;
};
+// Bounding Box [xmin, ymin, xmax, ymax]
+using BBox = std::array<float, 4>;
+// LabelBBox used to map a label to its bounding boxes
+using LabelBBox = std::map<int, std::vector<BBox>>;
+
/** Available Detection Output code types */
enum class DetectionOutputLayerCodeType
{
@@ -1071,6 +1077,116 @@ private:
int _num_loc_classes;
};
+/** Detection Post Process layer info */
+class DetectionPostProcessLayerInfo final
+{
+public:
+ /** Default Constructor */
+ DetectionPostProcessLayerInfo()
+ : _max_detections(),
+ _max_classes_per_detection(),
+ _nms_score_threshold(),
+ _iou_threshold(),
+ _num_classes(),
+ _scales_values(),
+ _use_regular_nms(),
+ _detection_per_class()
+ {
+ }
+ /** Constructor
+ *
+ * @param[in] max_detections Number of total detections.
+ * @param[in] max_classes_per_detection Number of total classes to be kept after the NMS step. Used in the Fast Non-Max-Suppression.
+ * @param[in] nms_score_threshold Score threshold to be used in NMS.
+ * @param[in] iou_threshold Intersection over union (IoU) threshold used during NMS.
+ * @param[in] num_classes Number of classes.
+ * @param[in] scales_values Scale values used to decode center-size boxes, stored as [y, x, h, w].
+ * @param[in] use_regular_nms (Optional) Boolean to determine whether to use regular or fast NMS.
+ * @param[in] detection_per_class (Optional) Number of detections per class. Used in the Regular Non-Max-Suppression.
+ */
+ DetectionPostProcessLayerInfo(unsigned int max_detections, unsigned int max_classes_per_detection, float nms_score_threshold, float iou_threshold, unsigned int num_classes,
+ std::array<float, 4> scales_values, bool use_regular_nms = false, unsigned int detection_per_class = 100)
+ : _max_detections(max_detections),
+ _max_classes_per_detection(max_classes_per_detection),
+ _nms_score_threshold(nms_score_threshold),
+ _iou_threshold(iou_threshold),
+ _num_classes(num_classes),
+ _scales_values(scales_values),
+ _use_regular_nms(use_regular_nms),
+ _detection_per_class(detection_per_class)
+ {
+ }
+ /** Get max detections. */
+ unsigned int max_detections() const
+ {
+ return _max_detections;
+ }
+ /** Get max classes per detection. Used in the Fast Non-Max-Suppression. */
+ unsigned int max_classes_per_detection() const
+ {
+ return _max_classes_per_detection;
+ }
+ /** Get detections per class. Used in the Regular Non-Max-Suppression */
+ unsigned int detection_per_class() const
+ {
+ return _detection_per_class;
+ }
+ /** Get NMS score threshold. */
+ float nms_score_threshold() const
+ {
+ return _nms_score_threshold;
+ }
+ /** Get intersection over union threshold. */
+ float iou_threshold() const
+ {
+ return _iou_threshold;
+ }
+ /** Get num classes. */
+ unsigned int num_classes() const
+ {
+ return _num_classes;
+ }
+ /** Get if use regular nms. */
+ bool use_regular_nms() const
+ {
+ return _use_regular_nms;
+ }
+ /** Get y scale value. */
+ float scale_value_y() const
+ {
+ // Saved as [y,x,h,w]
+ return _scales_values[0];
+ }
+ /** Get x scale value. */
+ float scale_value_x() const
+ {
+ // Saved as [y,x,h,w]
+ return _scales_values[1];
+ }
+ /** Get h scale value. */
+ float scale_value_h() const
+ {
+ // Saved as [y,x,h,w]
+ return _scales_values[2];
+ }
+ /** Get w scale value. */
+ float scale_value_w() const
+ {
+ // Saved as [y,x,h,w]
+ return _scales_values[3];
+ }
+
+private:
+ unsigned int _max_detections;
+ unsigned int _max_classes_per_detection;
+ float _nms_score_threshold;
+ float _iou_threshold;
+ unsigned int _num_classes;
+ std::array<float, 4> _scales_values;
+ bool _use_regular_nms;
+ unsigned int _detection_per_class;
+};
+
/** Pooling Layer Information class */
class PoolingLayerInfo
{
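As a quick illustration of the DetectionPostProcessLayerInfo class added above, the sketch below constructs it with values in the style of a TensorFlow Lite SSD post-process configuration; the numbers are placeholders, not values mandated by this patch.

// Sketch only; assumes "arm_compute/core/Types.h" is included.
void detection_post_process_info_example()
{
    using namespace arm_compute;

    const DetectionPostProcessLayerInfo info(
        10U,                        // max_detections
        1U,                         // max_classes_per_detection (Fast NMS path)
        0.4f,                       // nms_score_threshold
        0.6f,                       // iou_threshold
        90U,                        // num_classes (background not counted)
        { 10.f, 10.f, 5.f, 5.f });  // scales_values, stored as [y, x, h, w]

    // The getters unpack the scale array in the documented [y, x, h, w] order.
    const float scale_y = info.scale_value_y(); // 10.f
    const float scale_w = info.scale_value_w(); // 5.f
    (void)scale_y;
    (void)scale_w;
}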
diff --git a/arm_compute/graph/GraphBuilder.h b/arm_compute/graph/GraphBuilder.h
index e1049ca938..dc41ed5367 100644
--- a/arm_compute/graph/GraphBuilder.h
+++ b/arm_compute/graph/GraphBuilder.h
@@ -217,6 +217,21 @@ public:
* @return Node ID of the created node, EmptyNodeID in case of error
*/
static NodeID add_detection_output_node(Graph &g, NodeParams params, NodeIdxPair input_loc, NodeIdxPair input_conf, NodeIdxPair input_priorbox, const DetectionOutputLayerInfo &detect_info);
+ /** Adds a detection post process layer node to the graph
+ *
+ * @param[in] g Graph to add the node to
+ * @param[in] params Common node parameters
+ * @param[in] input_box_encoding Box encodings input to the detection post process layer node as a NodeID-Index pair
+ * @param[in] input_class_prediction Class prediction input to the detection post process layer node as a NodeID-Index pair
+ * @param[in] detect_info Detection post process layer parameters
+ * @param[in] anchors_accessor (Optional) Accessor for the const node that contains the anchor values
+ * @param[in] anchor_quant_info (Optional) Anchor quantization info
+ *
+ * @return Node ID of the created node, EmptyNodeID in case of error
+ */
+ static NodeID add_detection_post_process_node(Graph &g, NodeParams params, NodeIdxPair input_box_encoding, NodeIdxPair input_class_prediction,
+ const DetectionPostProcessLayerInfo &detect_info, ITensorAccessorUPtr anchors_accessor = nullptr,
+ const QuantizationInfo &anchor_quant_info = QuantizationInfo());
/** Adds a Dummy node to the graph
*
* @note this node if for debugging purposes. Just alters the shape of the graph pipeline as requested.
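A short sketch of calling the new builder entry point directly, for example from a graph mutator or a model importer. It is not part of the patch; the node IDs, target and accessor are assumed to exist in the caller.

// Sketch only; assumes "arm_compute/graph/GraphBuilder.h" is included.
arm_compute::graph::NodeID add_dpp_node_example(arm_compute::graph::Graph                        &g,
                                                arm_compute::graph::NodeID                        box_encoding_nid,
                                                arm_compute::graph::NodeID                        class_prediction_nid,
                                                const arm_compute::DetectionPostProcessLayerInfo &detect_info,
                                                arm_compute::graph::ITensorAccessorUPtr           anchors_accessor)
{
    using namespace arm_compute::graph;

    const NodeParams  params  = { "DetectionPostProcess", Target::NEON }; // name and target are illustrative
    const NodeIdxPair boxes   = { box_encoding_nid, 0 };                  // output 0 of the box-encoding producer
    const NodeIdxPair classes = { class_prediction_nid, 0 };              // output 0 of the class-prediction producer

    // A Const node holding the anchors is created internally and connected as the third input.
    return GraphBuilder::add_detection_post_process_node(g, params, boxes, classes, detect_info, std::move(anchors_accessor));
}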
diff --git a/arm_compute/graph/INodeVisitor.h b/arm_compute/graph/INodeVisitor.h
index 5c5b777ac9..f97906d02a 100644
--- a/arm_compute/graph/INodeVisitor.h
+++ b/arm_compute/graph/INodeVisitor.h
@@ -76,6 +76,11 @@ public:
* @param[in] n Node to visit.
*/
virtual void visit(DetectionOutputLayerNode &n) = 0;
+ /** Visit DetectionPostProcessLayerNode.
+ *
+ * @param[in] n Node to visit.
+ */
+ virtual void visit(DetectionPostProcessLayerNode &n) = 0;
/** Visit EltwiseLayerNode.
*
* @param[in] n Node to visit.
@@ -199,6 +204,10 @@ public:
{
default_visit();
}
+ virtual void visit(DetectionPostProcessLayerNode &n) override
+ {
+ default_visit();
+ }
virtual void visit(DepthwiseConvolutionLayerNode &n) override
{
default_visit();
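A sketch of a visitor that handles the new node type. A visitor built on DefaultNodeVisitor only needs to override the overloads it cares about; everything else falls through to default_visit(). The class name and the logging behaviour below are hypothetical, not part of the patch.

// Sketch only; assumes <iostream> and "arm_compute/graph/INodeVisitor.h" plus the node headers are included.
class DetectionPostProcessLogger final : public arm_compute::graph::DefaultNodeVisitor
{
public:
    void visit(arm_compute::graph::DetectionPostProcessLayerNode &n) override
    {
        std::cout << "DetectionPostProcessLayerNode '" << n.name() << "' visited" << std::endl;
    }
    void default_visit() override
    {
        // Ignore all other node types in this sketch.
    }
};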
diff --git a/arm_compute/graph/TypePrinter.h b/arm_compute/graph/TypePrinter.h
index 9da0e6157c..e4188125b9 100644
--- a/arm_compute/graph/TypePrinter.h
+++ b/arm_compute/graph/TypePrinter.h
@@ -86,6 +86,9 @@ inline ::std::ostream &operator<<(::std::ostream &os, const NodeType &node_type)
case NodeType::DetectionOutputLayer:
os << "DetectionOutputLayer";
break;
+ case NodeType::DetectionPostProcessLayer:
+ os << "DetectionPostProcessLayer";
+ break;
case NodeType::DepthwiseConvolutionLayer:
os << "DepthwiseConvolutionLayer";
break;
diff --git a/arm_compute/graph/Types.h b/arm_compute/graph/Types.h
index 9f962425b3..8b97708a63 100644
--- a/arm_compute/graph/Types.h
+++ b/arm_compute/graph/Types.h
@@ -48,6 +48,7 @@ using arm_compute::PermutationVector;
using arm_compute::ActivationLayerInfo;
using arm_compute::DetectionOutputLayerInfo;
+using arm_compute::DetectionPostProcessLayerInfo;
using arm_compute::NormType;
using arm_compute::NormalizationLayerInfo;
using arm_compute::FullyConnectedLayerInfo;
@@ -137,6 +138,7 @@ enum class NodeType
DeconvolutionLayer,
DepthwiseConvolutionLayer,
DetectionOutputLayer,
+ DetectionPostProcessLayer,
EltwiseLayer,
FlattenLayer,
FullyConnectedLayer,
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index ed5b35c0d1..dd833061a9 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -644,6 +644,62 @@ std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNod
return std::move(func);
}
+
+/** Create a backend detection post process layer function
+ *
+ * @tparam DetectionPostProcessLayerFunction Backend detection post process function
+ * @tparam TargetInfo Target-specific information
+ *
+ * @param[in] node Node to create the backend function for
+ *
+ * @return Backend detection post process layer function
+ */
+template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
+std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
+{
+ validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
+
+ // Extract IO and info
+ typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
+ typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
+ typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
+ typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
+ typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
+ typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
+ typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
+ const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
+
+ ARM_COMPUTE_ERROR_ON(input0 == nullptr);
+ ARM_COMPUTE_ERROR_ON(input1 == nullptr);
+ ARM_COMPUTE_ERROR_ON(input2 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output0 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output1 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output2 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output3 == nullptr);
+
+ // Create and configure function
+ auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
+ func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << TargetInfo::TargetType
+ << " Data Type: " << input0->info()->data_type()
+ << " Input0 shape: " << input0->info()->tensor_shape()
+ << " Input1 shape: " << input1->info()->tensor_shape()
+ << " Input2 shape: " << input2->info()->tensor_shape()
+ << " Output0 shape: " << output0->info()->tensor_shape()
+ << " Output1 shape: " << output1->info()->tensor_shape()
+ << " Output2 shape: " << output2->info()->tensor_shape()
+ << " Output3 shape: " << output3->info()->tensor_shape()
+ << " DetectionPostProcessLayer info: " << detect_info
+ << std::endl);
+
+ return std::move(func);
+}
+
/** Create a backend element-wise operation layer function
*
* @tparam EltwiseFunctions Backend element-wise function
diff --git a/arm_compute/graph/backends/ValidateHelpers.h b/arm_compute/graph/backends/ValidateHelpers.h
index 3a5686336b..13de273bdf 100644
--- a/arm_compute/graph/backends/ValidateHelpers.h
+++ b/arm_compute/graph/backends/ValidateHelpers.h
@@ -228,6 +228,33 @@ Status validate_detection_output_layer(DetectionOutputLayerNode &node)
return DetectionOutputLayer::validate(input0, input1, input2, output, detect_info);
}
+/** Validates a detection post process layer node
+ *
+ * @tparam DetectionPostProcessLayer DetectionPostProcess layer type
+ *
+ * @param[in] node Node to validate
+ *
+ * @return Status
+ */
+template <typename DetectionPostProcessLayer>
+Status validate_detection_post_process_layer(DetectionPostProcessLayerNode &node)
+{
+ ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating DetectionPostProcessLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_inputs() != 3);
+ ARM_COMPUTE_RETURN_ERROR_ON(node.num_outputs() != 4);
+
+ // Extract IO and info
+ arm_compute::ITensorInfo *input0 = get_backing_tensor_info(node.input(0));
+ arm_compute::ITensorInfo *input1 = get_backing_tensor_info(node.input(1));
+ arm_compute::ITensorInfo *input2 = get_backing_tensor_info(node.input(2));
+ arm_compute::ITensorInfo *output0 = get_backing_tensor_info(node.output(0));
+ arm_compute::ITensorInfo *output1 = get_backing_tensor_info(node.output(1));
+ arm_compute::ITensorInfo *output2 = get_backing_tensor_info(node.output(2));
+ arm_compute::ITensorInfo *output3 = get_backing_tensor_info(node.output(3));
+ const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
+
+ return DetectionPostProcessLayer::validate(input0, input1, input2, output0, output1, output2, output3, detect_info);
+}
/** Validates a Generate Proposals layer node
*
diff --git a/arm_compute/graph/frontend/Layers.h b/arm_compute/graph/frontend/Layers.h
index 3fc4af46d5..27a0cd3026 100644
--- a/arm_compute/graph/frontend/Layers.h
+++ b/arm_compute/graph/frontend/Layers.h
@@ -493,6 +493,39 @@ private:
SubStream _ss_prior;
DetectionOutputLayerInfo _detect_info;
};
+/** DetectionPostProcess Layer */
+class DetectionPostProcessLayer final : public ILayer
+{
+public:
+ /** Construct a detection post process layer.
+ *
+ * @param[in] sub_stream_class_prediction Class prediction graph sub-stream.
+ * @param[in] detect_info DetectionPostProcess parameters.
+ * @param[in] anchors Accessor to get anchors tensor data from.
+ * @param[in] out_quant_info (Optional) Output quantization info
+ */
+ DetectionPostProcessLayer(SubStream &&sub_stream_class_prediction, DetectionPostProcessLayerInfo detect_info, ITensorAccessorUPtr anchors,
+ const QuantizationInfo out_quant_info = QuantizationInfo())
+ : _sub_stream_class_prediction(std::move(sub_stream_class_prediction)), _detect_info(detect_info), _anchors(std::move(anchors)), _out_quant_info(std::move(out_quant_info))
+ {
+ }
+
+ NodeID create_layer(IStream &s) override
+ {
+ ARM_COMPUTE_ERROR_ON(_anchors == nullptr);
+
+ NodeParams common_params = { name(), s.hints().target_hint };
+ NodeIdxPair input_box_encoding = { s.tail_node(), 0 };
+ NodeIdxPair input_class_prediction = { _sub_stream_class_prediction.tail_node(), 0 };
+ return GraphBuilder::add_detection_post_process_node(s.graph(), common_params, input_box_encoding, input_class_prediction, _detect_info, std::move(_anchors), std::move(_out_quant_info));
+ }
+
+private:
+ SubStream _sub_stream_class_prediction;
+ DetectionPostProcessLayerInfo _detect_info;
+ ITensorAccessorUPtr _anchors;
+ const QuantizationInfo _out_quant_info;
+};
/** Dummy Layer */
class DummyLayer final : public ILayer
{
diff --git a/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h b/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
new file mode 100644
index 0000000000..76b1d8ce98
--- /dev/null
+++ b/arm_compute/graph/nodes/DetectionPostProcessLayerNode.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_GRAPH_DETECTION_POST_PROCESS_LAYER_NODE_H__
+#define __ARM_COMPUTE_GRAPH_DETECTION_POST_PROCESS_LAYER_NODE_H__
+
+#include "arm_compute/graph/INode.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+/** DetectionPostProcess Layer node */
+class DetectionPostProcessLayerNode final : public INode
+{
+public:
+ /** Constructor
+ *
+ * @param[in] detection_info DetectionPostProcess Layer information
+ */
+ DetectionPostProcessLayerNode(DetectionPostProcessLayerInfo detection_info);
+ /** DetectionPostProcess metadata accessor
+ *
+ * @return DetectionPostProcess Layer info
+ */
+ DetectionPostProcessLayerInfo detection_post_process_info() const;
+
+ // Inherited overridden methods:
+ NodeType type() const override;
+ bool forward_descriptors() override;
+ TensorDescriptor configure_output(size_t idx) const override;
+ void accept(INodeVisitor &v) override;
+
+private:
+ DetectionPostProcessLayerInfo _info;
+
+ static const int kNumCoordBox = 4;
+ static const int kBatchSize = 1;
+};
+} // namespace graph
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_GRAPH_DETECTION_POST_PROCESS_LAYER_NODE_H__ */
\ No newline at end of file
diff --git a/arm_compute/graph/nodes/Nodes.h b/arm_compute/graph/nodes/Nodes.h
index 52e2f88528..1586270093 100644
--- a/arm_compute/graph/nodes/Nodes.h
+++ b/arm_compute/graph/nodes/Nodes.h
@@ -34,6 +34,7 @@
#include "arm_compute/graph/nodes/DeconvolutionLayerNode.h"
#include "arm_compute/graph/nodes/DepthwiseConvolutionLayerNode.h"
#include "arm_compute/graph/nodes/DetectionOutputLayerNode.h"
+#include "arm_compute/graph/nodes/DetectionPostProcessLayerNode.h"
#include "arm_compute/graph/nodes/DummyNode.h"
#include "arm_compute/graph/nodes/EltwiseLayerNode.h"
#include "arm_compute/graph/nodes/FlattenLayerNode.h"
diff --git a/arm_compute/graph/nodes/NodesFwd.h b/arm_compute/graph/nodes/NodesFwd.h
index 2c89679902..53f2a6a1b5 100644
--- a/arm_compute/graph/nodes/NodesFwd.h
+++ b/arm_compute/graph/nodes/NodesFwd.h
@@ -40,6 +40,7 @@ class ConvolutionLayerNode;
class DeconvolutionLayerNode;
class DepthwiseConvolutionLayerNode;
class DetectionOutputLayerNode;
+class DetectionPostProcessLayerNode;
class DummyNode;
class EltwiseLayerNode;
class FlattenLayerNode;
diff --git a/arm_compute/runtime/CPP/CPPFunctions.h b/arm_compute/runtime/CPP/CPPFunctions.h
index 1dff03f349..743929fae8 100644
--- a/arm_compute/runtime/CPP/CPPFunctions.h
+++ b/arm_compute/runtime/CPP/CPPFunctions.h
@@ -27,6 +27,7 @@
/* Header regrouping all the CPP functions */
#include "arm_compute/runtime/CPP/functions/CPPBoxWithNonMaximaSuppressionLimit.h"
#include "arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h"
+#include "arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h"
#include "arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h"
#include "arm_compute/runtime/CPP/functions/CPPPermute.h"
#include "arm_compute/runtime/CPP/functions/CPPTopKV.h"
diff --git a/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h b/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h
index 71be8a0ad8..4e1b8f2a74 100644
--- a/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h
+++ b/arm_compute/runtime/CPP/functions/CPPDetectionOutputLayer.h
@@ -28,17 +28,10 @@
#include "arm_compute/core/Types.h"
-#include <map>
-
namespace arm_compute
{
class ITensor;
-// Normalized Bounding Box [xmin, ymin, xmax, ymax]
-using NormalizedBBox = std::array<float, 4>;
-// LabelBBox used for map label and bounding box
-using LabelBBox = std::map<int, std::vector<NormalizedBBox>>;
-
/** CPP Function to generate the detection output based on location and confidence
* predictions by doing non maximum suppression.
*
@@ -91,7 +84,7 @@ private:
std::vector<LabelBBox> _all_location_predictions;
std::vector<std::map<int, std::vector<float>>> _all_confidence_scores;
- std::vector<NormalizedBBox> _all_prior_bboxes;
+ std::vector<BBox> _all_prior_bboxes;
std::vector<std::array<float, 4>> _all_prior_variances;
std::vector<LabelBBox> _all_decode_bboxes;
std::vector<std::map<int, std::vector<int>>> _all_indices;
diff --git a/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h b/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
new file mode 100644
index 0000000000..c13def67c7
--- /dev/null
+++ b/arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CPP_DETECTION_POSTPROCESS_H__
+#define __ARM_COMPUTE_CPP_DETECTION_POSTPROCESS_H__
+
+#include "arm_compute/runtime/CPP/ICPPSimpleFunction.h"
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/functions/CPPNonMaximumSuppression.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include <map>
+
+namespace arm_compute
+{
+class ITensor;
+
+/** CPP Function to generate the detection output based on center size encoded boxes, class prediction and anchors
+ * by doing non maximum suppression.
+ *
+ * @note Intended for use with MultiBox detection method.
+ */
+class CPPDetectionPostProcessLayer : public IFunction
+{
+public:
+ /** Constructor */
+ CPPDetectionPostProcessLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CPPDetectionPostProcessLayer(const CPPDetectionPostProcessLayer &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CPPDetectionPostProcessLayer &operator=(const CPPDetectionPostProcessLayer &) = delete;
+ /** Configure the detection post process layer CPP function
+ *
+ * @param[in] input_box_encoding The bounding box input tensor. Data types supported: F32, QASYMM8.
+ * @param[in] input_score The class prediction input tensor. Data types supported: Same as @p input_box_encoding.
+ * @param[in] input_anchors The anchors input tensor. Data types supported: Same as @p input_box_encoding.
+ * @param[out] output_boxes The boxes output tensor. Data types supported: F32.
+ * @param[out] output_classes The classes output tensor. Data types supported: Same as @p output_boxes.
+ * @param[out] output_scores The scores output tensor. Data types supported: Same as @p output_boxes.
+ * @param[out] num_detection The number of output detections. Data types supported: Same as @p output_boxes.
+ * @param[in] info (Optional) DetectionPostProcessLayerInfo information.
+ *
+ * @note The outputs are sized for the maximum number of detections; only the first num_detection entries are valid.
+ */
+ void configure(const ITensor *input_box_encoding, const ITensor *input_score, const ITensor *input_anchors,
+ ITensor *output_boxes, ITensor *output_classes, ITensor *output_scores, ITensor *num_detection, DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo());
+ /** Static function to check if given info will lead to a valid configuration of @ref CPPDetectionPostProcessLayer
+ *
+ * @param[in] input_box_encoding The bounding box input tensor info. Data types supported: F32, QASYMM8.
+ * @param[in] input_class_score The class prediction input tensor info. Data types supported: F32, QASYMM8.
+ * @param[in] input_anchors The anchors input tensor. Data types supported: F32, QASYMM8.
+ * @param[out] output_boxes The output tensor. Data types supported: F32.
+ * @param[out] output_classes The output tensor. Data types supported: Same as @p output_boxes.
+ * @param[out] output_scores The output tensor. Data types supported: Same as @p output_boxes.
+ * @param[out] num_detection The number of output detections. Data types supported: Same as @p output_boxes.
+ * @param[in] info (Optional) DetectionPostProcessLayerInfo information.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors,
+ ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection,
+ DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo());
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ MemoryGroup _memory_group;
+ CPPNonMaximumSuppression _nms;
+ const ITensor *_input_box_encoding;
+ const ITensor *_input_scores;
+ const ITensor *_input_anchors;
+ ITensor *_output_boxes;
+ ITensor *_output_classes;
+ ITensor *_output_scores;
+ ITensor *_num_detection;
+ DetectionPostProcessLayerInfo _info;
+
+ const unsigned int _kBatchSize = 1;
+ const unsigned int _kNumCoordBox = 4;
+ unsigned int _num_boxes;
+ unsigned int _num_classes_with_background;
+ unsigned int _num_max_detected_boxes;
+
+ Tensor _decoded_boxes;
+ Tensor _decoded_scores;
+ Tensor _selected_indices;
+ Tensor _class_scores;
+ const ITensor *_input_scores_to_use;
+
+ // Intermediate results
+ std::vector<int> _result_idx_boxes_after_nms;
+ std::vector<int> _result_classes_after_nms;
+ std::vector<float> _result_scores_after_nms;
+ std::vector<unsigned int> _sorted_indices;
+
+ // Temporary values
+ std::vector<float> _box_scores;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CPP_DETECTION_POSTPROCESS_H__ */
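A standalone sketch of using the new CPP function declared above. It is not part of the patch: the number of boxes, tensor shapes and info values are illustrative, and it assumes "arm_compute/runtime/Tensor.h" is included alongside this header.

void detection_post_process_example()
{
    using namespace arm_compute;

    Tensor box_encodings, class_scores, anchors;
    Tensor out_boxes, out_classes, out_scores, num_detection;

    const unsigned int num_boxes = 1917U; // e.g. number of SSD anchors (placeholder)
    const DetectionPostProcessLayerInfo info(10U, 1U, 0.4f, 0.6f, 90U, { 10.f, 10.f, 5.f, 5.f });

    box_encodings.allocator()->init(TensorInfo(TensorShape(4U, num_boxes, 1U), 1, DataType::F32));
    class_scores.allocator()->init(TensorInfo(TensorShape(91U, num_boxes, 1U), 1, DataType::F32)); // num_classes + 1
    anchors.allocator()->init(TensorInfo(TensorShape(4U, num_boxes, 1U), 1, DataType::F32));

    // configure() auto-initialises the output infos; validate() can also be called first with the TensorInfos.
    CPPDetectionPostProcessLayer detection;
    detection.configure(&box_encodings, &class_scores, &anchors, &out_boxes, &out_classes, &out_scores, &num_detection, info);

    for(Tensor *t : { &box_encodings, &class_scores, &anchors, &out_boxes, &out_classes, &out_scores, &num_detection })
    {
        t->allocator()->allocate();
    }

    // ... fill the three input tensors, then:
    detection.run();
}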
diff --git a/src/graph/GraphBuilder.cpp b/src/graph/GraphBuilder.cpp
index 54bd066712..228f2d211a 100644
--- a/src/graph/GraphBuilder.cpp
+++ b/src/graph/GraphBuilder.cpp
@@ -393,6 +393,36 @@ NodeID GraphBuilder::add_detection_output_node(Graph &g, NodeParams params, Node
return detect_nid;
}
+NodeID GraphBuilder::add_detection_post_process_node(Graph &g, NodeParams params, NodeIdxPair input_box_encoding, NodeIdxPair input_class_prediction, const DetectionPostProcessLayerInfo &detect_info,
+ ITensorAccessorUPtr anchors_accessor, const QuantizationInfo &anchor_quant_info)
+{
+ check_nodeidx_pair(input_box_encoding, g);
+ check_nodeidx_pair(input_class_prediction, g);
+
+ // Get input tensor descriptor
+ const TensorDescriptor input_box_encoding_tensor_desc = get_tensor_descriptor(g, g.node(input_box_encoding.node_id)->outputs()[0]);
+
+ // Calculate anchor descriptor
+ TensorDescriptor anchor_desc = input_box_encoding_tensor_desc;
+ if(!anchor_quant_info.empty())
+ {
+ anchor_desc.quant_info = anchor_quant_info;
+ }
+
+ // Create anchors nodes
+ auto anchors_nid = add_const_node_with_name(g, params, "Anchors", anchor_desc, std::move(anchors_accessor));
+
+ // Create detection_output node and connect
+ NodeID detect_nid = g.add_node<DetectionPostProcessLayerNode>(detect_info);
+ g.add_connection(input_box_encoding.node_id, input_box_encoding.index, detect_nid, 0);
+ g.add_connection(input_class_prediction.node_id, input_class_prediction.index, detect_nid, 1);
+ g.add_connection(anchors_nid, 0, detect_nid, 2);
+
+ set_node_params(g, detect_nid, params);
+
+ return detect_nid;
+}
+
NodeID GraphBuilder::add_dummy_node(Graph &g, NodeParams params, NodeIdxPair input, TensorShape shape)
{
return create_simple_single_input_output_node<DummyNode>(g, params, input, shape);
diff --git a/src/graph/backends/CL/CLFunctionsFactory.cpp b/src/graph/backends/CL/CLFunctionsFactory.cpp
index b9f22f6199..82b6dd6a54 100644
--- a/src/graph/backends/CL/CLFunctionsFactory.cpp
+++ b/src/graph/backends/CL/CLFunctionsFactory.cpp
@@ -166,6 +166,62 @@ std::unique_ptr<IFunction> create_detection_output_layer<CPPDetectionOutputLayer
return std::move(wrap_function);
}
+template <>
+std::unique_ptr<IFunction> create_detection_post_process_layer<CPPDetectionPostProcessLayer, CLTargetInfo>(DetectionPostProcessLayerNode &node)
+{
+ validate_node<CLTargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
+
+ // Extract IO and info
+ CLTargetInfo::TensorType *input0 = get_backing_tensor<CLTargetInfo>(node.input(0));
+ CLTargetInfo::TensorType *input1 = get_backing_tensor<CLTargetInfo>(node.input(1));
+ CLTargetInfo::TensorType *input2 = get_backing_tensor<CLTargetInfo>(node.input(2));
+ CLTargetInfo::TensorType *output0 = get_backing_tensor<CLTargetInfo>(node.output(0));
+ CLTargetInfo::TensorType *output1 = get_backing_tensor<CLTargetInfo>(node.output(1));
+ CLTargetInfo::TensorType *output2 = get_backing_tensor<CLTargetInfo>(node.output(2));
+ CLTargetInfo::TensorType *output3 = get_backing_tensor<CLTargetInfo>(node.output(3));
+ const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
+
+ ARM_COMPUTE_ERROR_ON(input0 == nullptr);
+ ARM_COMPUTE_ERROR_ON(input1 == nullptr);
+ ARM_COMPUTE_ERROR_ON(input2 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output0 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output1 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output2 == nullptr);
+ ARM_COMPUTE_ERROR_ON(output3 == nullptr);
+
+ // Create and configure function
+ auto func = support::cpp14::make_unique<CPPDetectionPostProcessLayer>();
+ func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
+
+ // Log info
+ ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+ << node.name()
+ << " Type: " << node.type()
+ << " Target: " << CLTargetInfo::TargetType
+ << " Data Type: " << input0->info()->data_type()
+ << " Input0 shape: " << input0->info()->tensor_shape()
+ << " Input1 shape: " << input1->info()->tensor_shape()
+ << " Input2 shape: " << input2->info()->tensor_shape()
+ << " Output0 shape: " << output0->info()->tensor_shape()
+ << " Output1 shape: " << output1->info()->tensor_shape()
+ << " Output2 shape: " << output2->info()->tensor_shape()
+ << " Output3 shape: " << output3->info()->tensor_shape()
+ << " DetectionPostProcessLayer info: " << detect_info
+ << std::endl);
+
+ auto wrap_function = support::cpp14::make_unique<CPPWrapperFunction>();
+
+ wrap_function->register_function(std::move(func));
+ wrap_function->register_tensor(input0);
+ wrap_function->register_tensor(input1);
+ wrap_function->register_tensor(input2);
+ wrap_function->register_tensor(output0);
+ wrap_function->register_tensor(output1);
+ wrap_function->register_tensor(output2);
+ wrap_function->register_tensor(output3);
+
+ return std::move(wrap_function);
+}
} // namespace detail
std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &ctx)
@@ -196,6 +252,8 @@ std::unique_ptr<IFunction> CLFunctionFactory::create(INode *node, GraphContext &
return detail::create_depthwise_convolution_layer<CLDepthwiseConvolutionLayerFunctions, CLTargetInfo>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::create_detection_output_layer<CPPDetectionOutputLayer, CLTargetInfo>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
+ case NodeType::DetectionPostProcessLayer:
+ return detail::create_detection_post_process_layer<CPPDetectionPostProcessLayer, CLTargetInfo>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
case NodeType::EltwiseLayer:
return detail::create_eltwise_layer<CLEltwiseFunctions, CLTargetInfo>(*polymorphic_downcast<EltwiseLayerNode *>(node));
case NodeType::FlattenLayer:
diff --git a/src/graph/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
index 78771102e8..40ec508767 100644
--- a/src/graph/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -62,6 +62,8 @@ Status CLNodeValidator::validate(INode *node)
CLDepthwiseConvolutionLayer3x3>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::validate_detection_output_layer<CPPDetectionOutputLayer>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
+ case NodeType::DetectionPostProcessLayer:
+ return detail::validate_detection_post_process_layer<CPPDetectionPostProcessLayer>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
case NodeType::GenerateProposalsLayer:
return detail::validate_generate_proposals_layer<CLGenerateProposalsLayer>(*polymorphic_downcast<GenerateProposalsLayerNode *>(node));
case NodeType::NormalizePlanarYUVLayer:
diff --git a/src/graph/backends/GLES/GCNodeValidator.cpp b/src/graph/backends/GLES/GCNodeValidator.cpp
index a767d7b107..9cbb9a12ef 100644
--- a/src/graph/backends/GLES/GCNodeValidator.cpp
+++ b/src/graph/backends/GLES/GCNodeValidator.cpp
@@ -113,6 +113,8 @@ Status GCNodeValidator::validate(INode *node)
return validate_depthwise_convolution_layer(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : DetectionOutputLayer");
+ case NodeType::DetectionPostProcessLayer:
+ return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : DetectionPostProcessLayer");
case NodeType::FlattenLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : FlattenLayer");
case NodeType::GenerateProposalsLayer:
diff --git a/src/graph/backends/NEON/NEFunctionFactory.cpp b/src/graph/backends/NEON/NEFunctionFactory.cpp
index b808ef81f9..852de549fa 100644
--- a/src/graph/backends/NEON/NEFunctionFactory.cpp
+++ b/src/graph/backends/NEON/NEFunctionFactory.cpp
@@ -215,6 +215,8 @@ std::unique_ptr<IFunction> NEFunctionFactory::create(INode *node, GraphContext &
return detail::create_depthwise_convolution_layer<NEDepthwiseConvolutionLayerFunctions, NETargetInfo>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::create_detection_output_layer<CPPDetectionOutputLayer, NETargetInfo>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
+ case NodeType::DetectionPostProcessLayer:
+ return detail::create_detection_post_process_layer<CPPDetectionPostProcessLayer, NETargetInfo>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
case NodeType::EltwiseLayer:
return detail::create_eltwise_layer<NEEltwiseFunctions, NETargetInfo>(*polymorphic_downcast<EltwiseLayerNode *>(node));
case NodeType::FlattenLayer:
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index 3b1d2aa59c..734b3401f7 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -62,6 +62,8 @@ Status NENodeValidator::validate(INode *node)
NEDepthwiseConvolutionLayer3x3>(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
case NodeType::DetectionOutputLayer:
return detail::validate_detection_output_layer<CPPDetectionOutputLayer>(*polymorphic_downcast<DetectionOutputLayerNode *>(node));
+ case NodeType::DetectionPostProcessLayer:
+ return detail::validate_detection_post_process_layer<CPPDetectionPostProcessLayer>(*polymorphic_downcast<DetectionPostProcessLayerNode *>(node));
case NodeType::GenerateProposalsLayer:
return ARM_COMPUTE_CREATE_ERROR(arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported operation : GenerateProposalsLayer");
case NodeType::NormalizePlanarYUVLayer:
diff --git a/src/graph/nodes/DetectionPostProcessLayerNode.cpp b/src/graph/nodes/DetectionPostProcessLayerNode.cpp
new file mode 100644
index 0000000000..4a5df1ac4e
--- /dev/null
+++ b/src/graph/nodes/DetectionPostProcessLayerNode.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph/nodes/DetectionPostProcessLayerNode.h"
+
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph/INodeVisitor.h"
+#include "arm_compute/graph/Utils.h"
+
+namespace arm_compute
+{
+namespace graph
+{
+DetectionPostProcessLayerNode::DetectionPostProcessLayerNode(DetectionPostProcessLayerInfo detection_info)
+ : _info(detection_info)
+{
+ _input_edges.resize(3, EmptyEdgeID);
+ _outputs.resize(4, NullTensorID);
+}
+
+DetectionPostProcessLayerInfo DetectionPostProcessLayerNode::detection_post_process_info() const
+{
+ return _info;
+}
+
+bool DetectionPostProcessLayerNode::forward_descriptors()
+{
+ if((input_id(0) != NullTensorID) && (input_id(1) != NullTensorID) && (input_id(2) != NullTensorID) && (output_id(0) != NullTensorID) && (output_id(1) != NullTensorID)
+ && (output_id(2) != NullTensorID) && (output_id(3) != NullTensorID))
+ {
+ for(unsigned int i = 0; i < 4; ++i)
+ {
+ Tensor *dst = output(i);
+ ARM_COMPUTE_ERROR_ON(dst == nullptr);
+ dst->desc() = configure_output(i);
+ }
+ return true;
+ }
+ return false;
+}
+
+TensorDescriptor DetectionPostProcessLayerNode::configure_output(size_t idx) const
+{
+ ARM_COMPUTE_UNUSED(idx);
+ ARM_COMPUTE_ERROR_ON(idx >= _outputs.size());
+
+ TensorDescriptor output_desc;
+ const unsigned int num_detected_box = _info.max_detections() * _info.max_classes_per_detection();
+
+ switch(idx)
+ {
+ case 0:
+ // Configure boxes output
+ output_desc.shape = TensorShape(kNumCoordBox, num_detected_box, kBatchSize);
+ break;
+ case 1:
+ case 2:
+ // Configure classes or scores output
+ output_desc.shape = TensorShape(num_detected_box, kBatchSize);
+ break;
+ case 3:
+ // Configure num_detection
+ output_desc.shape = TensorShape(1);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported output index");
+ }
+ output_desc.data_type = DataType::F32;
+
+ return output_desc;
+}
+
+NodeType DetectionPostProcessLayerNode::type() const
+{
+ return NodeType::DetectionPostProcessLayer;
+}
+
+void DetectionPostProcessLayerNode::accept(INodeVisitor &v)
+{
+ v.visit(*this);
+}
+} // namespace graph
+} // namespace arm_compute
\ No newline at end of file
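A worked example of the shape logic in configure_output() above; the info values are illustrative, not part of the patch.

void detection_post_process_output_shapes_example()
{
    using namespace arm_compute;

    const DetectionPostProcessLayerInfo info(10U /* max_detections */, 1U /* max_classes_per_detection */,
                                             0.4f, 0.6f, 90U, { 10.f, 10.f, 5.f, 5.f });
    const unsigned int num_detected_box = info.max_detections() * info.max_classes_per_detection(); // 10

    const TensorShape boxes_shape(4U, num_detected_box, 1U); // output 0: [kNumCoordBox, num_detected_box, kBatchSize]
    const TensorShape classes_shape(num_detected_box, 1U);   // output 1: [num_detected_box, kBatchSize]
    const TensorShape scores_shape(num_detected_box, 1U);    // output 2: [num_detected_box, kBatchSize]
    const TensorShape num_detection_shape(1U);               // output 3: [1]
    // All four output descriptors use DataType::F32.
    (void)boxes_shape; (void)classes_shape; (void)scores_shape; (void)num_detection_shape;
}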
diff --git a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
index a1f4e6e89c..13a34b43cd 100644
--- a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
+++ b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
@@ -166,9 +166,9 @@ void retrieve_all_conf_scores(const ITensor *input_conf, const int num,
* @param[out] all_location_predictions All the location predictions.
*
*/
-void retrieve_all_priorbox(const ITensor *input_priorbox,
- const int num_priors,
- std::vector<NormalizedBBox> &all_prior_bboxes,
+void retrieve_all_priorbox(const ITensor *input_priorbox,
+ const int num_priors,
+ std::vector<BBox> &all_prior_bboxes,
std::vector<std::array<float, 4>> &all_prior_variances)
{
for(int i = 0; i < num_priors; ++i)
@@ -206,9 +206,9 @@ void retrieve_all_priorbox(const ITensor *input_priorbox,
* @param[out] decode_bbox The decoded bboxes.
*
*/
-void DecodeBBox(const NormalizedBBox &prior_bbox, const std::array<float, 4> &prior_variance,
+void DecodeBBox(const BBox &prior_bbox, const std::array<float, 4> &prior_variance,
const DetectionOutputLayerCodeType code_type, const bool variance_encoded_in_target,
- const bool clip_bbox, const NormalizedBBox &bbox, NormalizedBBox &decode_bbox)
+ const bool clip_bbox, const BBox &bbox, BBox &decode_bbox)
{
// if the variance is encoded in target, we simply need to add the offset predictions
// otherwise we need to scale the offset accordingly.
@@ -287,7 +287,7 @@ void DecodeBBox(const NormalizedBBox &prior_bbox, const std::array<float, 4> &pr
* @param[out] indices The kept indices of bboxes after nms.
*
*/
-void ApplyNMSFast(const std::vector<NormalizedBBox> &bboxes,
+void ApplyNMSFast(const std::vector<BBox> &bboxes,
const std::vector<float> &scores, const float score_threshold,
const float nms_threshold, const float eta, const int top_k,
std::vector<int> &indices)
@@ -329,7 +329,7 @@ void ApplyNMSFast(const std::vector<NormalizedBBox> &bboxes,
if(keep)
{
// Compute the jaccard (intersection over union IoU) overlap between two bboxes.
- NormalizedBBox intersect_bbox = std::array<float, 4>({ { 0, 0, 0, 0 } });
+ BBox intersect_bbox = std::array<float, 4>({ 0, 0, 0, 0 });
if(bboxes[kept_idx][0] > bboxes[idx][2] || bboxes[kept_idx][2] < bboxes[idx][0] || bboxes[kept_idx][1] > bboxes[idx][3] || bboxes[kept_idx][3] < bboxes[idx][1])
{
intersect_bbox = std::array<float, 4>({ { 0, 0, 0, 0 } });
@@ -466,7 +466,7 @@ void CPPDetectionOutputLayer::run()
}
ARM_COMPUTE_ERROR_ON_MSG(_all_location_predictions[i].find(label) == _all_location_predictions[i].end(), "Could not find location predictions for label %d.", label);
- const std::vector<NormalizedBBox> &label_loc_preds = _all_location_predictions[i].find(label)->second;
+ const std::vector<BBox> &label_loc_preds = _all_location_predictions[i].find(label)->second;
const int num_bboxes = _all_prior_bboxes.size();
ARM_COMPUTE_ERROR_ON(_all_prior_variances[i].size() != 4);
@@ -499,8 +499,8 @@ void CPPDetectionOutputLayer::run()
{
ARM_COMPUTE_ERROR("Could not find predictions for label %d.", label);
}
- const std::vector<float> &scores = conf_scores.find(c)->second;
- const std::vector<NormalizedBBox> &bboxes = decode_bboxes.find(label)->second;
+ const std::vector<float> &scores = conf_scores.find(c)->second;
+ const std::vector<BBox> &bboxes = decode_bboxes.find(label)->second;
ApplyNMSFast(bboxes, scores, _info.confidence_threshold(), _info.nms_threshold(), _info.eta(), _info.top_k(), indices[c]);
@@ -572,8 +572,8 @@ void CPPDetectionOutputLayer::run()
// or there are no location predictions for current label.
ARM_COMPUTE_ERROR("Could not find predictions for the label %d.", label);
}
- const std::vector<NormalizedBBox> &bboxes = decode_bboxes.find(loc_label)->second;
- const std::vector<int> &indices = it.second;
+ const std::vector<BBox> &bboxes = decode_bboxes.find(loc_label)->second;
+ const std::vector<int> &indices = it.second;
for(auto idx : indices)
{
diff --git a/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
new file mode 100644
index 0000000000..2997b593c6
--- /dev/null
+++ b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Validate.h"
+#include "support/ToolchainSupport.h"
+
+#include <cstddef>
+#include <ios>
+#include <list>
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors,
+ ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection,
+ DetectionPostProcessLayerInfo info, const unsigned int kBatchSize, const unsigned int kNumCoordBox)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input_box_encoding, input_class_score, input_anchors);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input_box_encoding, 1, DataType::F32, DataType::QASYMM8);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input_box_encoding, input_class_score, input_anchors);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->num_dimensions() > 3, "The location input tensor shape should be [4, N, kBatchSize].");
+ if(input_box_encoding->num_dimensions() > 2)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->dimension(2) != kBatchSize, "The third dimension of the input box_encoding tensor should be equal to %d.", kBatchSize);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->dimension(0) != kNumCoordBox, "The first dimension of the input box_encoding tensor should be equal to %d.", kNumCoordBox);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_class_score->dimension(0) != (info.num_classes() + 1),
+ "The first dimension of the input class_prediction should be equal to the number of classes plus one.");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_anchors->num_dimensions() > 3, "The anchors input tensor shape should be [4, N, kBatchSize].");
+ if(input_anchors->num_dimensions() > 2)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_anchors->dimension(0) != kNumCoordBox, "The first dimension of the input anchors tensor should be equal to %d.", kNumCoordBox);
+ }
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((input_box_encoding->dimension(1) != input_class_score->dimension(1))
+ || (input_box_encoding->dimension(1) != input_anchors->dimension(1)),
+ "The second dimension of the inputs should be the same.");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(num_detection->num_dimensions() > 1, "The num_detection output tensor shape should be [M].");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((info.iou_threshold() <= 0.0f) || (info.iou_threshold() > 1.0f), "The intersection over union should be positive and less than 1.");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(info.max_classes_per_detection() <= 0, "The number of max classes per detection should be positive.");
+
+ const unsigned int num_detected_boxes = info.max_detections() * info.max_classes_per_detection();
+
+ // Validate configured outputs
+ if(output_boxes->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_boxes->tensor_shape(), TensorShape(4U, num_detected_boxes, 1U));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_boxes, 1, DataType::F32);
+ }
+ if(output_classes->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_classes->tensor_shape(), TensorShape(num_detected_boxes, 1U));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_classes, 1, DataType::F32);
+ }
+ if(output_scores->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output_scores->tensor_shape(), TensorShape(num_detected_boxes, 1U));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_scores, 1, DataType::F32);
+ }
+ if(num_detection->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(num_detection->tensor_shape(), TensorShape(1U));
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(num_detection, 1, DataType::F32);
+ }
+
+ return Status{};
+}
+
+/** Decode bboxes according to the anchors and scale info.
+ *
+ * @param[in] input_box_encoding The input box encodings (center-size encoded).
+ * @param[in] input_anchors The corresponding input anchors.
+ * @param[in] info The detection information.
+ * @param[out] decoded_boxes The decoded bboxes.
+ */
+void DecodeCenterSizeBoxes(const ITensor *input_box_encoding, const ITensor *input_anchors, DetectionPostProcessLayerInfo info, Tensor *decoded_boxes)
+{
+ const QuantizationInfo &qi_box = input_box_encoding->info()->quantization_info();
+ const QuantizationInfo &qi_anchors = input_anchors->info()->quantization_info();
+ BBox box_centersize;
+ BBox anchor;
+
+ Window win;
+ win.use_tensor_dimensions(input_box_encoding->info()->tensor_shape());
+ win.set_dimension_step(0U, 4U);
+ win.set_dimension_step(1U, 1U);
+ Iterator box_it(input_box_encoding, win);
+ Iterator anchor_it(input_anchors, win);
+ Iterator decoded_it(decoded_boxes, win);
+
+ const float half_factor = 0.5f;
+
+ execute_window_loop(win, [&](const Coordinates &)
+ {
+ if(is_data_type_quantized(input_box_encoding->info()->data_type()))
+ {
+ const auto box_ptr = reinterpret_cast<const qasymm8_t *>(box_it.ptr());
+ const auto anchor_ptr = reinterpret_cast<const qasymm8_t *>(anchor_it.ptr());
+ box_centersize = BBox({ dequantize_qasymm8(*box_ptr, qi_box), dequantize_qasymm8(*(box_ptr + 1), qi_box),
+ dequantize_qasymm8(*(2 + box_ptr), qi_box), dequantize_qasymm8(*(3 + box_ptr), qi_box)
+ });
+ anchor = BBox({ dequantize_qasymm8(*anchor_ptr, qi_anchors), dequantize_qasymm8(*(anchor_ptr + 1), qi_anchors),
+ dequantize_qasymm8(*(2 + anchor_ptr), qi_anchors), dequantize_qasymm8(*(3 + anchor_ptr), qi_anchors)
+ });
+ }
+ else
+ {
+ const auto box_ptr = reinterpret_cast<const float *>(box_it.ptr());
+ const auto anchor_ptr = reinterpret_cast<const float *>(anchor_it.ptr());
+ box_centersize = BBox({ *box_ptr, *(box_ptr + 1), *(2 + box_ptr), *(3 + box_ptr) });
+ anchor = BBox({ *anchor_ptr, *(anchor_ptr + 1), *(2 + anchor_ptr), *(3 + anchor_ptr) });
+ }
+
+ // BBox is equivalent to CenterSizeEncoding [y,x,h,w]
+ const float y_center = box_centersize[0] / info.scale_value_y() * anchor[2] + anchor[0];
+ const float x_center = box_centersize[1] / info.scale_value_x() * anchor[3] + anchor[1];
+ const float half_h = half_factor * static_cast<float>(std::exp(box_centersize[2] / info.scale_value_h())) * anchor[2];
+ const float half_w = half_factor * static_cast<float>(std::exp(box_centersize[3] / info.scale_value_w())) * anchor[3];
+
+ // Box Corner encoding boxes are saved as [xmin, ymin, xmax, ymax]
+ auto decoded_ptr = reinterpret_cast<float *>(decoded_it.ptr());
+ *(decoded_ptr) = x_center - half_w; // xmin
+ *(1 + decoded_ptr) = y_center - half_h; // ymin
+ *(2 + decoded_ptr) = x_center + half_w; // xmax
+ *(3 + decoded_ptr) = y_center + half_h; // ymax
+ },
+ box_it, anchor_it, decoded_it);
+}
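// Worked example (not part of the patch) of the decode step above, with illustrative numbers:
// scales [y, x, h, w] = [10, 10, 5, 5], anchor [y_center, x_center, h, w] = [0.5, 0.5, 0.2, 0.2]
// and box encoding [ty, tx, th, tw] = [1.0, -1.0, 0.5, 0.0]. Assumes <cmath> is available.
float decode_center_size_example()
{
    const float scale_y = 10.f, scale_x = 10.f, scale_h = 5.f, scale_w = 5.f;
    const float anchor[4] = { 0.5f, 0.5f, 0.2f, 0.2f };
    const float box[4]    = { 1.0f, -1.0f, 0.5f, 0.0f };

    const float y_center = box[0] / scale_y * anchor[2] + anchor[0];      // 0.52
    const float x_center = box[1] / scale_x * anchor[3] + anchor[1];      // 0.48
    const float half_h   = 0.5f * std::exp(box[2] / scale_h) * anchor[2]; // ~0.1105
    const float half_w   = 0.5f * std::exp(box[3] / scale_w) * anchor[3]; // 0.10

    // Corner encoding, stored as [xmin, ymin, xmax, ymax]:
    const float xmin = x_center - half_w; // 0.38
    const float ymin = y_center - half_h; // ~0.4095
    const float xmax = x_center + half_w; // 0.58
    const float ymax = y_center + half_h; // ~0.6305
    return xmin + ymin + xmax + ymax;
}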
+
+void SaveOutputs(const Tensor *decoded_boxes, const std::vector<int> &result_idx_boxes_after_nms, const std::vector<float> &result_scores_after_nms, const std::vector<int> &result_classes_after_nms,
+ std::vector<unsigned int> &sorted_indices, const unsigned int num_output, const unsigned int max_detections, ITensor *output_boxes, ITensor *output_classes, ITensor *output_scores,
+ ITensor *num_detection)
+{
+ // Decoded boxes are stored as [xmin, ymin, xmax, ymax]; outputs follow the [ymin, xmin, ymax, xmax] convention
+ unsigned int i = 0;
+ for(; i < num_output; ++i)
+ {
+ const unsigned int box_in_idx = result_idx_boxes_after_nms[sorted_indices[i]];
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(0, i)))) = *(reinterpret_cast<float *>(decoded_boxes->ptr_to_element(Coordinates(1, box_in_idx))));
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(1, i)))) = *(reinterpret_cast<float *>(decoded_boxes->ptr_to_element(Coordinates(0, box_in_idx))));
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(2, i)))) = *(reinterpret_cast<float *>(decoded_boxes->ptr_to_element(Coordinates(3, box_in_idx))));
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(3, i)))) = *(reinterpret_cast<float *>(decoded_boxes->ptr_to_element(Coordinates(2, box_in_idx))));
+ *(reinterpret_cast<float *>(output_classes->ptr_to_element(Coordinates(i)))) = static_cast<float>(result_classes_after_nms[sorted_indices[i]]);
+ *(reinterpret_cast<float *>(output_scores->ptr_to_element(Coordinates(i)))) = result_scores_after_nms[sorted_indices[i]];
+ }
+ for(; i < max_detections; ++i)
+ {
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(1, i)))) = 0.0f;
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(0, i)))) = 0.0f;
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(3, i)))) = 0.0f;
+ *(reinterpret_cast<float *>(output_boxes->ptr_to_element(Coordinates(2, i)))) = 0.0f;
+ *(reinterpret_cast<float *>(output_classes->ptr_to_element(Coordinates(i)))) = 0.0f;
+ *(reinterpret_cast<float *>(output_scores->ptr_to_element(Coordinates(i)))) = 0.0f;
+ }
+ *(reinterpret_cast<float *>(num_detection->ptr_to_element(Coordinates(0)))) = num_output;
+}
+} // namespace
+
+CPPDetectionPostProcessLayer::CPPDetectionPostProcessLayer(std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _nms(), _input_box_encoding(nullptr), _input_scores(nullptr), _input_anchors(nullptr), _output_boxes(nullptr), _output_classes(nullptr),
+ _output_scores(nullptr), _num_detection(nullptr), _info(), _num_boxes(), _num_classes_with_background(), _num_max_detected_boxes(), _decoded_boxes(), _decoded_scores(), _selected_indices(),
+ _class_scores(), _input_scores_to_use(nullptr), _result_idx_boxes_after_nms(), _result_classes_after_nms(), _result_scores_after_nms(), _sorted_indices(), _box_scores()
+{
+}
+
+void CPPDetectionPostProcessLayer::configure(const ITensor *input_box_encoding, const ITensor *input_scores, const ITensor *input_anchors,
+ ITensor *output_boxes, ITensor *output_classes, ITensor *output_scores, ITensor *num_detection, DetectionPostProcessLayerInfo info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input_box_encoding, input_scores, input_anchors, output_boxes, output_classes, output_scores);
+ _num_max_detected_boxes = info.max_detections() * info.max_classes_per_detection();
+
+ auto_init_if_empty(*output_boxes->info(), TensorInfo(TensorShape(_kNumCoordBox, _num_max_detected_boxes, _kBatchSize), 1, DataType::F32));
+ auto_init_if_empty(*output_classes->info(), TensorInfo(TensorShape(_num_max_detected_boxes, _kBatchSize), 1, DataType::F32));
+ auto_init_if_empty(*output_scores->info(), TensorInfo(TensorShape(_num_max_detected_boxes, _kBatchSize), 1, DataType::F32));
+ auto_init_if_empty(*num_detection->info(), TensorInfo(TensorShape(1U), 1, DataType::F32));
+
+ // Perform validation step
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input_box_encoding->info(), input_scores->info(), input_anchors->info(), output_boxes->info(), output_classes->info(), output_scores->info(),
+ num_detection->info(),
+ info, _kBatchSize, _kNumCoordBox));
+
+ _input_box_encoding = input_box_encoding;
+ _input_scores = input_scores;
+ _input_anchors = input_anchors;
+ _output_boxes = output_boxes;
+ _output_classes = output_classes;
+ _output_scores = output_scores;
+ _num_detection = num_detection;
+ _info = info;
+ _num_boxes = input_box_encoding->info()->dimension(1);
+ _num_classes_with_background = _input_scores->info()->dimension(0);
+
+ auto_init_if_empty(*_decoded_boxes.info(), TensorInfo(TensorShape(_kNumCoordBox, _input_box_encoding->info()->dimension(1), _kBatchSize), 1, DataType::F32));
+ auto_init_if_empty(*_decoded_scores.info(), TensorInfo(TensorShape(_input_scores->info()->dimension(0), _input_scores->info()->dimension(1), _kBatchSize), 1, DataType::F32));
+ auto_init_if_empty(*_selected_indices.info(), TensorInfo(TensorShape(info.max_detections()), 1, DataType::S32));
+
+ const unsigned int num_classes_per_box = std::min(info.max_classes_per_detection(), info.num_classes());
+ auto_init_if_empty(*_class_scores.info(), TensorInfo(info.use_regular_nms() ? TensorShape(_num_boxes) : TensorShape(_num_boxes * num_classes_per_box), 1, DataType::F32));
+
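+ // For quantized inputs the scores are dequantized into _decoded_scores at run time and read from there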
+ _input_scores_to_use = is_data_type_quantized(input_box_encoding->info()->data_type()) ? &_decoded_scores : _input_scores;
+
+ // Manage intermediate buffers
+ _memory_group.manage(&_decoded_boxes);
+ _memory_group.manage(&_decoded_scores);
+ _memory_group.manage(&_selected_indices);
+ _memory_group.manage(&_class_scores);
+ _nms.configure(&_decoded_boxes, &_class_scores, &_selected_indices, info.use_regular_nms() ? info.detection_per_class() : info.max_detections(), info.nms_score_threshold(), info.iou_threshold());
+
+ // Allocate and reserve intermediate tensors and vectors
+ _decoded_boxes.allocator()->allocate();
+ _decoded_scores.allocator()->allocate();
+ _selected_indices.allocator()->allocate();
+ _class_scores.allocator()->allocate();
+
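+ // Reserve the result vectors for the worst-case number of candidates produced by the selected NMS variant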
+ if(info.use_regular_nms())
+ {
+ _result_idx_boxes_after_nms.reserve(_info.detection_per_class() * _info.num_classes());
+ _result_classes_after_nms.reserve(_info.detection_per_class() * _info.num_classes());
+ _result_scores_after_nms.reserve(_info.detection_per_class() * _info.num_classes());
+ }
+ else
+ {
+ _result_idx_boxes_after_nms.reserve(num_classes_per_box * _num_boxes);
+ _result_classes_after_nms.reserve(num_classes_per_box * _num_boxes);
+ _result_scores_after_nms.reserve(num_classes_per_box * _num_boxes);
+ _box_scores.reserve(_num_boxes);
+ }
+ _sorted_indices.resize(info.use_regular_nms() ? info.max_detections() : info.num_classes());
+}
+
+Status CPPDetectionPostProcessLayer::validate(const ITensorInfo *input_box_encoding, const ITensorInfo *input_class_score, const ITensorInfo *input_anchors,
+ ITensorInfo *output_boxes, ITensorInfo *output_classes, ITensorInfo *output_scores, ITensorInfo *num_detection, DetectionPostProcessLayerInfo info)
+{
+ constexpr unsigned int kBatchSize = 1;
+ constexpr unsigned int kNumCoordBox = 4;
+ const TensorInfo _decoded_boxes_info = TensorInfo(TensorShape(kNumCoordBox, input_box_encoding->dimension(1)), 1, DataType::F32);
+ const TensorInfo _decoded_scores_info = TensorInfo(TensorShape(input_box_encoding->dimension(1)), 1, DataType::F32);
+ const TensorInfo _selected_indices_info = TensorInfo(TensorShape(info.max_detections()), 1, DataType::S32);
+
+ ARM_COMPUTE_RETURN_ON_ERROR(CPPNonMaximumSuppression::validate(&_decoded_boxes_info, &_decoded_scores_info, &_selected_indices_info, info.max_detections(), info.nms_score_threshold(),
+ info.iou_threshold()));
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input_box_encoding, input_class_score, input_anchors, output_boxes, output_classes, output_scores, num_detection, info, kBatchSize, kNumCoordBox));
+
+ return Status{};
+}
+
+void CPPDetectionPostProcessLayer::run()
+{
+ const unsigned int num_classes = _info.num_classes();
+ const unsigned int max_detections = _info.max_detections();
+
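+ // Decode the centre-size box encoding into corner coordinates using the anchors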
+ DecodeCenterSizeBoxes(_input_box_encoding, _input_anchors, _info, &_decoded_boxes);
+
+ // Decode scores if necessary
+ if(is_data_type_quantized(_input_box_encoding->info()->data_type()))
+ {
+ for(unsigned int idx_c = 0; idx_c < _num_classes_with_background; ++idx_c)
+ {
+ for(unsigned int idx_b = 0; idx_b < _num_boxes; ++idx_b)
+ {
+ *(reinterpret_cast<float *>(_decoded_scores.ptr_to_element(Coordinates(idx_c, idx_b)))) =
+ dequantize_qasymm8(*(reinterpret_cast<qasymm8_t *>(_input_scores->ptr_to_element(Coordinates(idx_c, idx_b)))), _input_scores->info()->quantization_info());
+ }
+ }
+ }
+ // Regular NMS
+ if(_info.use_regular_nms())
+ {
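+ // Run NMS once per class on that class' scores, collect the per-class survivors,
+ // then keep the highest-scoring max_detections candidates overall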
+ for(unsigned int c = 0; c < num_classes; ++c)
+ {
+ // For each box, gather the score of class c
+ for(unsigned int i = 0; i < _num_boxes; ++i)
+ {
+ *(reinterpret_cast<float *>(_class_scores.ptr_to_element(Coordinates(i)))) =
+ *(reinterpret_cast<float *>(_input_scores_to_use->ptr_to_element(Coordinates(c + 1, i)))); // i * _num_classes_with_background + c + 1
+ }
+ _nms.run();
+
+ for(unsigned int i = 0; i < _info.detection_per_class(); ++i)
+ {
+ const auto selected_index = *(reinterpret_cast<int *>(_selected_indices.ptr_to_element(Coordinates(i))));
+ if(selected_index == -1)
+ {
+ // NMS marks the unused tail of the selected indices with -1
+ continue;
+ }
+ _result_idx_boxes_after_nms.emplace_back(selected_index);
+ _result_scores_after_nms.emplace_back((reinterpret_cast<float *>(_class_scores.buffer()))[selected_index]);
+ _result_classes_after_nms.emplace_back(c);
+ }
+ }
+
+ // Keep at most max_detections candidates, choosing the highest scores across all classes
+ const auto num_selected = _result_idx_boxes_after_nms.size();
+ const auto num_output = std::min<unsigned int>(max_detections, num_selected);
+
+ // Sort selected indices based on result scores
+ std::iota(_sorted_indices.begin(), _sorted_indices.end(), 0);
+ std::partial_sort(_sorted_indices.data(),
+ _sorted_indices.data() + num_output,
+ _sorted_indices.data() + num_selected,
+ [&](unsigned int first, unsigned int second)
+ {
+ return _result_scores_after_nms[first] > _result_scores_after_nms[second];
+ });
+
+ SaveOutputs(&_decoded_boxes, _result_idx_boxes_after_nms, _result_scores_after_nms, _result_classes_after_nms,
+ _sorted_indices, num_output, max_detections, _output_boxes, _output_classes, _output_scores, _num_detection);
+ }
+ // Fast NMS
+ else
+ {
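+ // For every box keep the top num_classes_per_box class scores, then run a single NMS pass over all candidates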
+ const unsigned int num_classes_per_box = std::min<unsigned int>(_info.max_classes_per_detection(), _info.num_classes());
+ for(unsigned int b = 0, index = 0; b < _num_boxes; ++b)
+ {
+ _box_scores.clear();
+ _sorted_indices.clear();
+ for(unsigned int c = 0; c < num_classes; ++c)
+ {
+ _box_scores.emplace_back(*(reinterpret_cast<float *>(_input_scores_to_use->ptr_to_element(Coordinates(c + 1, b)))));
+ _sorted_indices.push_back(c);
+ }
+ std::partial_sort(_sorted_indices.data(),
+ _sorted_indices.data() + num_classes_per_box,
+ _sorted_indices.data() + num_classes,
+ [&](unsigned int first, unsigned int second)
+ {
+ return _box_scores[first] > _box_scores[second];
+ });
+
+ for(unsigned int i = 0; i < num_classes_per_box; ++i, ++index)
+ {
+ const float score_to_add = _box_scores[_sorted_indices[i]];
+ *(reinterpret_cast<float *>(_class_scores.ptr_to_element(Coordinates(index)))) = score_to_add;
+ _result_scores_after_nms.emplace_back(score_to_add);
+ _result_idx_boxes_after_nms.emplace_back(b);
+ _result_classes_after_nms.emplace_back(_sorted_indices[i]);
+ }
+ }
+
+ // Run NMS
+ _nms.run();
+
+ _sorted_indices.clear();
+ for(unsigned int i = 0; i < max_detections; ++i)
+ {
+ // NMS returns the valid indices first; the unused tail is filled with -1
+ if(*(reinterpret_cast<int *>(_selected_indices.ptr_to_element(Coordinates(i)))) == -1)
+ {
+ break;
+ }
+ _sorted_indices.emplace_back(*(reinterpret_cast<int *>(_selected_indices.ptr_to_element(Coordinates(i)))));
+ }
+ // Keep at most max_detections of the selected candidates
+ const auto num_output = std::min<unsigned int>(_info.max_detections(), _sorted_indices.size());
+
+ SaveOutputs(&_decoded_boxes, _result_idx_boxes_after_nms, _result_scores_after_nms, _result_classes_after_nms,
+ _sorted_indices, num_output, max_detections, _output_boxes, _output_classes, _output_scores, _num_detection);
+ }
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/CPP/DetectionPostProcessLayer.cpp b/tests/validation/CPP/DetectionPostProcessLayer.cpp
new file mode 100644
index 0000000000..51f3452b3d
--- /dev/null
+++ b/tests/validation/CPP/DetectionPostProcessLayer.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CPP/functions/CPPDetectionPostProcessLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+template <typename U, typename T>
+inline void fill_tensor(U &&tensor, const std::vector<T> &v)
+{
+ std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
+}
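+// Quantizes the reference float values to QASYMM8 using the tensor's quantization info before filling it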
+template <typename U, typename T>
+inline void quantize_and_fill_tensor(U &&tensor, const std::vector<T> &v)
+{
+ QuantizationInfo qi = tensor.quantization_info();
+ std::vector<uint8_t> quantized;
+ quantized.reserve(v.size());
+ for(auto elem : v)
+ {
+ quantized.emplace_back(quantize_qasymm8(elem, qi));
+ }
+ std::memcpy(tensor.data(), quantized.data(), sizeof(uint8_t) * quantized.size());
+}
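+// Derives an asymmetric uint8 (scale, offset) quantization covering the given [min, max] range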
+inline QuantizationInfo qinfo_scaleoffset_from_minmax(const float min, const float max)
+{
+ int offset = 0;
+ float scale = 0;
+ const uint8_t qmin = std::numeric_limits<uint8_t>::min();
+ const uint8_t qmax = std::numeric_limits<uint8_t>::max();
+ const float f_qmin = qmin;
+ const float f_qmax = qmax;
+
+ // Continue only if [min,max] is a valid range and not a point
+ if(min != max)
+ {
+ scale = (max - min) / (f_qmax - f_qmin);
+ const float offset_from_min = f_qmin - min / scale;
+ const float offset_from_max = f_qmax - max / scale;
+
+ const float offset_from_min_error = std::abs(f_qmin) + std::abs(min / scale);
+ const float offset_from_max_error = std::abs(f_qmax) + std::abs(max / scale);
+ const float f_offset = offset_from_min_error < offset_from_max_error ? offset_from_min : offset_from_max;
+
+ uint8_t uint8_offset = 0;
+ if(f_offset < f_qmin)
+ {
+ uint8_offset = qmin;
+ }
+ else if(f_offset > f_qmax)
+ {
+ uint8_offset = qmax;
+ }
+ else
+ {
+ uint8_offset = static_cast<uint8_t>(std::round(f_offset));
+ }
+ offset = uint8_offset;
+ }
+ return QuantizationInfo(scale, offset);
+}
+
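+// Configures the layer on a fixed 6-box, 2-class example, runs it and validates the four outputs against the expected tensors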
+inline void base_test_case(DetectionPostProcessLayerInfo info, DataType data_type, const SimpleTensor<float> &expected_output_boxes,
+ const SimpleTensor<float> &expected_output_classes, const SimpleTensor<float> &expected_output_scores, const SimpleTensor<float> &expected_num_detection,
+ AbsoluteTolerance<float> tolerance_boxes = AbsoluteTolerance<float>(0.1f), AbsoluteTolerance<float> tolerance_others = AbsoluteTolerance<float>(0.1f))
+{
+ Tensor box_encoding = create_tensor<Tensor>(TensorShape(4U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(-1.0f, 1.0f));
+ Tensor class_prediction = create_tensor<Tensor>(TensorShape(3U, 6U, 1U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 1.0f));
+ Tensor anchors = create_tensor<Tensor>(TensorShape(4U, 6U), data_type, 1, qinfo_scaleoffset_from_minmax(0.0f, 100.5f));
+
+ box_encoding.allocator()->allocate();
+ class_prediction.allocator()->allocate();
+ anchors.allocator()->allocate();
+
+ std::vector<float> box_encoding_vector =
+ {
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, -1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 1.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f
+ };
+ std::vector<float> class_prediction_vector =
+ {
+ 0.0f, 0.7f, 0.68f,
+ 0.0f, 0.6f, 0.5f,
+ 0.0f, 0.9f, 0.83f,
+ 0.0f, 0.91f, 0.97f,
+ 0.0f, 0.5f, 0.4f,
+ 0.0f, 0.31f, 0.22f
+ };
+ std::vector<float> anchors_vector =
+ {
+ 0.4f, 0.4f, 1.1f, 1.1f,
+ 0.4f, 0.4f, 1.1f, 1.1f,
+ 0.4f, 0.4f, 1.1f, 1.1f,
+ 0.4f, 10.4f, 1.1f, 1.1f,
+ 0.4f, 10.4f, 1.1f, 1.1f,
+ 0.4f, 100.4f, 1.1f, 1.1f
+ };
+
+ // Fill the tensors with the pre-generated values above
+ if(data_type == DataType::F32)
+ {
+ fill_tensor(Accessor(box_encoding), box_encoding_vector);
+ fill_tensor(Accessor(class_prediction), class_prediction_vector);
+ fill_tensor(Accessor(anchors), anchors_vector);
+ }
+ else
+ {
+ quantize_and_fill_tensor(Accessor(box_encoding), box_encoding_vector);
+ quantize_and_fill_tensor(Accessor(class_prediction), class_prediction_vector);
+ quantize_and_fill_tensor(Accessor(anchors), anchors_vector);
+ }
+
+ // Compute the output through the CPP function
+ Tensor output_boxes;
+ Tensor output_classes;
+ Tensor output_scores;
+ Tensor num_detection;
+ CPPDetectionPostProcessLayer detection;
+ detection.configure(&box_encoding, &class_prediction, &anchors, &output_boxes, &output_classes, &output_scores, &num_detection, info);
+
+ output_boxes.allocator()->allocate();
+ output_classes.allocator()->allocate();
+ output_scores.allocator()->allocate();
+ num_detection.allocator()->allocate();
+
+ // Run the function
+ detection.run();
+
+ // Validate against the expected output
+ // Validate output boxes
+ validate(Accessor(output_boxes), expected_output_boxes, tolerance_boxes);
+ // Validate detection classes
+ validate(Accessor(output_classes), expected_output_classes, tolerance_others);
+ // Validate detection scores
+ validate(Accessor(output_scores), expected_output_scores, tolerance_others);
+ // Validate num detections
+ validate(Accessor(num_detection), expected_num_detection, tolerance_others);
+}
+} // namespace
+
+TEST_SUITE(CPP)
+TEST_SUITE(DetectionPostProcessLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("BoxEncodingsInfo", { TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 10U, 3U), 1, DataType::F32), // Mismatching batch_size
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::S8), // Unsupported data type
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong Detection Info
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32), // Wrong boxes dimensions
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)}), // Wrong score dimension
+ framework::dataset::make("ClassPredsInfo",{ TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U ,10U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U ,10U), 1, DataType::QASYMM8)})),
+ framework::dataset::make("AnchorsInfo",{ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 10U, 1U), 1, DataType::QASYMM8)})),
+ framework::dataset::make("OutputBoxInfo", { TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::S8),
+ TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U, 5U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(4U, 3U, 1U), 1, DataType::F32)})),
+ framework::dataset::make("OutputClassesInfo",{ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
+ framework::dataset::make("OutputScoresInfo",{ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(3U, 1U), 1, DataType::F32),
+ TensorInfo(TensorShape(6U, 1U), 1, DataType::F32)})),
+ framework::dataset::make("NumDetectionsInfo",{ TensorInfo(TensorShape(1U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U), 1, DataType::F32),
+ TensorInfo(TensorShape(1U), 1, DataType::F32)})),
+ framework::dataset::make("DetectionPostProcessLayerInfo",{ DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+ DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+ DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+ DetectionPostProcessLayerInfo(3, 1, 0.0f, 1.5f, 2, {0.0f,0.1f,0.1f,0.1f}),
+ DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f}),
+ DetectionPostProcessLayerInfo(3, 1, 0.0f, 0.5f, 2, {0.1f,0.1f,0.1f,0.1f})})),
+ framework::dataset::make("Expected", {true, false, false, false, false, false })),
+ box_encodings_info, classes_info, anchors_info, output_boxes_info, output_classes_info, output_scores_info, num_detection_info, detect_info, expected)
+{
+ const Status status = CPPDetectionPostProcessLayer::validate(&box_encodings_info.clone()->set_is_resizable(false),
+ &classes_info.clone()->set_is_resizable(false),
+ &anchors_info.clone()->set_is_resizable(false),
+ &output_boxes_info.clone()->set_is_resizable(false),
+ &output_classes_info.clone()->set_is_resizable(false),
+ &output_scores_info.clone()->set_is_resizable(false), &num_detection_info.clone()->set_is_resizable(false), detect_info);
+ ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+TEST_SUITE(F32)
+TEST_CASE(Float_general, framework::DatasetMode::ALL)
+{
+ DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+ 0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);
+ // Fill expected detection boxes
+ SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+ fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+ // Fill expected detection classes
+ SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+ // Fill expected detection scores
+ SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+ // Fill expected num detections
+ SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+ fill_tensor(expected_num_detection, std::vector<float> { 3.f });
+ // Run base test
+ base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
+}
+
+TEST_CASE(Float_fast, framework::DatasetMode::ALL)
+{
+ DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+ 0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+ false /*use_regular_nms*/, 1 /*detections_per_class*/);
+
+ // Fill expected detection boxes
+ SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+ fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+ // Fill expected detection classes
+ SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+ // Fill expected detection scores
+ SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+ // Fill expected num detections
+ SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+ fill_tensor(expected_num_detection, std::vector<float> { 3.f });
+
+ // Run base test
+ base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
+}
+
+TEST_CASE(Float_regular, framework::DatasetMode::ALL)
+{
+ DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+ 0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+ true /*use_regular_nms*/, 1 /*detections_per_class*/);
+
+ // Fill expected detection boxes
+ SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+ fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
+ // Fill expected detection classes
+ SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+ // Fill expected detection scores
+ SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.91f, 0.0f });
+ // Fill expected num detections
+ SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+ fill_tensor(expected_num_detection, std::vector<float> { 2.f });
+
+ // Run test
+ base_test_case(info, DataType::F32, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection);
+}
+TEST_SUITE_END() // F32
+
+TEST_SUITE(QASYMM8)
+TEST_CASE(Quantized_general, framework::DatasetMode::ALL)
+{
+ DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+ 0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/);
+
+ // Fill expected detection boxes
+ SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+ fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+ // Fill expected detection classes
+ SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+ // Fill expected detection scores
+ SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+ // Fill expected num detections
+ SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+ fill_tensor(expected_num_detection, std::vector<float> { 3.f });
+ // Run test
+ base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_CASE(Quantized_fast, framework::DatasetMode::ALL)
+{
+ DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+ 0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+ false /*use_regular_nms*/, 1 /*detections_per_class*/);
+
+ // Fill expected detection boxes
+ SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+ fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, -0.15, 0.95, 0.95, -0.15, 99.85, 0.95, 100.95 });
+ // Fill expected detection classes
+ SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+ // Fill expected detection scores
+ SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_scores, std::vector<float> { 0.97f, 0.95f, 0.31f });
+ // Fill expected num detections
+ SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+ fill_tensor(expected_num_detection, std::vector<float> { 3.f });
+
+ // Run base test
+ base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_CASE(Quantized_regular, framework::DatasetMode::ALL)
+{
+ DetectionPostProcessLayerInfo info = DetectionPostProcessLayerInfo(3 /*max_detections*/, 1 /*max_classes_per_detection*/, 0.0 /*nms_score_threshold*/,
+ 0.5 /*nms_iou_threshold*/, 2 /*num_classes*/, { 11.0, 11.0, 6.0, 6.0 } /*scale*/,
+ true /*use_regular_nms*/, 1 /*detections_per_class*/);
+ // Fill expected detection boxes
+ SimpleTensor<float> expected_output_boxes(TensorShape(4U, 3U), DataType::F32);
+ fill_tensor(expected_output_boxes, std::vector<float> { -0.15, 9.85, 0.95, 10.95, -0.15, 9.85, 0.95, 10.95, 0.0f, 0.0f, 0.0f, 0.0f });
+ // Fill expected detection classes
+ SimpleTensor<float> expected_output_classes(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_classes, std::vector<float> { 1.0f, 0.0f, 0.0f });
+ // Fill expected detection scores
+ SimpleTensor<float> expected_output_scores(TensorShape(3U), DataType::F32);
+ fill_tensor(expected_output_scores, std::vector<float> { 0.95f, 0.91f, 0.0f });
+ // Fill expected num detections
+ SimpleTensor<float> expected_num_detection(TensorShape(1U), DataType::F32);
+ fill_tensor(expected_num_detection, std::vector<float> { 2.f });
+
+ // Run test
+ base_test_case(info, DataType::QASYMM8, expected_output_boxes, expected_output_classes, expected_output_scores, expected_num_detection, AbsoluteTolerance<float>(0.3f));
+}
+
+TEST_SUITE_END() // QASYMM8
+
+TEST_SUITE_END() // DetectionPostProcessLayer
+TEST_SUITE_END() // CPP
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index dad9aed6a5..00165cd6c2 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -140,12 +140,14 @@ bool DummyAccessor::access_tensor(ITensor &tensor)
return ret;
}
-NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream)
+NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream)
: _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
{
- NumPyBinLoader loader(_filename);
+ NumPyBinLoader loader(_filename, data_layout);
TensorInfo info(shape, 1, data_type);
+ info.set_data_layout(data_layout);
+
_npy_tensor.allocator()->init(info);
_npy_tensor.allocator()->allocate();
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index fe19eb3196..3417135f17 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -145,9 +145,10 @@ public:
* @param[in] npy_path Path to npy file.
* @param[in] shape Shape of the numpy tensor data.
* @param[in] data_type DataType of the numpy tensor data.
+ * @param[in] data_layout (Optional) DataLayout of the numpy tensor data.
* @param[out] output_stream (Optional) Output stream
*/
- NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream = std::cout);
+ NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout = DataLayout::NCHW, std::ostream &output_stream = std::cout);
/** Allow instances of this class to be move constructed */
NumPyAccessor(NumPyAccessor &&) = default;
/** Prevent instances of this class from being copied (As this class contains pointers) */
@@ -567,11 +568,13 @@ inline std::unique_ptr<graph::ITensorAccessor> get_detection_output_accessor(con
* @param[in] npy_path Path to npy file.
* @param[in] shape Shape of the numpy tensor data.
* @param[in] data_type DataType of the numpy tensor data.
+ * @param[in] data_layout (Optional) DataLayout of the numpy tensor data.
* @param[out] output_stream (Optional) Output stream
*
* @return An appropriate tensor accessor
*/
-inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std::string &npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream = std::cout)
+inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std::string &npy_path, TensorShape shape, DataType data_type, DataLayout data_layout = DataLayout::NCHW,
+ std::ostream &output_stream = std::cout)
{
if(npy_path.empty())
{
@@ -579,7 +582,7 @@ inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std
}
else
{
- return arm_compute::support::cpp14::make_unique<NumPyAccessor>(npy_path, shape, data_type, output_stream);
+ return arm_compute::support::cpp14::make_unique<NumPyAccessor>(npy_path, shape, data_type, data_layout, output_stream);
}
}
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 6ba6f45f8c..f51d2368e1 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -2059,6 +2059,43 @@ inline std::string to_string(const DetectionOutputLayerInfo &detection_info)
str << detection_info;
return str.str();
}
+/** Formatted output of the DetectionPostProcessLayerInfo type.
+ *
+ * @param[out] os Output stream
+ * @param[in] detection_info Type to output
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const DetectionPostProcessLayerInfo &detection_info)
+{
+ os << "{MaxDetections=" << detection_info.max_detections() << ","
+ << "MaxClassesPerDetection=" << detection_info.max_classes_per_detection() << ","
+ << "NmsScoreThreshold=" << detection_info.nms_score_threshold() << ","
+ << "NmsIouThreshold=" << detection_info.iou_threshold() << ","
+ << "NumClasses=" << detection_info.num_classes() << ","
+ << "ScaleValue_y=" << detection_info.scale_value_y() << ","
+ << "ScaleValue_x=" << detection_info.scale_value_x() << ","
+ << "ScaleValue_h=" << detection_info.scale_value_h() << ","
+ << "ScaleValue_w=" << detection_info.scale_value_w() << ","
+ << "UseRegularNms=" << detection_info.use_regular_nms() << ","
+ << "DetectionPerClass=" << detection_info.detection_per_class()
+ << "}";
+
+ return os;
+}
+
+/** Formatted output of the DetectionPostProcessLayerInfo type.
+ *
+ * @param[in] detection_info Type to output
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const DetectionPostProcessLayerInfo &detection_info)
+{
+ std::stringstream str;
+ str << detection_info;
+ return str.str();
+}
/** Formatted output of the DetectionWindow type.
*
* @param[in] detection_window Type to output
diff --git a/utils/Utils.h b/utils/Utils.h
index ba10d7c803..cc5dfbabc2 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -616,10 +616,10 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename)
* @param[in] npy_filename Filename of the file to create.
* @param[in] fortran_order If true, save matrix in fortran order.
*/
-template <typename T>
+template <typename T, typename U = float>
void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32, arm_compute::DataType::QASYMM8);
std::ofstream fs;
try
@@ -637,33 +637,25 @@ void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
// Map buffer if creating a CLTensor
map(tensor, true);
- switch(tensor.info()->data_type())
- {
- case arm_compute::DataType::F32:
- {
- std::vector<float> tmp; /* Used only to get the typestring */
- npy::Typestring typestring_o{ tmp };
- std::string typestring = typestring_o.str();
+ using typestring_type = typename std::conditional<std::is_floating_point<U>::value, float, qasymm8_t>::type;
- std::ofstream stream(npy_filename, std::ofstream::binary);
- npy::write_header(stream, typestring, fortran_order, shape);
+ std::vector<typestring_type> tmp; /* Used only to get the typestring */
+ npy::Typestring typestring_o{ tmp };
+ std::string typestring = typestring_o.str();
- arm_compute::Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape());
+ std::ofstream stream(npy_filename, std::ofstream::binary);
+ npy::write_header(stream, typestring, fortran_order, shape);
- arm_compute::Iterator in(&tensor, window);
+ arm_compute::Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
- arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
- {
- stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(float));
- },
- in);
+ arm_compute::Iterator in(&tensor, window);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Unsupported format");
- }
+ arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
+ {
+ stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(typestring_type));
+ },
+ in);
// Unmap buffer if creating a CLTensor
unmap(tensor);