author     Isabella Gottardi <isabella.gottardi@arm.com>   2019-01-08 13:48:44 +0000
committer  Isabella Gottardi <isabella.gottardi@arm.com>   2019-08-06 07:58:16 +0000
commit     a7acb3cbabeb66ce647684466a04c96b2963c9c9 (patch)
tree       7988b75372c8ad1dfa3c8d028ab3a603a5e5a047 /utils
parent     4746326ecb075dcfa123aaa8b38de5ec3e534b60 (diff)
download   ComputeLibrary-a7acb3cbabeb66ce647684466a04c96b2963c9c9.tar.gz
COMPMID-1849: Implement CPPDetectionPostProcessLayer
* Add DetectionPostProcessLayer
* Add DetectionPostProcessLayer at the graph

Change-Id: I7e56f6cffc26f112d26dfe74853085bb8ec7d849
Signed-off-by: Isabella Gottardi <isabella.gottardi@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1639
Reviewed-by: Giuseppe Rossini <giuseppe.rossini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'utils')
-rw-r--r--  utils/GraphUtils.cpp |  6
-rw-r--r--  utils/GraphUtils.h   |  9
-rw-r--r--  utils/TypePrinter.h  | 37
-rw-r--r--  utils/Utils.h        | 40
4 files changed, 63 insertions(+), 29 deletions(-)
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index dad9aed6a5..00165cd6c2 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -140,12 +140,14 @@ bool DummyAccessor::access_tensor(ITensor &tensor)
return ret;
}
-NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream)
+NumPyAccessor::NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout, std::ostream &output_stream)
: _npy_tensor(), _filename(std::move(npy_path)), _output_stream(output_stream)
{
- NumPyBinLoader loader(_filename);
+ NumPyBinLoader loader(_filename, data_layout);
TensorInfo info(shape, 1, data_type);
+ info.set_data_layout(data_layout);
+
_npy_tensor.allocator()->init(info);
_npy_tensor.allocator()->allocate();
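
For context, a minimal sketch of constructing the accessor with the new layout argument (the file name, shape, and helper below are placeholders, not taken from this patch):

    #include "utils/GraphUtils.h"
    #include <iostream>

    using namespace arm_compute;
    using namespace arm_compute::graph_utils;

    void check_against_reference()   // hypothetical helper, not part of this patch
    {
        // Load an npy reference stored in NHWC order so it can be compared against
        // a graph output of the same layout (path and shape are placeholders).
        NumPyAccessor accessor("reference_output.npy",
                               TensorShape(1001U),  // placeholder output shape
                               DataType::F32,
                               DataLayout::NHWC,    // new argument, defaults to NCHW
                               std::cout);
    }
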
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index fe19eb3196..3417135f17 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -145,9 +145,10 @@ public:
* @param[in] npy_path Path to npy file.
* @param[in] shape Shape of the numpy tensor data.
* @param[in] data_type DataType of the numpy tensor data.
+ * @param[in] data_layout (Optional) DataLayout of the numpy tensor data.
* @param[out] output_stream (Optional) Output stream
*/
- NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream = std::cout);
+ NumPyAccessor(std::string npy_path, TensorShape shape, DataType data_type, DataLayout data_layout = DataLayout::NCHW, std::ostream &output_stream = std::cout);
/** Allow instances of this class to be move constructed */
NumPyAccessor(NumPyAccessor &&) = default;
/** Prevent instances of this class from being copied (As this class contains pointers) */
@@ -567,11 +568,13 @@ inline std::unique_ptr<graph::ITensorAccessor> get_detection_output_accessor(con
* @param[in] npy_path Path to npy file.
* @param[in] shape Shape of the numpy tensor data.
* @param[in] data_type DataType of the numpy tensor data.
+ * @param[in] data_layout DataLayout of the numpy tensor data.
* @param[out] output_stream (Optional) Output stream
*
* @return An appropriate tensor accessor
*/
-inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std::string &npy_path, TensorShape shape, DataType data_type, std::ostream &output_stream = std::cout)
+inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std::string &npy_path, TensorShape shape, DataType data_type, DataLayout data_layout = DataLayout::NCHW,
+ std::ostream &output_stream = std::cout)
{
if(npy_path.empty())
{
@@ -579,7 +582,7 @@ inline std::unique_ptr<graph::ITensorAccessor> get_npy_output_accessor(const std
}
else
{
- return arm_compute::support::cpp14::make_unique<NumPyAccessor>(npy_path, shape, data_type, output_stream);
+ return arm_compute::support::cpp14::make_unique<NumPyAccessor>(npy_path, shape, data_type, data_layout, output_stream);
}
}
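
A hedged usage sketch of the extended helper in a frontend graph (the stream setup, helper name, file name and shape are placeholders):

    #include "arm_compute/graph.h"
    #include "utils/GraphUtils.h"

    using namespace arm_compute;
    using namespace arm_compute::graph_utils;

    void add_output_check(graph::frontend::Stream &stream)   // hypothetical helper
    {
        // Attach an output node that compares the graph result against an NHWC npy file
        // (placeholder path and shape; data_layout still defaults to NCHW when omitted).
        stream << graph::frontend::OutputLayer(
            get_npy_output_accessor("expected_output.npy", TensorShape(1001U), DataType::F32, DataLayout::NHWC));
    }
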
diff --git a/utils/TypePrinter.h b/utils/TypePrinter.h
index 6ba6f45f8c..f51d2368e1 100644
--- a/utils/TypePrinter.h
+++ b/utils/TypePrinter.h
@@ -2059,6 +2059,43 @@ inline std::string to_string(const DetectionOutputLayerInfo &detection_info)
str << detection_info;
return str.str();
}
+/** Formatted output of the DetectionPostProcessLayerInfo type.
+ *
+ * @param[out] os Output stream
+ * @param[in] detection_info Type to output
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const DetectionPostProcessLayerInfo &detection_info)
+{
+ os << "{MaxDetections=" << detection_info.max_detections() << ","
+ << "MaxClassesPerDetection=" << detection_info.max_classes_per_detection() << ","
+ << "NmsScoreThreshold=" << detection_info.nms_score_threshold() << ","
+ << "NmsIouThreshold=" << detection_info.iou_threshold() << ","
+ << "NumClasses=" << detection_info.num_classes() << ","
+ << "ScaleValue_y=" << detection_info.scale_value_y() << ","
+ << "ScaleValue_x=" << detection_info.scale_value_x() << ","
+ << "ScaleValue_h=" << detection_info.scale_value_h() << ","
+ << "ScaleValue_w=" << detection_info.scale_value_w() << ","
+ << "UseRegularNms=" << detection_info.use_regular_nms() << ","
+ << "DetectionPerClass=" << detection_info.detection_per_class()
+ << "}";
+
+ return os;
+}
+
+/** Formatted output of the DetectionPostProcessLayerInfo type.
+ *
+ * @param[in] detection_info Type to output
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const DetectionPostProcessLayerInfo &detection_info)
+{
+ std::stringstream str;
+ str << detection_info;
+ return str.str();
+}
/** Formatted output of the DetectionWindow type.
*
* @param[in] detection_window Type to output
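
As a quick sanity check of the new printers, a minimal sketch (this assumes DetectionPostProcessLayerInfo is default-constructible; its constructor arguments are not part of this patch):

    #include "utils/TypePrinter.h"
    #include <iostream>

    int main()
    {
        // Stream the info object through the newly added overloads.
        arm_compute::DetectionPostProcessLayerInfo info{};
        std::cout << info << std::endl;                          // operator<<
        std::cout << arm_compute::to_string(info) << std::endl;  // to_string
        return 0;
    }
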
diff --git a/utils/Utils.h b/utils/Utils.h
index ba10d7c803..cc5dfbabc2 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -616,10 +616,10 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename)
* @param[in] npy_filename Filename of the file to create.
* @param[in] fortran_order If true, save matrix in fortran order.
*/
-template <typename T>
+template <typename T, typename U = float>
void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32, arm_compute::DataType::QASYMM8);
std::ofstream fs;
try
@@ -637,33 +637,25 @@ void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
// Map buffer if creating a CLTensor
map(tensor, true);
- switch(tensor.info()->data_type())
- {
- case arm_compute::DataType::F32:
- {
- std::vector<float> tmp; /* Used only to get the typestring */
- npy::Typestring typestring_o{ tmp };
- std::string typestring = typestring_o.str();
+ using typestring_type = typename std::conditional<std::is_floating_point<U>::value, float, qasymm8_t>::type;
- std::ofstream stream(npy_filename, std::ofstream::binary);
- npy::write_header(stream, typestring, fortran_order, shape);
+ std::vector<typestring_type> tmp; /* Used only to get the typestring */
+ npy::Typestring typestring_o{ tmp };
+ std::string typestring = typestring_o.str();
- arm_compute::Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape());
+ std::ofstream stream(npy_filename, std::ofstream::binary);
+ npy::write_header(stream, typestring, fortran_order, shape);
- arm_compute::Iterator in(&tensor, window);
+ arm_compute::Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
- arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
- {
- stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(float));
- },
- in);
+ arm_compute::Iterator in(&tensor, window);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Unsupported format");
- }
+ arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
+ {
+ stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(typestring_type));
+ },
+ in);
// Unmap buffer if creating a CLTensor
unmap(tensor);
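
To illustrate the relaxed data-type check and the new template parameter, a hedged sketch (tensor configuration and filling are elided; the helper and file names are placeholders):

    #include "arm_compute/runtime/Tensor.h"
    #include "utils/Utils.h"
    #include <cstdint>

    void dump_results(arm_compute::Tensor &out_f32, arm_compute::Tensor &out_q8)  // hypothetical helper
    {
        // Default U = float keeps the previous behaviour: an F32 payload, 4 bytes per element.
        arm_compute::utils::save_to_npy(out_f32, "out_f32.npy", false);
        // A non-floating-point U selects the qasymm8_t typestring: 1 byte per element.
        arm_compute::utils::save_to_npy<arm_compute::Tensor, std::uint8_t>(out_q8, "out_q8.npy", false);
    }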