authorGiorgio Arena <giorgio.arena@arm.com>2017-10-26 17:14:13 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:35:24 +0000
commitcf3935ffd4c67d9396c2435a3a28d3a159753105 (patch)
treed657c5b9b6724c15fc55847d88ab27e1081c2f44 /utils/Utils.h
parent7362f0de41305eccb4b2b9b606647ffe318d32b7 (diff)
downloadComputeLibrary-cf3935ffd4c67d9396c2435a3a28d3a159753105.tar.gz
COMPMID-627 Create CL SGEMM example
Change-Id: I17478202348d34716d00c78e3cc21254c76b32e1
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/93304
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'utils/Utils.h')
-rw-r--r-- utils/Utils.h | 378
1 file changed, 334 insertions(+), 44 deletions(-)
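For context, a minimal sketch of how the helpers introduced by this patch might be combined in a CL SGEMM example. The tensor shapes, output file name and the CLGEMM configuration shown here are illustrative assumptions, not a copy of the example added by the commit:

    #include "arm_compute/runtime/CL/CLFunctions.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "utils/Utils.h"

    using namespace arm_compute;
    using namespace utils;

    int main()
    {
        CLScheduler::get().default_init();

        CLTensor src0, src1, dst;

        // A is M x K and B is K x N; dimension 0 is the width (number of columns)
        src0.allocator()->init(TensorInfo(7U /* K */, 3U /* M */, Format::F32));
        src1.allocator()->init(TensorInfo(2U /* N */, 7U /* K */, Format::F32));
        init_sgemm_output(dst, src0, src1, Format::F32); // C is M x N: width N = 2, height M = 3

        CLGEMM sgemm;
        sgemm.configure(&src0, &src1, nullptr, &dst, 1.f /* alpha */, 0.f /* beta */);

        src0.allocator()->allocate();
        src1.allocator()->allocate();
        dst.allocator()->allocate();

        fill_random_tensor(src0, -1.f, 1.f);
        fill_random_tensor(src1, -1.f, 1.f);

        sgemm.run();
        CLScheduler::get().sync();

        save_to_npy(dst, "sgemm_out.npy", false /* fortran_order */);
        return 0;
    }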
diff --git a/utils/Utils.h b/utils/Utils.h
index c88de0e16b..1f3d971917 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -30,6 +30,7 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/Tensor.h"
+#include "libnpy/npy.hpp"
#include "support/ToolchainSupport.h"
#ifdef ARM_COMPUTE_CL
@@ -41,6 +42,10 @@
#include <cstring>
#include <fstream>
#include <iostream>
+#include <random>
+#include <string>
+#include <tuple>
+#include <vector>
namespace arm_compute
{
@@ -80,6 +85,66 @@ void draw_detection_rectangle(arm_compute::ITensor *tensor, const arm_compute::D
*/
std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs);
+/** Parse the npy header from an input file stream. At the end of the execution,
+ * the file position pointer will be located at the first data element stored in the npy file
+ *
+ * @param[in] fs Input file stream to parse
+ *
+ * @return The shape, fortran order flag and typestring stored in the header of the NPY file
+ */
+std::tuple<std::vector<unsigned long>, bool, std::string> parse_npy_header(std::ifstream &fs);
+
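For illustration, a small usage sketch (assuming utils/Utils.h is included; the file name is hypothetical):

    std::ifstream fs("input.npy", std::ios::in | std::ios::binary);
    std::vector<unsigned long> shape;
    bool                       fortran_order = false;
    std::string                typestr;
    std::tie(shape, fortran_order, typestr) = parse_npy_header(fs);
    // For a little-endian float32, C-order 128x64 array the header dict is
    // {'descr': '<f4', 'fortran_order': False, 'shape': (128, 64), }, so the call
    // yields shape == {128, 64}, fortran_order == false and typestr == "<f4",
    // leaving fs positioned at the first data element.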
+/** Obtain numpy type string from DataType.
+ *
+ * @param[in] data_type Data type.
+ *
+ * @return numpy type string.
+ */
+inline std::string get_typestring(DataType data_type)
+{
+ // Check endianness
+ const unsigned int i = 1;
+ const char *c = reinterpret_cast<const char *>(&i);
+ std::string endianness;
+ if(*c == 1)
+ {
+ endianness = std::string("<");
+ }
+ else
+ {
+ endianness = std::string(">");
+ }
+ const std::string no_endianness("|");
+
+ switch(data_type)
+ {
+ case DataType::U8:
+ return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
+ case DataType::S8:
+ return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
+ case DataType::U16:
+ return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
+ case DataType::S16:
+ return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
+ case DataType::U32:
+ return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
+ case DataType::S32:
+ return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
+ case DataType::U64:
+ return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
+ case DataType::S64:
+ return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
+ case DataType::F32:
+ return endianness + "f" + support::cpp11::to_string(sizeof(float));
+ case DataType::F64:
+ return endianness + "f" + support::cpp11::to_string(sizeof(double));
+ case DataType::SIZET:
+ return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
+
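On a typical little-endian host the helper would, for example, produce:

    get_typestring(DataType::U8);  // "|u1"  (single-byte types carry no endianness marker)
    get_typestring(DataType::S16); // "<i2"
    get_typestring(DataType::F32); // "<f4"
    get_typestring(DataType::F64); // "<f8"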
/** Maps a tensor if needed
*
* @param[in] tensor Tensor to be mapped
@@ -350,6 +415,159 @@ private:
unsigned int _width, _height;
};
+class NPYLoader
+{
+public:
+ NPYLoader()
+ : _fs(), _shape(), _fortran_order(false), _typestring()
+ {
+ }
+
+ /** Open a NPY file and read its metadata
+ *
+ * @param[in] npy_filename File to open
+ */
+ void open(const std::string &npy_filename)
+ {
+ ARM_COMPUTE_ERROR_ON(is_open());
+ try
+ {
+ _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
+ _fs.open(npy_filename, std::ios::in | std::ios::binary);
+
+ std::tie(_shape, _fortran_order, _typestring) = parse_npy_header(_fs);
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Accessing %s: %s", npy_filename.c_str(), e.what());
+ }
+ }
+ /** Return true if a NPY file is currently open */
+ bool is_open()
+ {
+ return _fs.is_open();
+ }
+
+ /** Return true if the currently open NPY file is stored in fortran order */
+ bool is_fortran()
+ {
+ return _fortran_order;
+ }
+
+ /** Initialise the metadata of a tensor with the dimensions of the NPY file currently open
+ *
+ * @param[out] tensor Tensor to initialise
+ * @param[in] format Format to use for the tensor
+ */
+ template <typename T>
+ void init_tensor(T &tensor, arm_compute::Format format)
+ {
+ ARM_COMPUTE_ERROR_ON(!is_open());
+ ARM_COMPUTE_ERROR_ON(format != arm_compute::Format::F32);
+
+ // Use the size of the input NPY tensor
+ TensorShape shape;
+ shape.set_num_dimensions(_shape.size());
+ for(size_t i = 0; i < _shape.size(); ++i)
+ {
+ shape.set(i, _shape.at(i));
+ }
+
+ arm_compute::TensorInfo tensor_info(shape, format);
+ tensor.allocator()->init(tensor_info);
+ }
+
+ /** Fill a tensor with the content of the currently open NPY file.
+ *
+ * @note If the tensor is a CLTensor, the function maps and unmaps the tensor
+ *
+ * @param[in,out] tensor Tensor to fill (must be allocated, with dimensions matching those of the opened NPY file).
+ */
+ template <typename T>
+ void fill_tensor(T &tensor)
+ {
+ ARM_COMPUTE_ERROR_ON(!is_open());
+ ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&tensor, arm_compute::Format::F32);
+ try
+ {
+ // Map buffer if creating a CLTensor
+ map(tensor, true);
+
+ // Check if the file is large enough to fill the tensor
+ const size_t current_position = _fs.tellg();
+ _fs.seekg(0, std::ios_base::end);
+ const size_t end_position = _fs.tellg();
+ _fs.seekg(current_position, std::ios_base::beg);
+
+ ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size() * tensor.info()->element_size(),
+ "Not enough data in file");
+ ARM_COMPUTE_UNUSED(end_position);
+
+ // Check if the typestring matches the given one
+ std::string expect_typestr = get_typestring(tensor.info()->data_type());
+ ARM_COMPUTE_ERROR_ON_MSG(_typestring != expect_typestr, "Typestrings mismatch");
+
+ // Validate tensor shape
+ ARM_COMPUTE_ERROR_ON_MSG(_shape.size() != tensor.info()->tensor_shape().num_dimensions(), "Tensor ranks mismatch");
+ if(_fortran_order)
+ {
+ for(size_t i = 0; i < _shape.size(); ++i)
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(tensor.info()->tensor_shape()[i] != _shape[i], "Tensor dimensions mismatch");
+ }
+ }
+ else
+ {
+ for(size_t i = 0; i < _shape.size(); ++i)
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(tensor.info()->tensor_shape()[i] != _shape[_shape.size() - i - 1], "Tensor dimensions mismatch");
+ }
+ }
+
+ switch(tensor.info()->format())
+ {
+ case arm_compute::Format::F32:
+ {
+ // Read data
+ if(tensor.info()->padding().empty())
+ {
+ // If the tensor has no padding, read directly from the stream.
+ _fs.read(reinterpret_cast<char *>(tensor.buffer()), tensor.info()->total_size());
+ }
+ else
+ {
+ // If the tensor has padding, access its elements through the execution window.
+ Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ _fs.read(reinterpret_cast<char *>(tensor.ptr_to_element(id)), tensor.info()->element_size());
+ });
+ }
+
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported format");
+ }
+
+ // Unmap buffer if creating a CLTensor
+ unmap(tensor);
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Loading NPY file: %s", e.what());
+ }
+ }
+
+private:
+ std::ifstream _fs;
+ std::vector<unsigned long> _shape;
+ bool _fortran_order;
+ std::string _typestring;
+};
+
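A minimal usage sketch for the loader (file name is hypothetical; with ARM_COMPUTE_CL enabled a CLTensor works the same way thanks to the map/unmap helpers):

    NPYLoader npy;
    npy.open("matrix_a.npy");

    Tensor src;
    npy.init_tensor(src, Format::F32); // shape is taken from the NPY header
    src.allocator()->allocate();
    npy.fill_tensor(src);              // reads the payload, honouring fortran order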
/** Template helper function to save a tensor image to a PPM file.
*
* @note Only U8 and RGB888 formats supported.
@@ -430,6 +648,83 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename)
}
}
+/** Template helper function to save a tensor to a NPY file.
+ *
+ * @note Only F32 format supported.
+ * @note Only works with 2D tensors.
+ * @note If the input tensor is a CLTensor, the function maps and unmaps the tensor
+ *
+ * @param[in] tensor The tensor to save as a NPY file
+ * @param[in] npy_filename Filename of the file to create.
+ * @param[in] fortran_order If true, save matrix in fortran order.
+ */
+template <typename T>
+void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
+{
+ ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&tensor, arm_compute::Format::F32);
+ ARM_COMPUTE_ERROR_ON(tensor.info()->num_dimensions() > 2);
+
+ std::ofstream fs;
+
+ try
+ {
+ fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
+ fs.open(npy_filename, std::ios::out | std::ios::binary);
+
+ const unsigned int width = tensor.info()->tensor_shape()[0];
+ const unsigned int height = tensor.info()->tensor_shape()[1];
+ unsigned long shape[2];
+
+ if(!fortran_order)
+ {
+ shape[0] = height, shape[1] = width;
+ }
+ else
+ {
+ shape[0] = width, shape[1] = height;
+ }
+
+ // Map buffer if creating a CLTensor
+ map(tensor, true);
+
+ switch(tensor.info()->format())
+ {
+ case arm_compute::Format::F32:
+ {
+ std::vector<float> tmp; /* Used only to get the typestring */
+ npy::Typestring typestring_o{ tmp };
+ std::string typestring = typestring_o.str();
+
+ std::ofstream stream(npy_filename, std::ofstream::binary);
+ npy::WriteHeader(stream, typestring, fortran_order, 2, shape);
+
+ arm_compute::Window window;
+ window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, 1));
+ window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
+
+ arm_compute::Iterator in(&tensor, window);
+
+ arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
+ {
+ stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(float));
+ },
+ in);
+
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported format");
+ }
+
+ // Unmap buffer if creating a CLTensor
+ unmap(tensor);
+ }
+ catch(const std::ofstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Writing %s: (%s)", npy_filename.c_str(), e.what());
+ }
+}
+
/** Load the tensor with pre-trained data from a binary file
*
* @param[in] tensor The tensor to be filled. Data type supported: F32.
@@ -484,56 +779,51 @@ void load_trained_data(T &tensor, const std::string &filename)
}
}
-/** Obtain numpy type string from DataType.
- *
- * @param[in] data_type Data type.
- *
- * @return numpy type string.
- */
-inline std::string get_typestring(DataType data_type)
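+/** Fill a tensor with uniformly distributed random float values
+ *
+ * @note If the tensor is a CLTensor, the function maps and unmaps the tensor
+ * @note Only the first two dimensions of the tensor are filled
+ *
+ * @param[in,out] tensor Tensor to fill. Format supported: F32.
+ * @param[in] lower_bound Lower bound of the generated values
+ * @param[in] upper_bound Upper bound of the generated values
+ */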
+template <typename T>
+void fill_random_tensor(T &tensor, float lower_bound, float upper_bound)
{
- // Check endianness
- const unsigned int i = 1;
- const char *c = reinterpret_cast<const char *>(&i);
- std::string endianness;
- if(*c == 1)
- {
- endianness = std::string("<");
- }
- else
- {
- endianness = std::string(">");
- }
- const std::string no_endianness("|");
+ std::random_device rd;
+ std::mt19937 gen(rd());
- switch(data_type)
+ TensorShape shape(tensor.info()->dimension(0), tensor.info()->dimension(1));
+
+ Window window;
+ window.set(Window::DimX, Window::Dimension(0, shape.x(), 1));
+ window.set(Window::DimY, Window::Dimension(0, shape.y(), 1));
+
+ map(tensor, true);
+
+ Iterator it(&tensor, window);
+
+ switch(tensor.info()->format())
{
- case DataType::U8:
- return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
- case DataType::S8:
- return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
- case DataType::U16:
- return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
- case DataType::S16:
- return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
- case DataType::U32:
- return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
- case DataType::S32:
- return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
- case DataType::U64:
- return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
- case DataType::S64:
- return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
- case DataType::F32:
- return endianness + "f" + support::cpp11::to_string(sizeof(float));
- case DataType::F64:
- return endianness + "f" + support::cpp11::to_string(sizeof(double));
- case DataType::SIZET:
- return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
+ case arm_compute::Format::F32:
+ {
+ std::uniform_real_distribution<float> dist(lower_bound, upper_bound);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ *reinterpret_cast<float *>(it.ptr()) = dist(gen);
+ },
+ it);
+
+ break;
+ }
default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ {
+ ARM_COMPUTE_ERROR("Unsupported format");
+ }
}
+
+ unmap(tensor);
+}
+
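+/** Initialise the tensor info of an SGEMM output tensor
+ *
+ * @param[out] dst Output tensor; its width is taken from src1 and its height from src0
+ * @param[in] src0 First input tensor (matrix A)
+ * @param[in] src1 Second input tensor (matrix B)
+ * @param[in] format Format to use for the output tensor
+ */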
+template <typename T>
+void init_sgemm_output(T &dst, T &src0, T &src1, arm_compute::Format format)
+{
+ dst.allocator()->init(TensorInfo(src1.info()->dimension(0), src0.info()->dimension(1), format));
}
+
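As a worked illustration of the dimension choice (dimension 0 is the width, i.e. the number of columns), a sketch with made-up shapes:

    Tensor a, b, c;
    a.allocator()->init(TensorInfo(5U /* K */, 2U /* M */, Format::F32)); // A is 2 x 5
    b.allocator()->init(TensorInfo(3U /* N */, 5U /* K */, Format::F32)); // B is 5 x 3
    init_sgemm_output(c, a, b, Format::F32); // C = A * B is 2 x 3: width N = 3, height M = 2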
} // namespace utils
} // namespace arm_compute
#endif /* __UTILS_UTILS_H__*/