author    Gian Marco <gianmarco.iodice@arm.com>      2017-10-19 14:13:38 +0100
committer Anthony Barbier <anthony.barbier@arm.com>  2018-11-02 16:35:24 +0000
commit    44ec2e7c1edd57eb2fb2674a28452aac02bd8c0a
tree      648311be8809fd64a027d3e26485896db915dbb8
parent    fa4cacdff825a38eac31ef7ecd3ad6b30da53eaa
COMPMID-639 - Add PPMAccessor and TopNPredictionsAccessor in graph_utils
Change-Id: I40c3e2dfcde10c65ed9c86f9283a53f9e679d4fa
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/92437
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
-rw-r--r--  arm_compute/graph/Tensor.h    10
-rw-r--r--  examples/graph_alexnet.cpp    81
-rw-r--r--  utils/GraphUtils.cpp         111
-rw-r--r--  utils/GraphUtils.h            56
-rw-r--r--  utils/Utils.h                 82
5 files changed, 317 insertions, 23 deletions
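
For reference, a minimal sketch (not part of the patch) of how the two new helpers plug into the example's graph stream; the image and labels paths are placeholders and an OpenCL target hint is assumed:

// Condensed from the graph_alexnet.cpp changes below; the layer chain is elided
Graph graph;
graph << TargetHint::OPENCL
      << Tensor(TensorInfo(TensorShape(227U, 227U, 3U, 1U), 1, DataType::F32),
                get_input_accessor("/path/to/image.ppm", 122.68f, 116.67f, 104.01f))
      // ... AlexNet convolution / pooling / fully connected layers ...
      << SoftmaxLayer()
      << Tensor(get_output_accessor("/path/to/labels.txt", 5));
graph.run();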
diff --git a/arm_compute/graph/Tensor.h b/arm_compute/graph/Tensor.h
index dcb0c661d6..94822c2c1e 100644
--- a/arm_compute/graph/Tensor.h
+++ b/arm_compute/graph/Tensor.h
@@ -68,6 +68,16 @@ public:
* @param[in] accessor Tensor accessor
*/
template <typename AccessorType>
+ Tensor(TensorInfo &&info, std::unique_ptr<AccessorType> &&accessor)
+ : _target(TargetHint::DONT_CARE), _info(info), _accessor(std::move(accessor)), _tensor(nullptr)
+ {
+ }
+ /** Constructor
+ *
+ * @param[in] info Tensor info to use
+ * @param[in] accessor Tensor accessor
+ */
+ template <typename AccessorType>
Tensor(TensorInfo &&info, AccessorType &&accessor)
: _target(TargetHint::DONT_CARE), _info(info), _accessor(arm_compute::support::cpp14::make_unique<AccessorType>(std::forward<AccessorType>(accessor))), _tensor(nullptr)
{
diff --git a/examples/graph_alexnet.cpp b/examples/graph_alexnet.cpp
index 9a747b6b0c..be852b94ff 100644
--- a/examples/graph_alexnet.cpp
+++ b/examples/graph_alexnet.cpp
@@ -64,35 +64,91 @@ std::unique_ptr<ITensorAccessor> get_accessor(const std::string &path, const std
}
}
+/** Generates appropriate input accessor according to the specified ppm_path
+ *
+ * @note If ppm_path is empty, a DummyAccessor is generated; otherwise a PPMAccessor is generated
+ *
+ * @param[in] ppm_path Path to PPM file
+ * @param[in] mean_r Red mean value to be subtracted from red channel
+ * @param[in] mean_g Green mean value to be subtracted from green channel
+ * @param[in] mean_b Blue mean value to be subtracted from blue channel
+ *
+ * @return An appropriate tensor accessor
+ */
+std::unique_ptr<ITensorAccessor> get_input_accessor(const std::string &ppm_path, float mean_r, float mean_g, float mean_b)
+{
+ if(ppm_path.empty())
+ {
+ return arm_compute::support::cpp14::make_unique<DummyAccessor>();
+ }
+ else
+ {
+ return arm_compute::support::cpp14::make_unique<PPMAccessor>(ppm_path, true, mean_r, mean_g, mean_b);
+ }
+}
+
+/** Generates appropriate output accessor according to the specified labels_path
+ *
+ * @note If labels_path is empty, a DummyAccessor is generated; otherwise a TopNPredictionsAccessor is generated
+ *
+ * @param[in] labels_path Path to labels text file
+ * @param[in] top_n (Optional) Number of output classes to print
+ * @param[out] output_stream (Optional) Output stream
+ *
+ * @return An appropriate tensor accessor
+ */
+std::unique_ptr<ITensorAccessor> get_output_accessor(const std::string &labels_path, size_t top_n = 5, std::ostream &output_stream = std::cout)
+{
+ if(labels_path.empty())
+ {
+ return arm_compute::support::cpp14::make_unique<DummyAccessor>();
+ }
+ else
+ {
+ return arm_compute::support::cpp14::make_unique<TopNPredictionsAccessor>(labels_path, top_n, output_stream);
+ }
+}
+
/** Example demonstrating how to implement AlexNet's network using the Compute Library's graph API
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] batches )
+ * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] image, [optional] labels )
*/
void main_graph_alexnet(int argc, const char **argv)
{
- std::string data_path; /** Path to the trainable data */
- unsigned int batches = 4; /** Number of batches */
+ std::string data_path; /* Path to the trainable data */
+ std::string image; /* Image data */
+ std::string label; /* Label data */
+
+ constexpr float mean_r = 122.68f; /* Mean value to subtract from red channel */
+ constexpr float mean_g = 116.67f; /* Mean value to subtract from green channel */
+ constexpr float mean_b = 104.01f; /* Mean value to subtract from blue channel */
// Parse arguments
if(argc < 2)
{
// Print help
- std::cout << "Usage: " << argv[0] << " [path_to_data] [batches]\n\n";
+ std::cout << "Usage: " << argv[0] << " [path_to_data] [image] [labels]\n\n";
std::cout << "No data folder provided: using random values\n\n";
}
else if(argc == 2)
{
- //Do something with argv[1]
data_path = argv[1];
- std::cout << "Usage: " << argv[0] << " [path_to_data] [batches]\n\n";
- std::cout << "No number of batches where specified, thus will use the default : " << batches << "\n\n";
+ std::cout << "Usage: " << argv[0] << " " << argv[1] << " [image] [labels]\n\n";
+ std::cout << "No image provided: using random values\n\n";
+ }
+ else if(argc == 3)
+ {
+ data_path = argv[1];
+ image = argv[2];
+ std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [labels]\n\n";
+ std::cout << "No text file with labels provided: skipping output accessor\n\n";
}
else
{
- //Do something with argv[1] and argv[2]
data_path = argv[1];
- batches = std::strtol(argv[2], nullptr, 0);
+ image = argv[2];
+ label = argv[3];
}
// Check if OpenCL is available and initialize the scheduler
@@ -107,7 +163,8 @@ void main_graph_alexnet(int argc, const char **argv)
LoggerRegistry::get().create_reserved_loggers(LogLevel::INFO, { std::make_shared<StdPrinter>() });
graph << hint
- << Tensor(TensorInfo(TensorShape(227U, 227U, 3U, batches), 1, DataType::F32), DummyAccessor())
+ << Tensor(TensorInfo(TensorShape(227U, 227U, 3U, 1U), 1, DataType::F32),
+ get_input_accessor(image, mean_r, mean_g, mean_b))
// Layer 1
<< ConvolutionLayer(
11U, 11U, 96U,
@@ -168,7 +225,7 @@ void main_graph_alexnet(int argc, const char **argv)
get_accessor(data_path, "/cnn_data/alexnet_model/fc8_b.npy"))
// Softmax
<< SoftmaxLayer()
- << Tensor(DummyAccessor());
+ << Tensor(get_output_accessor(label, 5));
// Run graph
graph.run();
@@ -177,7 +234,7 @@ void main_graph_alexnet(int argc, const char **argv)
/** Main program for AlexNet
*
* @param[in] argc Number of arguments
- * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] batches )
+ * @param[in] argv Arguments ( [optional] Path to the weights folder, [optional] image, [optional] labels )
*/
int main(int argc, const char **argv)
{
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index bdd831075d..bcfc0f7994 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -34,8 +34,10 @@
#include "arm_compute/core/PixelValue.h"
#include "libnpy/npy.hpp"
+#include <algorithm>
+#include <iomanip>
+#include <ostream>
#include <random>
-#include <sstream>
using namespace arm_compute::graph_utils;
@@ -48,16 +50,8 @@ bool PPMWriter::access_tensor(ITensor &tensor)
{
std::stringstream ss;
ss << _name << _iterator << ".ppm";
- if(dynamic_cast<Tensor *>(&tensor) != nullptr)
- {
- arm_compute::utils::save_to_ppm(dynamic_cast<Tensor &>(tensor), ss.str());
- }
-#ifdef ARM_COMPUTE_CL
- else if(dynamic_cast<CLTensor *>(&tensor) != nullptr)
- {
- arm_compute::utils::save_to_ppm(dynamic_cast<CLTensor &>(tensor), ss.str());
- }
-#endif /* ARM_COMPUTE_CL */
+
+ arm_compute::utils::save_to_ppm(tensor, ss.str());
_iterator++;
if(_maximum == 0)
@@ -87,6 +81,101 @@ bool DummyAccessor::access_tensor(ITensor &tensor)
return ret;
}
+PPMAccessor::PPMAccessor(const std::string &ppm_path, bool bgr, float mean_r, float mean_g, float mean_b)
+ : _ppm_path(ppm_path), _bgr(bgr), _mean_r(mean_r), _mean_g(mean_g), _mean_b(mean_b)
+{
+}
+
+bool PPMAccessor::access_tensor(ITensor &tensor)
+{
+ utils::PPMLoader ppm;
+ const float mean[3] =
+ {
+ _bgr ? _mean_b : _mean_r,
+ _mean_g,
+ _bgr ? _mean_r : _mean_b
+ };
+
+ // Open PPM file
+ ppm.open(_ppm_path);
+
+ // Fill the tensor with the PPM content (BGR)
+ ppm.fill_planar_tensor(tensor, _bgr);
+
+ // Subtract the mean value from each channel
+ Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id)) - mean[id.z()];
+ *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = value;
+ });
+
+ return true;
+}
+
+TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
+ : _labels(), _output_stream(output_stream), _top_n(top_n)
+{
+ _labels.clear();
+
+ std::ifstream ifs;
+
+ try
+ {
+ ifs.exceptions(std::ifstream::badbit);
+ ifs.open(labels_path, std::ios::in | std::ios::binary);
+
+ for(std::string line; !std::getline(ifs, line).fail();)
+ {
+ _labels.emplace_back(line);
+ }
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Accessing %s: %s", labels_path.c_str(), e.what());
+ }
+}
+
+bool TopNPredictionsAccessor::access_tensor(ITensor &tensor)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0));
+
+ // Get the predicted class
+ std::vector<float> classes_prob;
+ std::vector<size_t> index;
+
+ const auto output_net = reinterpret_cast<float *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
+ const size_t num_classes = tensor.info()->dimension(0);
+
+ classes_prob.resize(num_classes);
+ index.resize(num_classes);
+
+ std::copy(output_net, output_net + num_classes, classes_prob.begin());
+
+ // Sort results
+ std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
+ std::sort(std::begin(index), std::end(index),
+ [&](size_t a, size_t b)
+ {
+ return classes_prob[a] > classes_prob[b];
+ });
+
+ _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl
+ << std::endl;
+ for(size_t i = 0; i < _top_n; ++i)
+ {
+ _output_stream << std::fixed << std::setprecision(4)
+ << classes_prob[index.at(i)]
+ << " - [id = " << index.at(i) << "]"
+ << ", " << _labels[index.at(i)] << std::endl;
+ }
+
+ return false;
+}
+
RandomAccessor::RandomAccessor(PixelValue lower, PixelValue upper, std::random_device::result_type seed)
: _lower(lower), _upper(upper), _seed(seed)
{
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index 5c370e5eba..39b3f115bd 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -29,6 +29,8 @@
#include "arm_compute/graph/Types.h"
#include <random>
+#include <string>
+#include <vector>
namespace arm_compute
{
@@ -76,6 +78,60 @@ private:
unsigned int _maximum;
};
+/** PPM accessor class */
+class PPMAccessor final : public graph::ITensorAccessor
+{
+public:
+ /** Constructor
+ *
+ * @param[in] ppm_path Path to PPM file
+ * @param[in] bgr (Optional) Fill the first plane with blue channel (default = true)
+ * @param[in] mean_r (Optional) Red mean value to be subtracted from red channel
+ * @param[in] mean_g (Optional) Green mean value to be subtracted from green channel
+ * @param[in] mean_b (Optional) Blue mean value to be subtracted from blue channel
+ */
+ PPMAccessor(const std::string &ppm_path, bool bgr = true, float mean_r = 0.0f, float mean_g = 0.0f, float mean_b = 0.0f);
+ /** Allow instances of this class to be move constructed */
+ PPMAccessor(PPMAccessor &&) = default;
+
+ // Inherited methods overridden:
+ bool access_tensor(ITensor &tensor) override;
+
+private:
+ const std::string &_ppm_path;
+ const bool _bgr;
+ const float _mean_r;
+ const float _mean_g;
+ const float _mean_b;
+};
+
+/** Result accessor class */
+class TopNPredictionsAccessor final : public graph::ITensorAccessor
+{
+public:
+ /** Constructor
+ *
+ * @param[in] labels_path Path to labels text file.
+ * @param[in] top_n (Optional) Number of output classes to print
+ * @param[out] output_stream (Optional) Output stream
+ */
+ TopNPredictionsAccessor(const std::string &labels_path, size_t top_n = 5, std::ostream &output_stream = std::cout);
+ /** Allow instances of this class to be move constructed */
+ TopNPredictionsAccessor(TopNPredictionsAccessor &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ TopNPredictionsAccessor(const TopNPredictionsAccessor &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ TopNPredictionsAccessor &operator=(const TopNPredictionsAccessor &) = delete;
+
+ // Inherited methods overridden:
+ bool access_tensor(ITensor &tensor) override;
+
+private:
+ std::vector<std::string> _labels;
+ std::ostream &_output_stream;
+ size_t _top_n;
+};
+
/** Random accessor class */
class RandomAccessor final : public graph::ITensorAccessor
{
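
As a rough usage sketch under the declarations above (not part of the patch), the new accessors can also be driven directly; the paths are placeholders and the tensors are assumed to be allocated with matching shapes:

#include "arm_compute/core/ITensor.h"
#include "utils/GraphUtils.h"
#include <iostream>
#include <string>

void run_accessors(arm_compute::ITensor &input, arm_compute::ITensor &output)
{
    using namespace arm_compute::graph_utils;

    // PPMAccessor stores a reference to the path string, so it must outlive the accessor
    const std::string image_path  = "/path/to/image.ppm";
    const std::string labels_path = "/path/to/labels.txt";

    // Fill the input tensor in BGR order and subtract the per-channel means
    PPMAccessor ppm(image_path, true, 122.68f, 116.67f, 104.01f);
    ppm.access_tensor(input);

    // Print the 5 most probable classes from the output tensor to std::cout
    TopNPredictionsAccessor top5(labels_path, 5, std::cout);
    top5.access_tensor(output);
}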
diff --git a/utils/Utils.h b/utils/Utils.h
index b0e1abeb5f..c88de0e16b 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -263,6 +263,88 @@ public:
}
}
+ /** Fill a tensor with 3 planes (one for each channel) with the content of the currently open PPM file.
+ *
+ * @note If the tensor is a CLTensor, the function maps and unmaps the buffer
+ *
+ * @param[in,out] tensor Tensor with 3 planes to fill (Must be allocated, and of matching dimensions with the opened PPM). Data types supported: U8/F32
+ * @param[in] bgr (Optional) Fill the first plane with blue channel (default = false)
+ */
+ template <typename T>
+ void fill_planar_tensor(T &tensor, bool bgr = false)
+ {
+ ARM_COMPUTE_ERROR_ON(!is_open());
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON(tensor.info()->dimension(0) != _width || tensor.info()->dimension(1) != _height || tensor.info()->dimension(2) != 3);
+
+ try
+ {
+ // Map buffer if creating a CLTensor
+ map(tensor, true);
+
+ // Check if the file is large enough to fill the image
+ const size_t current_position = _fs.tellg();
+ _fs.seekg(0, std::ios_base::end);
+ const size_t end_position = _fs.tellg();
+ _fs.seekg(current_position, std::ios_base::beg);
+
+ ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size(),
+ "Not enough data in file");
+ ARM_COMPUTE_UNUSED(end_position);
+
+ // Iterate through every pixel of the image
+ arm_compute::Window window;
+ window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, _width, 1));
+ window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
+ window.set(arm_compute::Window::DimZ, arm_compute::Window::Dimension(0, 1, 1));
+
+ arm_compute::Iterator out(&tensor, window);
+
+ unsigned char red = 0;
+ unsigned char green = 0;
+ unsigned char blue = 0;
+
+ size_t stride_z = tensor.info()->strides_in_bytes()[2];
+
+ arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
+ {
+ red = _fs.get();
+ green = _fs.get();
+ blue = _fs.get();
+
+ switch(tensor.info()->data_type())
+ {
+ case arm_compute::DataType::U8:
+ {
+ *(out.ptr() + 0 * stride_z) = bgr ? blue : red;
+ *(out.ptr() + 1 * stride_z) = green;
+ *(out.ptr() + 2 * stride_z) = bgr ? red : blue;
+ break;
+ }
+ case arm_compute::DataType::F32:
+ {
+ *reinterpret_cast<float *>(out.ptr() + 0 * stride_z) = static_cast<float>(bgr ? blue : red);
+ *reinterpret_cast<float *>(out.ptr() + 1 * stride_z) = static_cast<float>(green);
+ *reinterpret_cast<float *>(out.ptr() + 2 * stride_z) = static_cast<float>(bgr ? red : blue);
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
+ }
+ },
+ out);
+
+ // Unmap buffer if creating a CLTensor
+ unmap(tensor);
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Loading PPM file: %s", e.what());
+ }
+ }
+
private:
std::ifstream _fs;
unsigned int _width, _height;
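
A minimal sketch of the new fill_planar_tensor() helper on a CPU tensor (not part of the patch); the path is a placeholder and a 227x227 RGB PPM is assumed:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/Tensor.h"
#include "utils/Utils.h"
#include <string>

void load_ppm_planar(const std::string &path)
{
    arm_compute::utils::PPMLoader ppm;
    ppm.open(path);

    // Planar F32 tensor; dimensions must match the opened PPM (227x227 assumed here)
    arm_compute::Tensor input;
    input.allocator()->init(arm_compute::TensorInfo(arm_compute::TensorShape(227U, 227U, 3U), 1, arm_compute::DataType::F32));
    input.allocator()->allocate();

    // Fill the three planes in BGR order (plane 0 = blue), as the graph accessor does
    ppm.fill_planar_tensor(input, true);
}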