-rw-r--r--  arm_compute/core/Utils.h                                | 12
-rw-r--r--  src/core/CL/kernels/CLActivationLayerKernel.cpp         |  4
-rw-r--r--  src/core/CL/kernels/CLConvolutionKernel.cpp             | 20
-rw-r--r--  src/core/CL/kernels/CLFillBorderKernel.cpp              |  8
-rw-r--r--  src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp       |  2
-rw-r--r--  src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp  |  3
-rw-r--r--  src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp        |  2
-rw-r--r--  src/core/CL/kernels/CLMinMaxLocationKernel.cpp          |  4
-rw-r--r--  src/core/CL/kernels/CLPoolingLayerKernel.cpp            |  2
-rw-r--r--  src/core/CL/kernels/CLSoftmaxLayerKernel.cpp            |  6
-rw-r--r--  tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp       |  2
11 files changed, 27 insertions, 38 deletions
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index c2f0e3982a..4ecd464cdb 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -78,18 +78,6 @@ std::string build_information();
*/
std::string read_file(const std::string &filename, bool binary);
-/** Return a value as a string
- *
- * @param[in] val Input value.
- *
- * @return Value represented as a string
- */
-template <typename T>
-const std::string val_to_string(T val)
-{
- return static_cast<const std::ostringstream &>(std::ostringstream() << val).str();
-}
-
/** The size in bytes of the data type
*
* @param[in] data_type Input data type
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 6439426e83..5e8204fa1b 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -59,8 +59,8 @@ void CLActivationLayerKernel::configure(ICLTensor *input, ICLTensor *output, Act
build_opts.insert(("-D" + string_from_activation_func(act_info.activation())));
build_opts.insert(("-D" + ((is_data_type_float(input->info()->data_type())) ? std::string("TYPE_FP") : std::string("TYPE_INT"))));
build_opts.insert(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
- build_opts.insert(("-DA=" + val_to_string(act_info.a())));
- build_opts.insert(("-DB=" + val_to_string(act_info.b())));
+ build_opts.insert(("-DA=" + support::cpp11::to_string(act_info.a())));
+ build_opts.insert(("-DB=" + support::cpp11::to_string(act_info.b())));
build_opts.insert(output == nullptr ? "-DIN_PLACE" : "");
// Create kernel
diff --git a/src/core/CL/kernels/CLConvolutionKernel.cpp b/src/core/CL/kernels/CLConvolutionKernel.cpp
index bdfe398a1d..fd64dc4fe0 100644
--- a/src/core/CL/kernels/CLConvolutionKernel.cpp
+++ b/src/core/CL/kernels/CLConvolutionKernel.cpp
@@ -79,7 +79,7 @@ void CLConvolutionKernel<matrix_size>::configure(const ICLTensor *input, ICLTens
options.insert(mat_str.str());
}
- options.insert("-DSCALE=" + val_to_string(scale));
+ options.insert("-DSCALE=" + support::cpp11::to_string(scale));
DataType data_type = data_type_for_convolution_matrix(conv, matrix_size * matrix_size);
options.insert("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
@@ -143,7 +143,7 @@ void CLSeparableConvolutionHorKernel<matrix_size>::configure(const ICLTensor *in
for(unsigned int j = 0; j < matrix_size * matrix_size; j++)
{
- build_opts.insert("-DMAT" + val_to_string(j) + "=" + val_to_string(mat[j]));
+ build_opts.insert("-DMAT" + support::cpp11::to_string(j) + "=" + support::cpp11::to_string(mat[j]));
}
build_opts.insert("-DSCALE=0");
@@ -151,7 +151,7 @@ void CLSeparableConvolutionHorKernel<matrix_size>::configure(const ICLTensor *in
build_opts.insert("-DDATA_TYPE=" + get_cl_type_from_data_type(output->info()->data_type()));
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution_separable1x" + val_to_string(matrix_size) + "_static", build_opts));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution_separable1x" + support::cpp11::to_string(matrix_size) + "_static", build_opts));
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
@@ -195,10 +195,10 @@ void CLSeparableConvolutionVertKernel<matrix_size>::configure(const ICLTensor *i
for(unsigned int j = 0; j < matrix_size * matrix_size; j++)
{
- build_opts.insert("-DMAT" + val_to_string(j) + "=" + val_to_string(mat[j]));
+ build_opts.insert("-DMAT" + support::cpp11::to_string(j) + "=" + support::cpp11::to_string(mat[j]));
}
- build_opts.insert("-DSCALE=" + val_to_string(scale));
+ build_opts.insert("-DSCALE=" + support::cpp11::to_string(scale));
build_opts.insert("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
@@ -209,7 +209,7 @@ void CLSeparableConvolutionVertKernel<matrix_size>::configure(const ICLTensor *i
build_opts.insert(out_type.str());
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution_separable" + val_to_string(matrix_size) + "x1_static", build_opts));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution_separable" + support::cpp11::to_string(matrix_size) + "x1_static", build_opts));
// Configure kernel window
constexpr unsigned int num_elems_processed_per_iteration = 8;
@@ -270,16 +270,16 @@ void CLConvolutionRectangleKernel::configure(const ICLTensor *input, ICLTensor *
for(unsigned int j = 0; j < MAX_MATRIX_SIZE; j++)
{
- options.insert("-DMAT" + val_to_string(j) + "=" + val_to_string(mat[j]));
+ options.insert("-DMAT" + support::cpp11::to_string(j) + "=" + support::cpp11::to_string(mat[j]));
}
- options.insert("-DSCALE=" + val_to_string(scale));
+ options.insert("-DSCALE=" + support::cpp11::to_string(scale));
DataType data_type = data_type_for_convolution_matrix(conv, matrix_size);
options.insert("-DDATA_TYPE=" + get_cl_type_from_data_type(data_type));
- options.insert("-DMATRIX_WIDTH=" + val_to_string(width));
- options.insert("-DMATRIX_HEIGHT=" + val_to_string(height));
+ options.insert("-DMATRIX_WIDTH=" + support::cpp11::to_string(width));
+ options.insert("-DMATRIX_HEIGHT=" + support::cpp11::to_string(height));
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("convolution_rectangle", options));
diff --git a/src/core/CL/kernels/CLFillBorderKernel.cpp b/src/core/CL/kernels/CLFillBorderKernel.cpp
index 981aad665a..7683ff9a49 100644
--- a/src/core/CL/kernels/CLFillBorderKernel.cpp
+++ b/src/core/CL/kernels/CLFillBorderKernel.cpp
@@ -86,10 +86,10 @@ void CLFillBorderKernel::configure(ICLTensor *tensor, BorderSize border_size, Bo
std::set<std::string> build_opts;
build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(dt)));
build_opts.emplace(("-DSELECT_TYPE=" + select_type));
- build_opts.emplace(("-DBORDER_SIZE_TOP=" + val_to_string(border_size.top)));
- build_opts.emplace(("-DBORDER_SIZE_BOTTOM=" + val_to_string(border_size.bottom)));
- build_opts.emplace(("-DBORDER_SIZE_LEFT=" + val_to_string(border_size.left)));
- build_opts.emplace(("-DBORDER_SIZE_RIGHT=" + val_to_string(border_size.right)));
+ build_opts.emplace(("-DBORDER_SIZE_TOP=" + support::cpp11::to_string(border_size.top)));
+ build_opts.emplace(("-DBORDER_SIZE_BOTTOM=" + support::cpp11::to_string(border_size.bottom)));
+ build_opts.emplace(("-DBORDER_SIZE_LEFT=" + support::cpp11::to_string(border_size.left)));
+ build_opts.emplace(("-DBORDER_SIZE_RIGHT=" + support::cpp11::to_string(border_size.right)));
// Create kernel
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
diff --git a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
index 7312cc25cb..3850c4d2cd 100644
--- a/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
+++ b/src/core/CL/kernels/CLGEMMInterleave4x4Kernel.cpp
@@ -62,7 +62,7 @@ void CLGEMMInterleave4x4Kernel::configure(const ICLTensor *input, ICLTensor *out
// Create kernel
std::string data_type_name;
- data_type_name = val_to_string(input->info()->element_size() * 8) + "bit";
+ data_type_name = support::cpp11::to_string(input->info()->element_size() * 8) + "bit";
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_interleave4x4_" + data_type_name));
// Configure kernel window
diff --git a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
index c6e05b92a2..ce68c1f9cd 100644
--- a/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMLowpMatrixMultiplyKernel.cpp
@@ -33,6 +33,7 @@
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "support/ToolchainSupport.h"
#include <cstddef>
#include <cstdint>
@@ -63,7 +64,7 @@ void CLGEMMLowpMatrixMultiplyKernel::configure(const ICLTensor *input0, const IC
_output = output;
// Create kernel and set static arguments
- std::set<std::string> build_opts = { ("-DWIDTH_MATRIX_B=" + val_to_string(input1->info()->dimension(0))) };
+ std::set<std::string> build_opts = { ("-DWIDTH_MATRIX_B=" + support::cpp11::to_string(input1->info()->dimension(0))) };
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gemm_mm_u8", build_opts));
unsigned int idx = 3 * num_arguments_per_2D_tensor(); //Skip the input and output parameters
_kernel.setArg<int32_t>(idx++, a_offset);
diff --git a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
index 0ef02f8a46..ecef7e1774 100644
--- a/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
+++ b/src/core/CL/kernels/CLGEMMTranspose1xWKernel.cpp
@@ -73,7 +73,7 @@ void CLGEMMTranspose1xWKernel::configure(const ICLTensor *input, ICLTensor *outp
* The output matrix will have the following shape: [ height * W, ceil(width / W) ], where W = (16 / element size of the tensor)
*/
// Create kernel
- std::string kernel_name = "gemm_transpose1x" + val_to_string(num_elems_processed_per_iteration);
+ std::string kernel_name = "gemm_transpose1x" + support::cpp11::to_string(num_elems_processed_per_iteration);
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
// Configure window
diff --git a/src/core/CL/kernels/CLMinMaxLocationKernel.cpp b/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
index 939a53b03a..8a493209ca 100644
--- a/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
+++ b/src/core/CL/kernels/CLMinMaxLocationKernel.cpp
@@ -66,8 +66,8 @@ void CLMinMaxKernel::configure(const ICLImage *input, cl::Buffer *min_max)
// Set kernel build options
std::set<std::string> build_opts;
build_opts.emplace("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
- build_opts.emplace("-DDATA_TYPE_MAX=" + val_to_string<int>(_data_type_max_min[0]));
- build_opts.emplace("-DDATA_TYPE_MIN=" + val_to_string<int>(_data_type_max_min[1]));
+ build_opts.emplace("-DDATA_TYPE_MAX=" + support::cpp11::to_string(_data_type_max_min[0]));
+ build_opts.emplace("-DDATA_TYPE_MIN=" + support::cpp11::to_string(_data_type_max_min[1]));
build_opts.emplace((0 != (num_elems_processed_per_iteration % max_cl_vector_width)) ? "-DNON_MULTIPLE_OF_16" : "");
// Create kernel
diff --git a/src/core/CL/kernels/CLPoolingLayerKernel.cpp b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
index 08f0d4a4e5..3777e3bb49 100644
--- a/src/core/CL/kernels/CLPoolingLayerKernel.cpp
+++ b/src/core/CL/kernels/CLPoolingLayerKernel.cpp
@@ -112,7 +112,7 @@ void CLPoolingLayerKernel::configure(const ICLTensor *input, ICLTensor *output,
build_opts.emplace(("-DPOOL_" + ((PoolingType::MAX == pool_type) ? std::string("MAX") : std::string("AVG"))));
// Create kernel
- std::string kernel_name = "pooling_layer_" + val_to_string(pool_size);
+ std::string kernel_name = "pooling_layer_" + support::cpp11::to_string(pool_size);
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
// Set static kernel arguments
diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
index c488f90b91..3608a174ef 100644
--- a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
@@ -56,7 +56,7 @@ void CLLogits1DMaxKernel::configure(const ICLTensor *input, ICLTensor *output)
build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
if(is_data_type_fixed_point(input->info()->data_type()))
{
- build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+ build_opts.emplace(("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position())));
}
// Tell the kernel that the width is not a multiple of 16
@@ -111,7 +111,7 @@ void CLLogits1DShiftExpSumKernel::configure(const ICLTensor *input, const ICLTen
build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
if(is_data_type_fixed_point(input->info()->data_type()))
{
- build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+ build_opts.emplace(("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position())));
}
// Tell the kernel that the width is not a multiple of 16
@@ -184,7 +184,7 @@ void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *su
build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
if(is_data_type_fixed_point(input->info()->data_type()))
{
- build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+ build_opts.emplace(("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position())));
}
// Create kernel
diff --git a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp b/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp
index fabd9ad161..bc35de0f22 100644
--- a/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp
+++ b/tests/validation/CL/FixedPoint/FixedPoint_QS8.cpp
@@ -104,7 +104,7 @@ CLTensor compute_fixed_point_op(const TensorShape &shape, int fixed_point_positi
BOOST_TEST(!dst.info()->is_resizable());
// Set build options
- std::string build_opts = "-DFIXED_POINT_POS=" + val_to_string<int>(fixed_point_position);
+ std::string build_opts = "-DFIXED_POINT_POS=" + support::cpp11::to_string(fixed_point_position);
build_opts += " -DDATA_TYPE=qs8";
// Fill tensors.
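
Note: the whole patch is a mechanical substitution. The ad-hoc val_to_string template removed from arm_compute/core/Utils.h is replaced at every call site by support::cpp11::to_string from support/ToolchainSupport.h (the include added in CLGEMMLowpMatrixMultiplyKernel.cpp), a wrapper whose name suggests it exists to smooth over toolchain differences in C++11 library support. The following is a minimal, self-contained sketch of the two conversion styles, not the library's actual implementation; the names val_to_string_sketch and support_sketch::to_string are illustrative only.

// Sketch only: contrasts the removed ostringstream-based helper with a
// std::to_string-style wrapper of the kind this patch switches to.
#include <sstream>
#include <string>

// Old style: generic, but builds an ostringstream on every call.
template <typename T>
std::string val_to_string_sketch(T val)
{
    std::ostringstream ss;
    ss << val;
    return ss.str();
}

// Hypothetical wrapper in the spirit of support::cpp11::to_string:
// forwards arithmetic values to std::to_string.
namespace support_sketch
{
template <typename T>
std::string to_string(T value)
{
    return std::to_string(value);
}
} // namespace support_sketch

int main()
{
    // Both forms produce the kind of build option the CL kernels assemble,
    // e.g. "-DSCALE=4".
    const int scale       = 4;
    std::string old_way   = "-DSCALE=" + val_to_string_sketch(scale);
    std::string new_way   = "-DSCALE=" + support_sketch::to_string(scale);
    return (old_way == new_way) ? 0 : 1;
}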