author    Michalis Spyrou <michalis.spyrou@arm.com>   2019-10-10 14:33:47 +0100
committer Michalis Spyrou <michalis.spyrou@arm.com>   2019-10-16 12:04:25 +0000
commit    7c60c990fbed62aab1369c0e4462c4081dc3cfeb (patch)
tree      94329c7a6214b1385b15bc5225c198fd77cec5c9 /src
parent    a07ce151674e28a3e755f1c48785b599f1d34827 (diff)
download  ComputeLibrary-7c60c990fbed62aab1369c0e4462c4081dc3cfeb.tar.gz
COMPMID-2486: Remove disabled compiler warnings
Removed the following flags:

- -Wno-format-nonliteral: This had a side effect on Error.h that resulted in
  rewriting most of the macros. Since I was at it I removed all the va_args
  in order to comply with DCL50-CPP.
- -Wno-deprecated-increment-bool
- -Wno-vla-extension
- -Wno-mismatched-tags
- -Wno-redundant-move

Change-Id: I7c593854ecc3b7d595b8edcbd6a86d3c2563c6bd
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2069
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/CL/CLKernelLibrary.cpp                             |  8
-rw-r--r--  src/core/CL/ICLKernel.cpp                                   |  4
-rw-r--r--  src/core/CL/kernels/CLColorConvertKernel.cpp                | 24
-rw-r--r--  src/core/Error.cpp                                          | 24
-rw-r--r--  src/core/GLES_COMPUTE/GCKernelLibrary.cpp                   | 18
-rw-r--r--  src/core/GLES_COMPUTE/IGCKernel.cpp                         | 18
-rw-r--r--  src/core/NEON/kernels/NEFillBorderKernel.cpp                |  2
-rw-r--r--  src/core/NEON/kernels/NEPoolingLayerKernel.cpp              | 44
-rw-r--r--  src/core/SubTensorInfo.cpp                                  |  4
-rw-r--r--  src/core/TensorInfo.cpp                                     |  4
-rw-r--r--  src/core/Utils.cpp                                          |  2
-rw-r--r--  src/core/Validate.cpp                                       | 20
-rw-r--r--  src/runtime/CL/CLTuner.cpp                                  |  6
-rw-r--r--  src/runtime/CL/functions/CLCannyEdge.cpp                    |  2
-rw-r--r--  src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp      |  8
-rw-r--r--  src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp |  6
-rw-r--r--  src/runtime/GLES_COMPUTE/GCScheduler.cpp                    | 12
-rw-r--r--  src/runtime/NEON/functions/NECannyEdge.cpp                  |  3
18 files changed, 102 insertions, 107 deletions
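
The refactor described in the commit message drives most of the hunks below: error macros that take printf-style arguments are split off into _VAR variants whose arguments are formatted at the call site, so no va_list is passed into the library. The following is a minimal, self-contained sketch of that call-site split; the SKETCH_* macros are simplified stand-ins invented for illustration, not the real definitions in Error.h (which is outside this 'src'-only diff).

// Minimal sketch (assumption): simplified stand-ins for the real macros in
// Error.h, showing the plain vs. formatted call sites used throughout the diff.
#include <cstdio>
#include <stdexcept>
#include <string>

// Plain message, no format arguments.
#define SKETCH_ERROR(msg) \
    throw std::runtime_error(std::string("in ") + __func__ + ": " + (msg))

// Formatted message: the arguments are expanded here, at the call site,
// so no va_list ever crosses a function boundary (DCL50-CPP).
#define SKETCH_ERROR_VAR(fmt, ...)                           \
    do                                                       \
    {                                                        \
        char buf[512];                                       \
        std::snprintf(buf, sizeof(buf), (fmt), __VA_ARGS__); \
        SKETCH_ERROR(buf);                                   \
    } while(false)

void create_kernel(const std::string &kernel_name, bool found)
{
    if(!found)
    {
        // Fixed strings keep using the plain variant; anything with '%'
        // conversions moves to the _VAR variant.
        SKETCH_ERROR_VAR("Kernel %s not found in the CLKernelLibrary", kernel_name.c_str());
    }
}

int main()
{
    try
    {
        create_kernel("gemm", false);
    }
    catch(const std::runtime_error &e)
    {
        std::puts(e.what());
    }
    return 0;
}
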
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index c27f886129..0cd6e49824 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -1074,7 +1074,7 @@ Kernel CLKernelLibrary::create_kernel(const std::string &kernel_name, const Stri
if(_kernel_program_map.end() == kernel_program_it)
{
- ARM_COMPUTE_ERROR("Kernel %s not found in the CLKernelLibrary", kernel_name.c_str());
+ ARM_COMPUTE_ERROR_VAR("Kernel %s not found in the CLKernelLibrary", kernel_name.c_str());
}
std::string concat_str;
@@ -1218,7 +1218,7 @@ const Program &CLKernelLibrary::load_program(const std::string &program_name) co
if(_program_source_map.end() == program_source_it)
{
- ARM_COMPUTE_ERROR("Embedded program for %s does not exist.", program_name.c_str());
+ ARM_COMPUTE_ERROR_VAR("Embedded program for %s does not exist.", program_name.c_str());
}
program = Program(_context, program_name, program_source_it->second);
@@ -1238,7 +1238,7 @@ const Program &CLKernelLibrary::load_program(const std::string &program_name) co
}
else
{
- ARM_COMPUTE_ERROR("Kernel file %s does not exist.", source_name.c_str());
+ ARM_COMPUTE_ERROR_VAR("Kernel file %s does not exist.", source_name.c_str());
}
#endif /* EMBEDDED_KERNELS */
@@ -1293,7 +1293,7 @@ std::string CLKernelLibrary::get_program_source(const std::string &program_name)
if(program_source_it == _program_source_map.end())
{
- ARM_COMPUTE_ERROR("Embedded program for %s does not exist.", program_name.c_str());
+ ARM_COMPUTE_ERROR_VAR("Embedded program for %s does not exist.", program_name.c_str());
}
return program_source_it->second;
diff --git a/src/core/CL/ICLKernel.cpp b/src/core/CL/ICLKernel.cpp
index d81ad46b29..ea9c62a4c3 100644
--- a/src/core/CL/ICLKernel.cpp
+++ b/src/core/CL/ICLKernel.cpp
@@ -112,8 +112,8 @@ void ICLKernel::add_tensor_argument(unsigned &idx, const ICLTensor *tensor, cons
_kernel.setArg<cl_uint>(idx++, offset_first_element);
- ARM_COMPUTE_ERROR_ON_MSG(idx_start + num_arguments_per_tensor<dimension_size>() != idx,
- "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_tensor<dimension_size>());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_tensor<dimension_size>() != idx,
+ "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_tensor<dimension_size>());
ARM_COMPUTE_UNUSED(idx_start);
}
diff --git a/src/core/CL/kernels/CLColorConvertKernel.cpp b/src/core/CL/kernels/CLColorConvertKernel.cpp
index f3b93282e8..d8a8380fad 100644
--- a/src/core/CL/kernels/CLColorConvertKernel.cpp
+++ b/src/core/CL/kernels/CLColorConvertKernel.cpp
@@ -98,9 +98,9 @@ void CLColorConvertKernel::configure(const ICLTensor *input, ICLTensor *output)
default:
break;
}
- ARM_COMPUTE_ERROR_ON_MSG(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
- string_from_format(input->info()->format()).c_str(),
- string_from_format(output->info()->format()).c_str());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
+ string_from_format(input->info()->format()).c_str(),
+ string_from_format(output->info()->format()).c_str());
std::stringstream kernel_name;
@@ -164,9 +164,9 @@ void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLImage *outpu
default:
break;
}
- ARM_COMPUTE_ERROR_ON_MSG(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
- string_from_format(input->info()->format()).c_str(),
- string_from_format(output->info()->format()).c_str());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
+ string_from_format(input->info()->format()).c_str(),
+ string_from_format(output->info()->format()).c_str());
std::stringstream kernel_name;
@@ -274,9 +274,9 @@ void CLColorConvertKernel::configure(const ICLImage *input, ICLMultiImage *outpu
break;
}
- ARM_COMPUTE_ERROR_ON_MSG(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
- string_from_format(input->info()->format()).c_str(),
- string_from_format(output->info()->format()).c_str());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
+ string_from_format(input->info()->format()).c_str(),
+ string_from_format(output->info()->format()).c_str());
std::stringstream kernel_name;
@@ -365,9 +365,9 @@ void CLColorConvertKernel::configure(const ICLMultiImage *input, ICLMultiImage *
default:
break;
}
- ARM_COMPUTE_ERROR_ON_MSG(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
- string_from_format(input->info()->format()).c_str(),
- string_from_format(output->info()->format()).c_str());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(num_elems_processed_per_iteration == 0, "Conversion from %s to %s not supported",
+ string_from_format(input->info()->format()).c_str(),
+ string_from_format(output->info()->format()).c_str());
std::stringstream kernel_name;
diff --git a/src/core/Error.cpp b/src/core/Error.cpp
index 45cce66804..8d321c01fd 100644
--- a/src/core/Error.cpp
+++ b/src/core/Error.cpp
@@ -31,30 +31,20 @@
using namespace arm_compute;
-Status arm_compute::create_error_va_list(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, va_list args)
+Status arm_compute::create_error(ErrorCode error_code, std::string msg)
{
- std::array<char, 512> out{ 0 };
- int offset = snprintf(out.data(), out.size(), "in %s %s:%d: ", function, file, line);
- vsnprintf(out.data() + offset, out.size() - offset, msg, args);
-
- return Status(error_code, std::string(out.data()));
+ return Status(error_code, msg);
}
-Status arm_compute::create_error(ErrorCode error_code, const char *function, const char *file, const int line, const char *msg, ...)
+Status arm_compute::create_error_msg(ErrorCode error_code, const char *func, const char *file, int line, const char *msg)
{
- va_list args;
- va_start(args, msg);
- auto err = create_error_va_list(error_code, function, file, line, msg, args);
- va_end(args);
- return err;
+ std::array<char, 512> out{ 0 };
+ snprintf(out.data(), out.size(), "in %s %s:%d: %s", func, file, line, msg);
+ return Status(error_code, std::string(out.data()));
}
-void arm_compute::error(const char *function, const char *file, const int line, const char *msg, ...)
+void arm_compute::throw_error(Status err)
{
- va_list args;
- va_start(args, msg);
- auto err = create_error_va_list(ErrorCode::RUNTIME_ERROR, function, file, line, msg, args);
- va_end(args);
ARM_COMPUTE_THROW(std::runtime_error(err.error_description()));
}
void Status::internal_throw_on_error() const
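
For reference, the new helper above receives an already formatted message and only prepends the location prefix. The sketch below mirrors that behaviour in a self-contained program; Status and ErrorCode are simplified stand-ins for the real arm_compute types, and only the create_error_msg body follows the hunk above.

// Self-contained sketch; Status and ErrorCode are simplified stand-ins.
#include <array>
#include <cstdio>
#include <iostream>
#include <string>

enum class ErrorCode { OK, RUNTIME_ERROR };

class Status
{
public:
    Status(ErrorCode code = ErrorCode::OK, std::string msg = "")
        : _code(code), _msg(std::move(msg)) {}
    ErrorCode error_code() const { return _code; }
    const std::string &error_description() const { return _msg; }
private:
    ErrorCode   _code;
    std::string _msg;
};

// Same shape as the new arm_compute::create_error_msg: the message arrives
// fully formatted and the helper only prepends "in <func> <file>:<line>: ".
Status create_error_msg(ErrorCode error_code, const char *func, const char *file, int line, const char *msg)
{
    std::array<char, 512> out{ 0 };
    std::snprintf(out.data(), out.size(), "in %s %s:%d: %s", func, file, line, msg);
    return Status(error_code, std::string(out.data()));
}

int main()
{
    const Status err = create_error_msg(ErrorCode::RUNTIME_ERROR, __func__, __FILE__, __LINE__,
                                        "Kernel file gemm.cl does not exist.");
    std::cout << err.error_description() << "\n";
    return 0;
}
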
diff --git a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
index 0af8c7d4cc..015e085355 100644
--- a/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
+++ b/src/core/GLES_COMPUTE/GCKernelLibrary.cpp
@@ -66,7 +66,7 @@ GLuint GCProgram::link_program(GLuint shader)
std::vector<GLchar> log(length);
ARM_COMPUTE_GL_CHECK(glGetProgramInfoLog(program, length, nullptr, log.data()));
- ARM_COMPUTE_ERROR("Error: Linker log:\n%s\n", log.data());
+ ARM_COMPUTE_ERROR_VAR("Error: Linker log:\n%s\n", log.data());
return 0;
}
@@ -120,7 +120,7 @@ GLuint GCProgram::compile_shader(const std::string &build_options)
<< output_stream.rdbuf());
#endif /* ARM_COMPUTE_DEBUG_ENABLED */
- ARM_COMPUTE_ERROR("Error: Compiler log:\n%s\n", log.data());
+ ARM_COMPUTE_ERROR_VAR("Error: Compiler log:\n%s\n", log.data());
return 0;
}
@@ -152,9 +152,9 @@ GCKernel::GCKernel(std::string name, GLuint program)
ARM_COMPUTE_GL_CHECK(glGenBuffers(1, &_shader_params_ubo_name));
_shader_params_index = ARM_COMPUTE_GL_CHECK(glGetUniformBlockIndex(_program, _shader_params_name));
- ARM_COMPUTE_ERROR_ON_MSG(_shader_params_index == GL_INVALID_INDEX, "Failed to get index of %s", _shader_params_name);
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(_shader_params_index == GL_INVALID_INDEX, "Failed to get index of %s", _shader_params_name);
ARM_COMPUTE_GL_CHECK(glGetActiveUniformBlockiv(_program, _shader_params_index, GL_UNIFORM_BLOCK_DATA_SIZE, &_shader_params_size));
- ARM_COMPUTE_ERROR_ON_MSG(_shader_params_size == 0, "Failed to get size of %s", _shader_params_name);
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(_shader_params_size == 0, "Failed to get size of %s", _shader_params_name);
}
void GCKernel::cleanup()
@@ -177,8 +177,8 @@ void GCKernel::unuse()
void GCKernel::update_shader_params()
{
- ARM_COMPUTE_ERROR_ON_MSG((_shader_params_size != (int)(_shader_arguments.size() * sizeof(_shader_arguments[0]))), "Arguments size (%d) is not equal to shader params block size (%d)",
- _shader_arguments.size() * sizeof(_shader_arguments[0]), _shader_params_size);
+ ARM_COMPUTE_ERROR_ON_MSG_VAR((_shader_params_size != (int)(_shader_arguments.size() * sizeof(_shader_arguments[0]))), "Arguments size (%zu) is not equal to shader params block size (%d)",
+ _shader_arguments.size() * sizeof(_shader_arguments[0]), _shader_params_size);
ARM_COMPUTE_GL_CHECK(glUniformBlockBinding(_program, _shader_params_index, _shader_params_binding_point));
ARM_COMPUTE_GL_CHECK(glBindBufferBase(GL_UNIFORM_BUFFER, _shader_params_binding_point, _shader_params_ubo_name));
@@ -344,7 +344,7 @@ GCKernel GCKernelLibrary::create_kernel(const std::string &shader_name, const St
if(_shader_program_map.end() == shader_program_it)
{
- ARM_COMPUTE_ERROR("Shader %s not found in the GCKernelLibrary", shader_name.c_str());
+ ARM_COMPUTE_ERROR_VAR("Shader %s not found in the GCKernelLibrary", shader_name.c_str());
}
// Check if the program has been built before with same build options.
@@ -473,7 +473,7 @@ const GCProgram &GCKernelLibrary::load_program(const std::string &program_name)
if(_program_source_map.end() == program_source_it)
{
- ARM_COMPUTE_ERROR("Embedded program for %s does not exist.", program_name.c_str());
+ ARM_COMPUTE_ERROR_VAR("Embedded program for %s does not exist.", program_name.c_str());
}
program = GCProgram(program_name, program_source_it->second);
@@ -486,7 +486,7 @@ const GCProgram &GCKernelLibrary::load_program(const std::string &program_name)
}
else
{
- ARM_COMPUTE_ERROR("Shader file %s does not exist.", source_name.c_str());
+ ARM_COMPUTE_ERROR_VAR("Shader file %s does not exist.", source_name.c_str());
}
#endif /* EMBEDDED_KERNELS */
diff --git a/src/core/GLES_COMPUTE/IGCKernel.cpp b/src/core/GLES_COMPUTE/IGCKernel.cpp
index ecd63b54a4..4da35c7d5e 100644
--- a/src/core/GLES_COMPUTE/IGCKernel.cpp
+++ b/src/core/GLES_COMPUTE/IGCKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,12 +49,12 @@ void arm_compute::enqueue(IGCKernel &kernel, const Window &window, const gles::N
ARM_COMPUTE_ERROR_ON((0 == (window.x().end() - window.x().start())) || (0 == (window.y().end() - window.y().start())));
- ARM_COMPUTE_ERROR_ON_MSG((((window.x().end() - window.x().start()) % (window.x().step() * lws[0])) != 0),
- "window x end =%d, start=%d, step=%d, lws x=%d", window.x().end(), window.x().start(), window.x().step(), lws[0]);
- ARM_COMPUTE_ERROR_ON_MSG((((window.y().end() - window.y().start()) % (window.y().step() * lws[1])) != 0),
- "window y end =%d, start=%d, step=%d, lws y=%d", window.y().end(), window.y().start(), window.y().step(), lws[1]);
- ARM_COMPUTE_ERROR_ON_MSG((((window.z().end() - window.z().start()) % (window.z().step() * lws[2])) != 0),
- "window z end =%d, start=%d, step=%d, lws z=%d", window.z().end(), window.z().start(), window.z().step(), lws[2]);
+ ARM_COMPUTE_ERROR_ON_MSG_VAR((((window.x().end() - window.x().start()) % (window.x().step() * lws[0])) != 0),
+ "window x end =%d, start=%d, step=%d, lws x=%zu", window.x().end(), window.x().start(), window.x().step(), lws[0]);
+ ARM_COMPUTE_ERROR_ON_MSG_VAR((((window.y().end() - window.y().start()) % (window.y().step() * lws[1])) != 0),
+ "window y end =%d, start=%d, step=%d, lws y=%zu", window.y().end(), window.y().start(), window.y().step(), lws[1]);
+ ARM_COMPUTE_ERROR_ON_MSG_VAR((((window.z().end() - window.z().start()) % (window.z().step() * lws[2])) != 0),
+ "window z end =%d, start=%d, step=%d, lws z=%zu", window.z().end(), window.z().start(), window.z().step(), lws[2]);
ARM_COMPUTE_GL_CHECK(glDispatchCompute(((window.x().end() - window.x().start()) / window.x().step()) / lws[0],
((window.y().end() - window.y().start()) / window.y().step()) / lws[1],
@@ -114,8 +114,8 @@ void IGCKernel::add_tensor_argument(unsigned int &idx, const IGCTensor *tensor,
ARM_COMPUTE_GL_CHECK(glBindBufferBase(GL_SHADER_STORAGE_BUFFER, binding_point, tensor->gc_buffer()));
- ARM_COMPUTE_ERROR_ON_MSG(idx_start + num_arguments_per_tensor<dimension_size>() != idx,
- "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_tensor<dimension_size>());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(idx_start + num_arguments_per_tensor<dimension_size>() != idx,
+ "add_%dD_tensor_argument() is supposed to add exactly %d arguments to the kernel", dimension_size, num_arguments_per_tensor<dimension_size>());
ARM_COMPUTE_UNUSED(idx_start);
}
diff --git a/src/core/NEON/kernels/NEFillBorderKernel.cpp b/src/core/NEON/kernels/NEFillBorderKernel.cpp
index 4127dc8fbd..13db1659ce 100644
--- a/src/core/NEON/kernels/NEFillBorderKernel.cpp
+++ b/src/core/NEON/kernels/NEFillBorderKernel.cpp
@@ -195,7 +195,7 @@ void NEFillBorderKernel::fill_replicate_single_channel(const Window &window)
for(int i = -_border_size.top; i < 0; ++i)
{
// Copy top rows including left/right borders
- std::memcpy(base_addr + i * _tensor->info()->strides_in_bytes()[1] - _border_size.left * element_size,
+ std::memcpy(base_addr + i * static_cast<int>(_tensor->info()->strides_in_bytes()[1]) - _border_size.left * element_size,
base_addr - _border_size.left * element_size, (_border_size.left + width + _border_size.right) * element_size);
}
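
The cast added here (and the matching casts in the pooling kernel below) keeps the address computation signed: i is negative for the rows above the tensor while the stride is an unsigned value, so without the cast the multiplication is performed in unsigned arithmetic and wraps rather than yielding a negative byte offset. A small self-contained illustration with made-up values:

#include <cstddef>
#include <iostream>

int main()
{
    const int         i      = -1; // e.g. a top-border row index
    const std::size_t stride = 64; // e.g. strides_in_bytes()[1]

    // Mixed signed/unsigned arithmetic: i is converted to size_t first,
    // so the product wraps to a huge positive value instead of -64.
    const std::size_t wrapped = i * stride;

    // With the cast from the patch the multiplication stays signed.
    const int offset = i * static_cast<int>(stride);

    std::cout << "without cast: " << wrapped << "\n";
    std::cout << "with cast:    " << offset << "\n";
    return 0;
}
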
diff --git a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
index 62c9ca0d5e..58fa2d6b41 100644
--- a/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEPoolingLayerKernel.cpp
@@ -1196,8 +1196,8 @@ void NEPoolingLayerKernel::poolingMxN_f32_nchw(const Window &window_input, const
int x = 0;
for(; x <= (pool_size_x - 4); x += 4)
{
- const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
// Get power of 2 in case of l2 pooling and accumulate
if(pooling_type == PoolingType::L2)
@@ -1213,7 +1213,8 @@ void NEPoolingLayerKernel::poolingMxN_f32_nchw(const Window &window_input, const
// Leftover for loop
for(; x < pool_size_x; ++x)
{
- float data = *(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() + (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ float data = *(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
// Get power of 2 in case of l2 pooling
if(pooling_type == PoolingType::L2)
@@ -1248,15 +1249,16 @@ void NEPoolingLayerKernel::poolingMxN_f32_nchw(const Window &window_input, const
int x = 0;
for(; x <= (pool_size_x - 4); x += 4)
{
- const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
vres = vmaxq_f32(vres, data);
}
// Leftover for loop
for(; x < pool_size_x; ++x)
{
- const float data = *(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() + (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ const float data = *(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
res = std::max(res, data);
}
}
@@ -1540,8 +1542,8 @@ void NEPoolingLayerKernel::poolingMxN_f32_nhwc(const Window &window_input, const
{
for(int x = pool_start_x; x < pool_end_x; ++x)
{
- const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().y() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().z()));
+ const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().y()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().z())));
// Get power of 2 in case of l2 pooling and accumulate
if(pooling_type == PoolingType::L2)
@@ -1564,8 +1566,8 @@ void NEPoolingLayerKernel::poolingMxN_f32_nhwc(const Window &window_input, const
{
for(int x = pool_start_x; x < pool_end_x; ++x)
{
- const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().y() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().z()));
+ const float32x4_t data = vld1q_f32(reinterpret_cast<const float *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().y()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().z())));
vres = vmaxq_f32(vres, data);
}
}
@@ -1621,8 +1623,8 @@ void NEPoolingLayerKernel::poolingMxN_qasymm8_nchw(const Window &window_input, c
int x = 0;
for(; x <= (pool_size_x - 8); x += 8)
{
- const uint8x8_t data = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ const uint8x8_t data = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
const uint16x8_t data_u16 = vmovl_u8(data);
vres = vaddq_u32(vres, vaddl_u16(vget_high_u16(data_u16), vget_low_u16(data_u16)));
@@ -1631,7 +1633,8 @@ void NEPoolingLayerKernel::poolingMxN_qasymm8_nchw(const Window &window_input, c
// Leftover for loop
for(; x < pool_size_x; ++x)
{
- uint8_t data = *(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() + (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ uint8_t data = *(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
sres += data;
}
}
@@ -1653,15 +1656,16 @@ void NEPoolingLayerKernel::poolingMxN_qasymm8_nchw(const Window &window_input, c
int x = 0;
for(; x <= (pool_size_x - 8); x += 8)
{
- const uint8x8_t data = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ const uint8x8_t data = vld1_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
vres = vmax_u8(vres, data);
}
// Leftover for loop
for(; x < pool_size_x; ++x)
{
- const uint8_t data = *(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().x() + (y - pool_pad_top) * _input->info()->strides_in_bytes().y()));
+ const uint8_t data = *(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().x()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().y())));
res = std::max(res, data);
}
}
@@ -1732,8 +1736,8 @@ void NEPoolingLayerKernel::poolingMxN_qasymm8_nhwc(const Window &window_input, c
{
for(int x = pool_start_x; x < pool_end_x; ++x)
{
- const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().y() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().z()));
+ const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().y()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().z())));
const uint16x8_t data_u16 = vmovl_u8(vget_low_u8(data));
const uint16x8_t data2_u16 = vmovl_u8(vget_high_u8(data));
@@ -1770,8 +1774,8 @@ void NEPoolingLayerKernel::poolingMxN_qasymm8_nhwc(const Window &window_input, c
{
for(int x = pool_start_x; x < pool_end_x; ++x)
{
- const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * _input->info()->strides_in_bytes().y() +
- (y - pool_pad_top) * _input->info()->strides_in_bytes().z()));
+ const uint8x16_t data = vld1q_u8(reinterpret_cast<const uint8_t *>(input.ptr() + (x - pool_pad_left) * static_cast<int>(_input->info()->strides_in_bytes().y()) + (y - pool_pad_top) * static_cast<int>
+ (_input->info()->strides_in_bytes().z())));
vres = vmaxq_u8(vres, data);
}
}
diff --git a/src/core/SubTensorInfo.cpp b/src/core/SubTensorInfo.cpp
index 237f1333f2..2db76b475a 100644
--- a/src/core/SubTensorInfo.cpp
+++ b/src/core/SubTensorInfo.cpp
@@ -123,11 +123,11 @@ bool SubTensorInfo::extend_padding(const PaddingSize &padding)
return _parent->extend_padding(padding);
}
-size_t SubTensorInfo::offset_element_in_bytes(const Coordinates &pos) const
+int32_t SubTensorInfo::offset_element_in_bytes(const Coordinates &pos) const
{
ARM_COMPUTE_ERROR_ON_COORDINATES_DIMENSIONS_GTE(pos, _tensor_shape.num_dimensions());
- size_t offset = offset_first_element_in_bytes();
+ int32_t offset = offset_first_element_in_bytes();
const Strides &strides = strides_in_bytes();
for(size_t i = 0; i < _tensor_shape.num_dimensions(); ++i)
diff --git a/src/core/TensorInfo.cpp b/src/core/TensorInfo.cpp
index 33d682f772..cd36e8be2c 100644
--- a/src/core/TensorInfo.cpp
+++ b/src/core/TensorInfo.cpp
@@ -384,11 +384,11 @@ ITensorInfo &TensorInfo::reset_padding()
return *this;
}
-size_t TensorInfo::offset_element_in_bytes(const Coordinates &pos) const
+int32_t TensorInfo::offset_element_in_bytes(const Coordinates &pos) const
{
ARM_COMPUTE_ERROR_ON_COORDINATES_DIMENSIONS_GTE(pos, _tensor_shape.num_dimensions());
- size_t offset = _offset_first_element_in_bytes;
+ int32_t offset = _offset_first_element_in_bytes;
for(size_t i = 0; i < _tensor_shape.num_dimensions(); ++i)
{
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index 7ce94e2aa4..d9e05d7ee8 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -75,7 +75,7 @@ std::string arm_compute::read_file(const std::string &filename, bool binary)
}
catch(const std::ifstream::failure &e)
{
- ARM_COMPUTE_ERROR("Accessing %s: %s", filename.c_str(), e.what());
+ ARM_COMPUTE_ERROR_VAR("Accessing %s: %s", filename.c_str(), e.what());
}
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
diff --git a/src/core/Validate.cpp b/src/core/Validate.cpp
index 5587dad77c..f9bd6d6a45 100644
--- a/src/core/Validate.cpp
+++ b/src/core/Validate.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,9 +82,9 @@ arm_compute::Status arm_compute::error_on_window_dimensions_gte(const char *func
{
for(unsigned int i = max_dim; i < arm_compute::Coordinates::num_max_dimensions; ++i)
{
- ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG((win[i].start() != 0) || (win[i].end() != win[i].step()),
- function, file, line,
- "Maximum number of dimensions expected %u but dimension %u is not empty", max_dim, i);
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG_VAR((win[i].start() != 0) || (win[i].end() != win[i].step()),
+ function, file, line,
+ "Maximum number of dimensions expected %u but dimension %u is not empty", max_dim, i);
}
return arm_compute::Status{};
}
@@ -94,9 +94,9 @@ arm_compute::Status arm_compute::error_on_tensor_not_2d(const char *function, co
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor == nullptr, function, file, line);
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor->info() == nullptr, function, file, line);
- ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(tensor->info()->num_dimensions() != 2,
- function, file, line,
- "Only 2D Tensors are supported by this kernel (%d passed)", tensor->info()->num_dimensions());
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG_VAR(tensor->info()->num_dimensions() != 2,
+ function, file, line,
+ "Only 2D Tensors are supported by this kernel (%zu passed)", tensor->info()->num_dimensions());
return arm_compute::Status{};
}
@@ -104,9 +104,9 @@ arm_compute::Status arm_compute::error_on_tensor_not_2d(const char *function, co
const arm_compute::ITensorInfo *tensor)
{
ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor == nullptr, function, file, line);
- ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(tensor->num_dimensions() != 2,
- function, file, line,
- "Only 2D Tensors are supported by this kernel (%d passed)", tensor->num_dimensions());
+ ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG_VAR(tensor->num_dimensions() != 2,
+ function, file, line,
+ "Only 2D Tensors are supported by this kernel (%zu passed)", tensor->num_dimensions());
return arm_compute::Status{};
}
diff --git a/src/runtime/CL/CLTuner.cpp b/src/runtime/CL/CLTuner.cpp
index a079503671..cba4ddc5e3 100644
--- a/src/runtime/CL/CLTuner.cpp
+++ b/src/runtime/CL/CLTuner.cpp
@@ -237,7 +237,7 @@ void CLTuner::load_from_file(const std::string &filename)
fs.open(filename, std::ios::in);
if(!fs.is_open())
{
- ARM_COMPUTE_ERROR("Failed to open '%s' (%s [%d])", filename.c_str(), strerror(errno), errno);
+ ARM_COMPUTE_ERROR_VAR("Failed to open '%s' (%s [%d])", filename.c_str(), strerror(errno), errno);
}
std::string line;
while(!std::getline(fs, line).fail())
@@ -246,7 +246,7 @@ void CLTuner::load_from_file(const std::string &filename)
std::string token;
if(std::getline(ss, token, ';').fail())
{
- ARM_COMPUTE_ERROR("Malformed row '%s' in %s (Should be of the form 'kernel_id;lws[0];lws[1];lws[2]')", ss.str().c_str(), filename.c_str());
+ ARM_COMPUTE_ERROR_VAR("Malformed row '%s' in %s (Should be of the form 'kernel_id;lws[0];lws[1];lws[2]')", ss.str().c_str(), filename.c_str());
}
std::string kernel_id = token;
cl::NDRange lws(1, 1, 1);
@@ -254,7 +254,7 @@ void CLTuner::load_from_file(const std::string &filename)
{
if(std::getline(ss, token, ';').fail())
{
- ARM_COMPUTE_ERROR("Malformed row '%s' in %s (Should be of the form 'kernel_id;lws[0];lws[1];lws[2]')", ss.str().c_str(), filename.c_str());
+ ARM_COMPUTE_ERROR_VAR("Malformed row '%s' in %s (Should be of the form 'kernel_id;lws[0];lws[1];lws[2]')", ss.str().c_str(), filename.c_str());
}
lws.get()[i] = support::cpp11::stoi(token);
}
diff --git a/src/runtime/CL/functions/CLCannyEdge.cpp b/src/runtime/CL/functions/CLCannyEdge.cpp
index 4c7458d1ed..dbaea81bff 100644
--- a/src/runtime/CL/functions/CLCannyEdge.cpp
+++ b/src/runtime/CL/functions/CLCannyEdge.cpp
@@ -128,7 +128,7 @@ void CLCannyEdge::configure(ICLTensor *input, ICLTensor *output, int32_t upper_t
}
else
{
- ARM_COMPUTE_ERROR("Gradient size %d not supported", gradient_size);
+ ARM_COMPUTE_ERROR_VAR("Gradient size %d not supported", gradient_size);
}
// Manage intermediate buffers
diff --git a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
index 13a34b43cd..e0acf06d49 100644
--- a/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
+++ b/src/runtime/CPP/functions/CPPDetectionOutputLayer.cpp
@@ -464,7 +464,7 @@ void CPPDetectionOutputLayer::run()
// Ignore background class.
continue;
}
- ARM_COMPUTE_ERROR_ON_MSG(_all_location_predictions[i].find(label) == _all_location_predictions[i].end(), "Could not find location predictions for label %d.", label);
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(_all_location_predictions[i].find(label) == _all_location_predictions[i].end(), "Could not find location predictions for label %d.", label);
const std::vector<BBox> &label_loc_preds = _all_location_predictions[i].find(label)->second;
@@ -497,7 +497,7 @@ void CPPDetectionOutputLayer::run()
const int label = _info.share_location() ? -1 : c;
if(conf_scores.find(c) == conf_scores.end() || decode_bboxes.find(label) == decode_bboxes.end())
{
- ARM_COMPUTE_ERROR("Could not find predictions for label %d.", label);
+ ARM_COMPUTE_ERROR_VAR("Could not find predictions for label %d.", label);
}
const std::vector<float> &scores = conf_scores.find(c)->second;
const std::vector<BBox> &bboxes = decode_bboxes.find(label)->second;
@@ -518,7 +518,7 @@ void CPPDetectionOutputLayer::run()
if(conf_scores.find(label) == conf_scores.end())
{
- ARM_COMPUTE_ERROR("Could not find predictions for label %d.", label);
+ ARM_COMPUTE_ERROR_VAR("Could not find predictions for label %d.", label);
}
const std::vector<float> &scores = conf_scores.find(label)->second;
@@ -570,7 +570,7 @@ void CPPDetectionOutputLayer::run()
{
// Either if there are no confidence predictions
// or there are no location predictions for current label.
- ARM_COMPUTE_ERROR("Could not find predictions for the label %d.", label);
+ ARM_COMPUTE_ERROR_VAR("Could not find predictions for the label %d.", label);
}
const std::vector<BBox> &bboxes = decode_bboxes.find(loc_label)->second;
const std::vector<int> &indices = it.second;
diff --git a/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
index 7b4f7b97c4..0addb0ead3 100644
--- a/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
+++ b/src/runtime/CPP/functions/CPPDetectionPostProcessLayer.cpp
@@ -46,16 +46,16 @@ Status validate_arguments(const ITensorInfo *input_box_encoding, const ITensorIn
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->num_dimensions() > 3, "The location input tensor shape should be [4, N, kBatchSize].");
if(input_box_encoding->num_dimensions() > 2)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->dimension(2) != kBatchSize, "The third dimension of the input box_encoding tensor should be equal to %d.", kBatchSize);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG_VAR(input_box_encoding->dimension(2) != kBatchSize, "The third dimension of the input box_encoding tensor should be equal to %d.", kBatchSize);
}
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_box_encoding->dimension(0) != kNumCoordBox, "The first dimension of the input box_encoding tensor should be equal to %d.", kNumCoordBox);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG_VAR(input_box_encoding->dimension(0) != kNumCoordBox, "The first dimension of the input box_encoding tensor should be equal to %d.", kNumCoordBox);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_class_score->dimension(0) != (info.num_classes() + 1),
"The first dimension of the input class_prediction should be equal to the number of classes plus one.");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_anchors->num_dimensions() > 3, "The anchors input tensor shape should be [4, N, kBatchSize].");
if(input_anchors->num_dimensions() > 2)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(input_anchors->dimension(0) != kNumCoordBox, "The first dimension of the input anchors tensor should be equal to %d.", kNumCoordBox);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG_VAR(input_anchors->dimension(0) != kNumCoordBox, "The first dimension of the input anchors tensor should be equal to %d.", kNumCoordBox);
}
ARM_COMPUTE_RETURN_ERROR_ON_MSG((input_box_encoding->dimension(1) != input_class_score->dimension(1))
|| (input_box_encoding->dimension(1) != input_anchors->dimension(1)),
diff --git a/src/runtime/GLES_COMPUTE/GCScheduler.cpp b/src/runtime/GLES_COMPUTE/GCScheduler.cpp
index 6a39e7c360..0824af3ed4 100644
--- a/src/runtime/GLES_COMPUTE/GCScheduler.cpp
+++ b/src/runtime/GLES_COMPUTE/GCScheduler.cpp
@@ -85,11 +85,11 @@ void GCScheduler::setup_context()
EGLBoolean res;
_display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
- ARM_COMPUTE_ERROR_ON_MSG(_display == EGL_NO_DISPLAY, "Failed to get display: 0x%x.", eglGetError());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(_display == EGL_NO_DISPLAY, "Failed to get display: 0x%x.", eglGetError());
res = eglInitialize(_display, nullptr, nullptr);
- ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to initialize egl: 0x%x.", eglGetError());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(res == EGL_FALSE, "Failed to initialize egl: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
const char *egl_extension_st = eglQueryString(_display, EGL_EXTENSIONS);
@@ -107,12 +107,12 @@ void GCScheduler::setup_context()
res = eglChooseConfig(_display, config_attribs.data(), &cfg, 1, &count);
- ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to choose config: 0x%x.", eglGetError());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(res == EGL_FALSE, "Failed to choose config: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
res = eglBindAPI(EGL_OPENGL_ES_API);
- ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to bind api: 0x%x.", eglGetError());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(res == EGL_FALSE, "Failed to bind api: 0x%x.", eglGetError());
const std::array<EGLint, 3> attribs =
{
@@ -124,11 +124,11 @@ void GCScheduler::setup_context()
EGL_NO_CONTEXT,
attribs.data());
- ARM_COMPUTE_ERROR_ON_MSG(_context == EGL_NO_CONTEXT, "Failed to create context: 0x%x.", eglGetError());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(_context == EGL_NO_CONTEXT, "Failed to create context: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
res = eglMakeCurrent(_display, EGL_NO_SURFACE, EGL_NO_SURFACE, _context);
- ARM_COMPUTE_ERROR_ON_MSG(res == EGL_FALSE, "Failed to make current: 0x%x.", eglGetError());
+ ARM_COMPUTE_ERROR_ON_MSG_VAR(res == EGL_FALSE, "Failed to make current: 0x%x.", eglGetError());
ARM_COMPUTE_UNUSED(res);
}
diff --git a/src/runtime/NEON/functions/NECannyEdge.cpp b/src/runtime/NEON/functions/NECannyEdge.cpp
index 032e617b1b..3d5fbfbfaa 100644
--- a/src/runtime/NEON/functions/NECannyEdge.cpp
+++ b/src/runtime/NEON/functions/NECannyEdge.cpp
@@ -37,6 +37,7 @@
#include "support/ToolchainSupport.h"
#include <cstring>
+#include <inttypes.h>
#include <utility>
using namespace arm_compute;
@@ -118,7 +119,7 @@ void NECannyEdge::configure(ITensor *input, ITensor *output, int32_t upper_thr,
}
else
{
- ARM_COMPUTE_ERROR("Gradient size %d not supported\n", gradient_size);
+ ARM_COMPUTE_ERROR_VAR("Gradient size %+" PRId32 " not supported\n", gradient_size);
}
// Manage intermediate buffers
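
The added <inttypes.h> include provides the PRId32 macro used in the reworked message, which expands to the printf conversion specifier matching int32_t on the target platform. A tiny standalone illustration of the pattern:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main()
{
    const int32_t gradient_size = 9;
    // PRId32 expands to the right length/conversion specifier for int32_t,
    // so the format string matches the argument type on every platform.
    std::printf("Gradient size %" PRId32 " not supported\n", gradient_size);
    return 0;
}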