about summary refs log tree commit diff
path: root/src/c
diff options
context:
space:
mode:
authorFelix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>2023-09-27 17:46:17 +0100
committerfelixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>2023-09-28 12:08:05 +0000
commitafd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree03bc7d5a762099989b16a656fa8d397b490ed70e /src/c
parentbdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
downloadComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Diffstat (limited to 'src/c')
-rw-r--r--src/c/AclContext.cpp21
-rw-r--r--src/c/AclQueue.cpp6
-rw-r--r--src/c/AclTensor.cpp29
-rw-r--r--src/c/AclTensorPack.cpp10
-rw-r--r--src/c/AclVersion.cpp3
-rw-r--r--src/c/cl/AclOpenClExt.cpp45
6 files changed, 55 insertions(+), 59 deletions(-)
diff --git a/src/c/AclContext.cpp b/src/c/AclContext.cpp
index 9b8ffea619..c6c0820c92 100644
--- a/src/c/AclContext.cpp
+++ b/src/c/AclContext.cpp
@@ -22,7 +22,6 @@
* SOFTWARE.
*/
#include "arm_compute/AclEntrypoints.h"
-
#include "arm_compute/core/Error.h"
#include "src/common/IContext.h"
@@ -42,25 +41,25 @@ namespace
template <typename ContextType>
arm_compute::IContext *create_backend_ctx(const AclContextOptions *options)
{
- return new(std::nothrow) ContextType(options);
+ return new (std::nothrow) ContextType(options);
}
bool is_target_valid(AclTarget target)
{
- return arm_compute::utils::is_in(target, { AclCpu, AclGpuOcl });
+ return arm_compute::utils::is_in(target, {AclCpu, AclGpuOcl});
}
bool are_context_options_valid(const AclContextOptions *options)
{
ARM_COMPUTE_ASSERT_NOT_NULLPTR(options);
- return arm_compute::utils::is_in(options->mode, { AclPreferFastRerun, AclPreferFastStart });
+ return arm_compute::utils::is_in(options->mode, {AclPreferFastRerun, AclPreferFastStart});
}
arm_compute::IContext *create_context(AclTarget target, const AclContextOptions *options)
{
ARM_COMPUTE_UNUSED(options);
- switch(target)
+ switch (target)
{
#ifdef ARM_COMPUTE_CPU_ENABLED
case AclCpu:
@@ -77,24 +76,22 @@ arm_compute::IContext *create_context(AclTarget target, const AclContextOptions
}
} // namespace
-extern "C" AclStatus AclCreateContext(AclContext *external_ctx,
- AclTarget target,
- const AclContextOptions *options)
+extern "C" AclStatus AclCreateContext(AclContext *external_ctx, AclTarget target, const AclContextOptions *options)
{
- if(!is_target_valid(target))
+ if (!is_target_valid(target))
{
ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Target is invalid!");
return AclUnsupportedTarget;
}
- if(options != nullptr && !are_context_options_valid(options))
+ if (options != nullptr && !are_context_options_valid(options))
{
ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Context options are invalid!");
return AclInvalidArgument;
}
auto ctx = create_context(target, options);
- if(ctx == nullptr)
+ if (ctx == nullptr)
{
ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Couldn't allocate internal resources for context creation!");
return AclOutOfMemory;
@@ -113,7 +110,7 @@ extern "C" AclStatus AclDestroyContext(AclContext external_ctx)
StatusCode status = detail::validate_internal_context(ctx);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
- if(ctx->refcount() != 0)
+ if (ctx->refcount() != 0)
{
ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Context has references on it that haven't been released!");
// TODO: Fix the refcount with callback when reaches 0
diff --git a/src/c/AclQueue.cpp b/src/c/AclQueue.cpp
index 020c6ed531..c3e867bffc 100644
--- a/src/c/AclQueue.cpp
+++ b/src/c/AclQueue.cpp
@@ -38,7 +38,7 @@ namespace
bool is_mode_valid(const AclQueueOptions *options)
{
ARM_COMPUTE_ASSERT_NOT_NULLPTR(options);
- return arm_compute::utils::is_in(options->mode, { AclTuningModeNone, AclRapid, AclNormal, AclExhaustive });
+ return arm_compute::utils::is_in(options->mode, {AclTuningModeNone, AclRapid, AclNormal, AclExhaustive});
}
} // namespace
@@ -51,14 +51,14 @@ extern "C" AclStatus AclCreateQueue(AclQueue *external_queue, AclContext externa
StatusCode status = detail::validate_internal_context(ctx);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
- if(options != nullptr && !is_mode_valid(options))
+ if (options != nullptr && !is_mode_valid(options))
{
ARM_COMPUTE_LOG_ERROR_ACL("Queue options are invalid");
return AclInvalidArgument;
}
auto queue = ctx->create_queue(options);
- if(queue == nullptr)
+ if (queue == nullptr)
{
ARM_COMPUTE_LOG_ERROR_ACL("Couldn't allocate internal resources");
return AclOutOfMemory;
diff --git a/src/c/AclTensor.cpp b/src/c/AclTensor.cpp
index 5b184697aa..c4cd08ac70 100644
--- a/src/c/AclTensor.cpp
+++ b/src/c/AclTensor.cpp
@@ -24,6 +24,7 @@
#include "arm_compute/AclEntrypoints.h"
#include "arm_compute/AclUtils.h"
#include "arm_compute/core/Error.h"
+
#include "src/common/ITensorV2.h"
#include "src/common/utils/Macros.h"
@@ -41,17 +42,17 @@ constexpr int32_t max_allowed_dims = 6;
*/
bool is_desc_valid(const AclTensorDescriptor &desc)
{
- if(desc.data_type > AclFloat32 || desc.data_type <= AclDataTypeUnknown)
+ if (desc.data_type > AclFloat32 || desc.data_type <= AclDataTypeUnknown)
{
ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Unknown data type!");
return false;
}
- if(desc.ndims > max_allowed_dims)
+ if (desc.ndims > max_allowed_dims)
{
ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Dimensions surpass the maximum allowed value!");
return false;
}
- if(desc.ndims > 0 && desc.shape == nullptr)
+ if (desc.ndims > 0 && desc.shape == nullptr)
{
ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Dimensions values are empty while dimensionality is > 0!");
return false;
@@ -66,10 +67,8 @@ StatusCode convert_and_validate_tensor(AclTensor tensor, ITensorV2 **internal_te
}
} // namespace
-extern "C" AclStatus AclCreateTensor(AclTensor *external_tensor,
- AclContext external_ctx,
- const AclTensorDescriptor *desc,
- bool allocate)
+extern "C" AclStatus
+AclCreateTensor(AclTensor *external_tensor, AclContext external_ctx, const AclTensorDescriptor *desc, bool allocate)
{
using namespace arm_compute;
@@ -78,14 +77,14 @@ extern "C" AclStatus AclCreateTensor(AclTensor *external_tensor,
StatusCode status = detail::validate_internal_context(ctx);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
- if(desc == nullptr || !is_desc_valid(*desc))
+ if (desc == nullptr || !is_desc_valid(*desc))
{
ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Descriptor is invalid!");
return AclInvalidArgument;
}
auto tensor = ctx->create_tensor(*desc, allocate);
- if(tensor == nullptr)
+ if (tensor == nullptr)
{
ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Couldn't allocate internal resources for tensor creation!");
return AclOutOfMemory;
@@ -103,7 +102,7 @@ extern "C" AclStatus AclMapTensor(AclTensor external_tensor, void **handle)
StatusCode status = detail::validate_internal_tensor(tensor);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
- if(handle == nullptr)
+ if (handle == nullptr)
{
ARM_COMPUTE_LOG_ERROR_ACL("[AclMapTensor]: Handle object is nullptr!");
return AclInvalidArgument;
@@ -160,12 +159,12 @@ extern "C" AclStatus AclGetTensorSize(AclTensor tensor, uint64_t *size)
{
using namespace arm_compute;
- if(size == nullptr)
+ if (size == nullptr)
{
return AclStatus::AclInvalidArgument;
}
- ITensorV2 *internal_tensor{ nullptr };
+ ITensorV2 *internal_tensor{nullptr};
auto status = convert_and_validate_tensor(tensor, &internal_tensor);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
@@ -177,15 +176,15 @@ extern "C" AclStatus AclGetTensorDescriptor(AclTensor tensor, AclTensorDescripto
{
using namespace arm_compute;
- if(desc == nullptr)
+ if (desc == nullptr)
{
return AclStatus::AclInvalidArgument;
}
- ITensorV2 *internal_tensor{ nullptr };
+ ITensorV2 *internal_tensor{nullptr};
const auto status = convert_and_validate_tensor(tensor, &internal_tensor);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
*desc = internal_tensor->get_descriptor();
return utils::as_cenum<AclStatus>(status);
-} \ No newline at end of file
+}
diff --git a/src/c/AclTensorPack.cpp b/src/c/AclTensorPack.cpp
index 6202524ca7..daf1be4f44 100644
--- a/src/c/AclTensorPack.cpp
+++ b/src/c/AclTensorPack.cpp
@@ -22,6 +22,7 @@
* SOFTWARE.
*/
#include "arm_compute/AclEntrypoints.h"
+
#include "src/common/ITensorV2.h"
#include "src/common/TensorPack.h"
#include "src/common/utils/Macros.h"
@@ -36,7 +37,7 @@ StatusCode PackTensorInternal(TensorPack &pack, AclTensor external_tensor, int32
status = detail::validate_internal_tensor(tensor);
- if(status != StatusCode::Success)
+ if (status != StatusCode::Success)
{
return status;
}
@@ -57,7 +58,7 @@ extern "C" AclStatus AclCreateTensorPack(AclTensorPack *external_pack, AclContex
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
auto pack = new TensorPack(ctx);
- if(pack == nullptr)
+ if (pack == nullptr)
{
ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Couldn't allocate internal resources!");
return AclOutOfMemory;
@@ -77,14 +78,15 @@ extern "C" AclStatus AclPackTensor(AclTensorPack external_pack, AclTensor extern
return AclStatus::AclSuccess;
}
-extern "C" AclStatus AclPackTensors(AclTensorPack external_pack, AclTensor *external_tensors, int32_t *slot_ids, size_t num_tensors)
+extern "C" AclStatus
+AclPackTensors(AclTensorPack external_pack, AclTensor *external_tensors, int32_t *slot_ids, size_t num_tensors)
{
using namespace arm_compute;
auto pack = get_internal(external_pack);
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(detail::validate_internal_pack(pack));
- for(unsigned i = 0; i < num_tensors; ++i)
+ for (unsigned i = 0; i < num_tensors; ++i)
{
ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(PackTensorInternal(*pack, external_tensors[i], slot_ids[i]));
}
diff --git a/src/c/AclVersion.cpp b/src/c/AclVersion.cpp
index 971189a6d4..a659e90837 100644
--- a/src/c/AclVersion.cpp
+++ b/src/c/AclVersion.cpp
@@ -25,8 +25,7 @@
namespace
{
-constexpr AclVersion version_info
-{
+constexpr AclVersion version_info{
ARM_COMPUTE_LIBRARY_VERSION_MAJOR,
ARM_COMPUTE_LIBRARY_VERSION_MINOR,
ARM_COMPUTE_LIBRARY_VERSION_PATCH,
diff --git a/src/c/cl/AclOpenClExt.cpp b/src/c/cl/AclOpenClExt.cpp
index e72babcae8..8e42cf5510 100644
--- a/src/c/cl/AclOpenClExt.cpp
+++ b/src/c/cl/AclOpenClExt.cpp
@@ -23,13 +23,12 @@
*/
#include "arm_compute/AclOpenClExt.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+
#include "src/common/ITensorV2.h"
#include "src/common/Types.h"
#include "src/gpu/cl/ClContext.h"
#include "src/gpu/cl/ClQueue.h"
-
-#include "arm_compute/core/CL/ICLTensor.h"
-
#include "support/Cast.h"
extern "C" AclStatus AclGetClContext(AclContext external_ctx, cl_context *opencl_context)
@@ -37,17 +36,17 @@ extern "C" AclStatus AclGetClContext(AclContext external_ctx, cl_context *opencl
using namespace arm_compute;
IContext *ctx = get_internal(external_ctx);
- if(detail::validate_internal_context(ctx) != StatusCode::Success)
+ if (detail::validate_internal_context(ctx) != StatusCode::Success)
{
return AclStatus::AclInvalidArgument;
}
- if(ctx->type() != Target::GpuOcl)
+ if (ctx->type() != Target::GpuOcl)
{
return AclStatus::AclInvalidTarget;
}
- if(opencl_context == nullptr)
+ if (opencl_context == nullptr)
{
return AclStatus::AclInvalidArgument;
}
@@ -62,23 +61,23 @@ extern "C" AclStatus AclSetClContext(AclContext external_ctx, cl_context opencl_
using namespace arm_compute;
IContext *ctx = get_internal(external_ctx);
- if(detail::validate_internal_context(ctx) != StatusCode::Success)
+ if (detail::validate_internal_context(ctx) != StatusCode::Success)
{
return AclStatus::AclInvalidArgument;
}
- if(ctx->type() != Target::GpuOcl)
+ if (ctx->type() != Target::GpuOcl)
{
return AclStatus::AclInvalidTarget;
}
- if(ctx->refcount() != 0)
+ if (ctx->refcount() != 0)
{
return AclStatus::AclUnsupportedConfig;
}
auto cl_ctx = utils::cast::polymorphic_downcast<arm_compute::gpu::opencl::ClContext *>(ctx);
- if(!cl_ctx->set_cl_ctx(::cl::Context(opencl_context)))
+ if (!cl_ctx->set_cl_ctx(::cl::Context(opencl_context)))
{
return AclStatus::AclRuntimeError;
}
@@ -91,17 +90,17 @@ extern "C" AclStatus AclGetClDevice(AclContext external_ctx, cl_device_id *openc
using namespace arm_compute;
IContext *ctx = get_internal(external_ctx);
- if(detail::validate_internal_context(ctx) != StatusCode::Success)
+ if (detail::validate_internal_context(ctx) != StatusCode::Success)
{
return AclStatus::AclInvalidArgument;
}
- if(ctx->type() != Target::GpuOcl)
+ if (ctx->type() != Target::GpuOcl)
{
return AclStatus::AclInvalidTarget;
}
- if(opencl_device == nullptr)
+ if (opencl_device == nullptr)
{
return AclStatus::AclInvalidArgument;
}
@@ -116,17 +115,17 @@ extern "C" AclStatus AclGetClQueue(AclQueue external_queue, cl_command_queue *op
using namespace arm_compute;
IQueue *queue = get_internal(external_queue);
- if(detail::validate_internal_queue(queue) != StatusCode::Success)
+ if (detail::validate_internal_queue(queue) != StatusCode::Success)
{
return AclStatus::AclInvalidArgument;
}
- if(queue->header.ctx->type() != Target::GpuOcl)
+ if (queue->header.ctx->type() != Target::GpuOcl)
{
return AclStatus::AclInvalidTarget;
}
- if(opencl_queue == nullptr)
+ if (opencl_queue == nullptr)
{
return AclStatus::AclInvalidArgument;
}
@@ -141,18 +140,18 @@ extern "C" AclStatus AclSetClQueue(AclQueue external_queue, cl_command_queue ope
using namespace arm_compute;
IQueue *queue = get_internal(external_queue);
- if(detail::validate_internal_queue(queue) != StatusCode::Success)
+ if (detail::validate_internal_queue(queue) != StatusCode::Success)
{
return AclStatus::AclInvalidArgument;
}
- if(queue->header.ctx->type() != Target::GpuOcl)
+ if (queue->header.ctx->type() != Target::GpuOcl)
{
return AclStatus::AclInvalidTarget;
}
auto cl_queue = utils::cast::polymorphic_downcast<arm_compute::gpu::opencl::ClQueue *>(queue);
- if(!cl_queue->set_cl_queue(::cl::CommandQueue(opencl_queue)))
+ if (!cl_queue->set_cl_queue(::cl::CommandQueue(opencl_queue)))
{
return AclStatus::AclRuntimeError;
}
@@ -165,17 +164,17 @@ extern "C" AclStatus AclGetClMem(AclTensor external_tensor, cl_mem *opencl_mem)
using namespace arm_compute;
ITensorV2 *tensor = get_internal(external_tensor);
- if(detail::validate_internal_tensor(tensor) != StatusCode::Success)
+ if (detail::validate_internal_tensor(tensor) != StatusCode::Success)
{
return AclStatus::AclInvalidArgument;
}
- if(tensor->header.ctx->type() != Target::GpuOcl)
+ if (tensor->header.ctx->type() != Target::GpuOcl)
{
return AclStatus::AclInvalidTarget;
}
- if(opencl_mem == nullptr)
+ if (opencl_mem == nullptr)
{
return AclStatus::AclInvalidArgument;
}
@@ -184,4 +183,4 @@ extern "C" AclStatus AclGetClMem(AclTensor external_tensor, cl_mem *opencl_mem)
*opencl_mem = cl_tensor->cl_buffer().get();
return AclStatus::AclSuccess;
-} \ No newline at end of file
+}