author    Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-27 17:46:17 +0100
committer felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-28 12:08:05 +0000
commit    afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      03bc7d5a762099989b16a656fa8d397b490ed70e /src/runtime/TensorAllocator.cpp
parent    bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
Apply clang-format on repository
Code is formatted as per a revised clang format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Diffstat (limited to 'src/runtime/TensorAllocator.cpp')
-rw-r--r--  src/runtime/TensorAllocator.cpp  19
1 file changed, 10 insertions, 9 deletions
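The revised .clang-format file is not part of this delivery, so the snippet below is only an illustrative sketch of the kind of clang-format 14 settings that would produce the changes visible in the diff that follows (a space after control-flow keywords, a wider column limit that lets the constructor initializer list fit on one line, and wrapped call arguments aligned under the opening parenthesis). The option names are real clang-format options; the values are assumptions, not the actual Arm configuration.

    # Hypothetical .clang-format sketch -- not the actual configuration used for this commit.
    Language:              Cpp
    ColumnLimit:           120               # allows the single-line constructor initializer list seen below
    SpaceBeforeParens:     ControlStatements # turns "if(" / "for(" into "if (" / "for ("
    AlignAfterOpenBracket: Align             # keeps wrapped call arguments aligned under the open parenthesis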
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index 4ae27c59fc..372852bfea 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -43,13 +43,13 @@ bool validate_subtensor_shape(const TensorInfo &parent_info, const TensorInfo &c
const size_t parent_dims = parent_info.num_dimensions();
const size_t child_dims = child_info.num_dimensions();
- if(child_dims <= parent_dims)
+ if (child_dims <= parent_dims)
{
- for(size_t num_dimensions = child_dims; num_dimensions > 0; --num_dimensions)
+ for (size_t num_dimensions = child_dims; num_dimensions > 0; --num_dimensions)
{
const size_t child_dim_size = coords[num_dimensions - 1] + child_shape[num_dimensions - 1];
- if((coords[num_dimensions - 1] < 0) || (child_dim_size > parent_shape[num_dimensions - 1]))
+ if ((coords[num_dimensions - 1] < 0) || (child_dim_size > parent_shape[num_dimensions - 1]))
{
is_valid = false;
break;
@@ -65,8 +65,7 @@ bool validate_subtensor_shape(const TensorInfo &parent_info, const TensorInfo &c
}
} // namespace
-TensorAllocator::TensorAllocator(IMemoryManageable *owner)
- : _owner(owner), _associated_memory_group(nullptr), _memory()
+TensorAllocator::TensorAllocator(IMemoryManageable *owner) : _owner(owner), _associated_memory_group(nullptr), _memory()
{
}
@@ -88,7 +87,7 @@ TensorAllocator::TensorAllocator(TensorAllocator &&o) noexcept
TensorAllocator &TensorAllocator::operator=(TensorAllocator &&o) noexcept
{
- if(&o != this)
+ if (&o != this)
{
_owner = o._owner;
o._owner = nullptr;
@@ -117,8 +116,10 @@ void TensorAllocator::init(const TensorAllocator &allocator, const Coordinates &
_memory = Memory(allocator._memory.region());
// Init tensor info with new dimensions
- size_t total_size = parent_info.offset_element_in_bytes(coords) + sub_info.total_size() - sub_info.offset_first_element_in_bytes();
- sub_info.init(sub_info.tensor_shape(), sub_info.format(), parent_info.strides_in_bytes(), parent_info.offset_element_in_bytes(coords), total_size);
+ size_t total_size =
+ parent_info.offset_element_in_bytes(coords) + sub_info.total_size() - sub_info.offset_first_element_in_bytes();
+ sub_info.init(sub_info.tensor_shape(), sub_info.format(), parent_info.strides_in_bytes(),
+ parent_info.offset_element_in_bytes(coords), total_size);
// Set TensorInfo
init(sub_info);
@@ -133,7 +134,7 @@ void TensorAllocator::allocate()
{
// Align to 64-byte boundaries by default if alignment is not specified
const size_t alignment_to_use = (alignment() != 0) ? alignment() : 64;
- if(_associated_memory_group == nullptr)
+ if (_associated_memory_group == nullptr)
{
_memory.set_owned_region(std::make_unique<MemoryRegion>(info().total_size(), alignment_to_use));
}