diff options
author | Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-27 17:46:17 +0100 |
---|---|---|
committer | felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com> | 2023-09-28 12:08:05 +0000 |
commit | afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch) | |
tree | 03bc7d5a762099989b16a656fa8d397b490ed70e /compute_kernel_writer/prototype/examples/add_exp_store.cpp | |
parent | bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff) | |
download | ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz |
Apply clang-format on repository
Code is formatted as per a revised clang format configuration
file (not part of this delivery). Version 14.0.6 is used.
Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/
There will be a follow up for formatting of .cl files and the
files under tests/ and compute_kernel_writer/validation/.
Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Diffstat (limited to 'compute_kernel_writer/prototype/examples/add_exp_store.cpp')
-rw-r--r-- | compute_kernel_writer/prototype/examples/add_exp_store.cpp | 39 |
1 file changed, 21 insertions, 18 deletions
diff --git a/compute_kernel_writer/prototype/examples/add_exp_store.cpp b/compute_kernel_writer/prototype/examples/add_exp_store.cpp index 6a9884543c..2b640ca01b 100644 --- a/compute_kernel_writer/prototype/examples/add_exp_store.cpp +++ b/compute_kernel_writer/prototype/examples/add_exp_store.cpp @@ -32,7 +32,6 @@ #include "common/ExampleComponentArgument.h" #include "common/ExampleKernelWriter.h" #include "common/ExampleScopedKernelWriter.h" - #include <iostream> #include <vector> @@ -78,14 +77,14 @@ void op_binary_elementwise(ExampleScopedKernelWriter writer, std::vector<Example auto dst = operands.at(2); // Load the LHS and RHS tile and prepare the tensor sampler. - if(!lhs->has_tile() && !rhs->has_tile()) + if (!lhs->has_tile() && !rhs->has_tile()) { const auto sampler = create_simple_sampler(writer); writer->op_load_once(lhs, sampler); writer->op_load_once(rhs, sampler); } - else if(lhs->has_tile()) + else if (lhs->has_tile()) { const auto &sampler = lhs->tile_sampler(); writer->op_load_once(rhs, sampler); @@ -101,7 +100,7 @@ void op_binary_elementwise(ExampleScopedKernelWriter writer, std::vector<Example const auto &sampler = lhs->tile_sampler(); // Prepare the output tile. - if(!dst->has_tile()) + if (!dst->has_tile()) { auto &tile = writer->declare_tile("dst_tile", lhs_tile.tile_info()); dst->init_virtual_tensor(tile, sampler); @@ -119,7 +118,7 @@ void op_exp(ExampleScopedKernelWriter writer, std::vector<ExampleComponentArgume auto dst = operands.at(1); // Load the source tile and prepare the sampler. - if(!src->has_tile()) + if (!src->has_tile()) { const auto sampler = create_simple_sampler(writer); writer->op_load_once(src, sampler); @@ -129,7 +128,7 @@ void op_exp(ExampleScopedKernelWriter writer, std::vector<ExampleComponentArgume const auto &sampler = src->tile_sampler(); // Prepare the output tile. 
- if(!dst->has_tile()) + if (!dst->has_tile()) { auto &tile = writer->declare_tile("dst_tile", src_tile.tile_info()); dst->init_virtual_tensor(tile, sampler); @@ -160,34 +159,38 @@ int main() ExampleScopedKernelWriter writer(&root_writer); - const TensorInfo src0_info(DataType::Fp32, TensorShape({ 3, 10, 20, 1, 1 }), TensorDataLayout::Nhwc, 0); - const TensorInfo src1_info(DataType::Fp32, TensorShape({ 3, 10, 20, 1, 1 }), TensorDataLayout::Nhwc, 1); - const TensorInfo dst_info(DataType::Fp32, TensorShape({ 3, 10, 20, 1, 1 }), TensorDataLayout::Nhwc, 2); + const TensorInfo src0_info(DataType::Fp32, TensorShape({3, 10, 20, 1, 1}), TensorDataLayout::Nhwc, 0); + const TensorInfo src1_info(DataType::Fp32, TensorShape({3, 10, 20, 1, 1}), TensorDataLayout::Nhwc, 1); + const TensorInfo dst_info(DataType::Fp32, TensorShape({3, 10, 20, 1, 1}), TensorDataLayout::Nhwc, 2); - ExampleComponentArgument src0(writer->declare_tensor_argument("src0", src0_info, TensorStorageType::BufferUint8Ptr)); - ExampleComponentArgument src1(writer->declare_tensor_argument("src1", src1_info, TensorStorageType::BufferUint8Ptr)); + ExampleComponentArgument src0( + writer->declare_tensor_argument("src0", src0_info, TensorStorageType::BufferUint8Ptr)); + ExampleComponentArgument src1( + writer->declare_tensor_argument("src1", src1_info, TensorStorageType::BufferUint8Ptr)); ExampleComponentArgument dst(writer->declare_tensor_argument("dst", dst_info, TensorStorageType::BufferUint8Ptr)); ExampleComponentArgument ans; - op_binary_elementwise(writer, { &src0, &src1, &ans }); - op_exp(writer, { &ans, &ans }); - op_store(writer, { &ans, &dst }); + op_binary_elementwise(writer, {&src0, &src1, &ans}); + op_exp(writer, {&ans, &ans}); + op_store(writer, {&ans, &dst}); const auto arguments = kernel.arguments(); std::cout << "\n====================\nArguments:\n====================\n"; - for(auto &arg : arguments) + for (auto &arg : arguments) { - switch(arg.type()) + switch (arg.type()) { case 
ckw::KernelArgument::Type::TensorStorage: - std::cout << "* Tensor storage: ID = " << arg.id() << ", type = " << std::hex << "0x" << static_cast<uint32_t>(arg.tensor_storage_type()) << std::dec << "\n"; + std::cout << "* Tensor storage: ID = " << arg.id() << ", type = " << std::hex << "0x" + << static_cast<uint32_t>(arg.tensor_storage_type()) << std::dec << "\n"; break; case ckw::KernelArgument::Type::TensorComponent: - std::cout << "* Tensor component: ID = " << arg.id() << ", type = " << std::hex << "0x" << static_cast<uint32_t>(arg.tensor_component_type()) << std::dec << "\n"; + std::cout << "* Tensor component: ID = " << arg.id() << ", type = " << std::hex << "0x" + << static_cast<uint32_t>(arg.tensor_component_type()) << std::dec << "\n"; break; default: |