author     Viet-Hoa Do <viet-hoa.do@arm.com>  2024-01-18 16:10:46 +0000
committer  Viet-Hoa Do <viet-hoa.do@arm.com>  2024-01-23 09:52:40 +0000
commit     fdf56fb9d414a754e7cedfdc1351ab0ce2866a0c (patch)
tree       75b48446e9b4041ae9c520070e432d32b9748ef7 /tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
parent     e812c0cafc6f224ec9caca30c2e97ec062012d53 (diff)
Make GpuWorkloadContext own all tensor info objects
* The tensor info objects created by calling create_tensor_info are now
  solely owned by the context object. The user only receives pointers to
  those objects.
  - Internally, pointers to tensor info objects are used in various
    places. It's safer for dynamic fusion to manage these objects
    directly rather than relying on the users.
  - The validation test is updated to use the modified API.
* Make various changes in the dynamic fusion API to make it more
  user-friendly (e.g. making some of the objects movable).

Partially resolves: COMPMID-6707

Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: Ifee70e53c05f8e7b72bf9ef123701ff291c5ee80
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10990
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
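In practical terms, the caller-side change looks like this (a minimal sketch using only the names that appear in the fixture diff below; surrounding setup and error handling elided):

    // Before this change, the fixture received TensorInfo objects by value
    // and owned them itself:
    //   TensorInfo src_info = context.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW));
    //   ITensorInfo *ans_info = FunctionType::create_op(sketch, &src_info, attributes);
    //
    // After this change, the GpuWorkloadContext owns every tensor info
    // object and the caller only holds non-owning pointers:
    ITensorInfo *src_info = context.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW));
    ITensorInfo *dst_info = context.create_tensor_info(); // empty info, populated by the sketch
    ITensorInfo *ans_info = FunctionType::create_op(sketch, src_info, attributes);
    GpuOutput::create_op(sketch, ans_info, dst_info);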
Diffstat (limited to 'tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h')
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h  53
1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
index d8e250cb36..edf0dff54b 100644
--- a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE
-#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H
+#define ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
@@ -58,14 +58,14 @@ protected:
void fill(U &&tensor, int i, DataType dt_in, DataType dt_out)
{
// Restricting range to avoid inf values
- if(dt_out == DataType::F16)
+ if (dt_out == DataType::F16)
{
constexpr int signed_min = -32000;
constexpr int signed_max = 32000;
constexpr int unsigned_min = 0;
constexpr int unsigned_max = 65000;
- switch(dt_in)
+ switch (dt_in)
{
case DataType::U8:
case DataType::QASYMM8:
@@ -78,22 +78,26 @@ protected:
}
case DataType::U16:
{
- library->fill_tensor_uniform(tensor, i, static_cast<uint16_t>(unsigned_min), static_cast<uint16_t>(unsigned_max));
+ library->fill_tensor_uniform(tensor, i, static_cast<uint16_t>(unsigned_min),
+ static_cast<uint16_t>(unsigned_max));
break;
}
case DataType::S16:
{
- library->fill_tensor_uniform(tensor, i, static_cast<int16_t>(signed_min), static_cast<int16_t>(signed_max));
+ library->fill_tensor_uniform(tensor, i, static_cast<int16_t>(signed_min),
+ static_cast<int16_t>(signed_max));
break;
}
case DataType::U32:
{
- library->fill_tensor_uniform(tensor, i, static_cast<uint32_t>(unsigned_min), static_cast<uint32_t>(unsigned_max));
+ library->fill_tensor_uniform(tensor, i, static_cast<uint32_t>(unsigned_min),
+ static_cast<uint32_t>(unsigned_max));
break;
}
case DataType::S32:
{
- library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min), static_cast<int32_t>(signed_max));
+ library->fill_tensor_uniform(tensor, i, static_cast<int32_t>(signed_min),
+ static_cast<int32_t>(signed_max));
break;
}
default:
@@ -107,29 +111,31 @@ protected:
}
// Given input is in nchw format
- TensorType compute_target(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
+ TensorType
+ compute_target(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
{
// Create a new workload sketch
auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
- auto context = GpuWorkloadContext{ &cl_compile_ctx };
- GpuWorkloadSketch sketch{ &context };
+ auto context = GpuWorkloadContext{&cl_compile_ctx};
+ GpuWorkloadSketch sketch{&context};
// Create sketch tensors
- TensorInfo src_info = context.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important
- TensorInfo dst_info = context.create_tensor_info();
+ ITensorInfo *src_info =
+ context.create_tensor_info(TensorInfo(shape, 1, dt_in, DataLayout::NCHW)); // layout is not important
+ ITensorInfo *dst_info = context.create_tensor_info();
CastAttributes attributes;
attributes.convert_policy(policy).data_type(dt_out);
- ITensorInfo *ans_info = FunctionType::create_op(sketch, &src_info, attributes);
- GpuOutput::create_op(sketch, ans_info, &dst_info);
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, src_info, attributes);
+ GpuOutput::create_op(sketch, ans_info, dst_info);
// Configure runtime
ClWorkloadRuntime runtime;
runtime.configure(sketch);
// (Important) Allocate auxiliary tensor memory if there are any
- for(auto &data : runtime.get_auxiliary_tensors())
+ for (auto &data : runtime.get_auxiliary_tensors())
{
CLTensor *tensor = std::get<0>(data);
TensorInfo info = std::get<1>(data);
@@ -143,8 +149,8 @@ protected:
TensorType t_dst{};
// Initialize user tensors
- t_src.allocator()->init(src_info);
- t_dst.allocator()->init(dst_info);
+ t_src.allocator()->init(*src_info);
+ t_dst.allocator()->init(*dst_info);
// Allocate and fill user tensors
t_src.allocator()->allocate();
@@ -153,14 +159,15 @@ protected:
fill(AccessorType(t_src), 0, dt_in, dt_out);
// Run runtime
- runtime.run({ &t_src, &t_dst });
+ runtime.run({&t_src, &t_dst});
return t_dst;
}
- SimpleTensor<T2> compute_reference(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
+ SimpleTensor<T2>
+ compute_reference(const TensorShape &shape, const DataType dt_in, const DataType dt_out, const ConvertPolicy policy)
{
// Create reference
- SimpleTensor<T1> src{ shape, dt_in, 1 };
+ SimpleTensor<T1> src{shape, dt_in, 1};
// Fill reference
fill(src, 0, dt_in, dt_out);
@@ -174,4 +181,4 @@ protected:
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE */
+#endif // ACL_TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CASTFIXTURE_H