From 002e6530f6218b00a28aef9be8b21efb08cf3602 Mon Sep 17 00:00:00 2001
From: Ramy Elgammal
Date: Wed, 11 Jan 2023 18:48:04 +0000
Subject: Implement dynamic fusion softmax operator

- Return the auxiliary TensorInfo from get_auxiliary_tensors() at runtime, so
  the auxiliary tensor can be initialised with the right size.
- Keep softmax unfusable for this commit.
- Hence, added Tensor3D to the template writer's argument declarations, for
  the sake of keeping the dynamic fusion softmax components' kernels matching
  their CL counterparts.

Resolves: COMPMID-5523
Change-Id: I667f39545db925f667036ef448302c79a0330373
Signed-off-by: Ramy Elgammal
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/483924
Tested-by: bsgcomp
Reviewed-by: Gunes Bayir
Comments-Addressed: bsgcomp
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8986
Comments-Addressed: Arm Jenkins
Reviewed-by: Jakub Sujak
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 .../validation/dynamic_fusion/gpu/Integration.cpp  |  21 ++-
 tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp | 198 +++++++++++++++++++++
 2 files changed, 210 insertions(+), 9 deletions(-)
 create mode 100644 tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp

diff --git a/tests/validation/dynamic_fusion/gpu/Integration.cpp b/tests/validation/dynamic_fusion/gpu/Integration.cpp
index 7f2d439183..6a283f8082 100644
--- a/tests/validation/dynamic_fusion/gpu/Integration.cpp
+++ b/tests/validation/dynamic_fusion/gpu/Integration.cpp
@@ -90,9 +90,10 @@ TEST_CASE(Conv2d, framework::DatasetMode::ALL)
     // Instead of using ACL allocated memory, the user can choose to import memory into the tensors
     for(auto &data : runtime.get_auxiliary_tensors())
     {
-        CLTensor     *tensor      = data.first;
-        AuxMemoryInfo aux_mem_req = data.second;
-        tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
+        CLTensor     *tensor      = std::get<0>(data);
+        TensorInfo    info        = std::get<1>(data);
+        AuxMemoryInfo aux_mem_req = std::get<2>(data);
+        tensor->allocator()->init(info, aux_mem_req.alignment);
         tensor->allocator()->allocate(); // Use ACL allocated memory
         // auto buf = cl::Buffer();
         // tensor->allocator()->import_memory(buf); // Or, import external memory
@@ -178,9 +179,10 @@ TEST_CASE(Add_Output_Add_Output, framework::DatasetMode::ALL)
     // Instead of using ACL allocated memory, the user can choose to import memory into the tensors
     for(auto &data : runtime.get_auxiliary_tensors())
     {
-        CLTensor     *tensor      = data.first;
-        AuxMemoryInfo aux_mem_req = data.second;
-        tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
+        CLTensor     *tensor      = std::get<0>(data);
+        TensorInfo    info        = std::get<1>(data);
+        AuxMemoryInfo aux_mem_req = std::get<2>(data);
+        tensor->allocator()->init(info, aux_mem_req.alignment);
         tensor->allocator()->allocate(); // Use ACL allocated memory
         // auto buf = cl::Buffer();
         // tensor->allocator()->import_memory(buf); // Or, import external memory
@@ -282,9 +284,10 @@ TEST_CASE(Add_Output_Add_Cast_Cast_Output, framework::DatasetMode::ALL)
     // Instead of using ACL allocated memory, the user can choose to import memory into the tensors
     for(auto &data : runtime.get_auxiliary_tensors())
     {
-        CLTensor     *tensor      = data.first;
-        AuxMemoryInfo aux_mem_req = data.second;
-        tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment);
+        CLTensor     *tensor      = std::get<0>(data);
+        TensorInfo    info        = std::get<1>(data);
+        AuxMemoryInfo aux_mem_req = std::get<2>(data);
+        tensor->allocator()->init(info, aux_mem_req.alignment);
         tensor->allocator()->allocate(); // Use ACL allocated memory
         // auto buf = cl::Buffer();
         // tensor->allocator()->import_memory(buf); // Or, import external memory
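Note on the API change above: get_auxiliary_tensors() now yields a tuple of (CLTensor*, TensorInfo, AuxMemoryInfo) per auxiliary tensor, so the allocator is initialised from the TensorInfo returned by the runtime, which carries the right size, rather than from the tensor's own, possibly unsized, info. The commented-out lines hint at the alternative of backing the tensor with user-managed memory instead of calling allocate(). A minimal sketch of that import path, assuming an OpenCL buffer of at least info.total_size() bytes; the buffer creation below is illustrative and not part of this patch:

    for(auto &data : runtime.get_auxiliary_tensors())
    {
        CLTensor     *tensor      = std::get<0>(data);
        TensorInfo    info        = std::get<1>(data);
        AuxMemoryInfo aux_mem_req = std::get<2>(data);
        tensor->allocator()->init(info, aux_mem_req.alignment);
        // Import externally managed memory instead of calling allocate():
        cl::Buffer buf(CLKernelLibrary::get().context(), CL_MEM_READ_WRITE, info.total_size());
        tensor->allocator()->import_memory(buf);
    }

The imported buffer must satisfy the size implied by the returned TensorInfo and the alignment in AuxMemoryInfo; ownership of the buffer stays with the caller.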
diff --git a/tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp b/tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp
new file mode 100644
index 0000000000..d09454e05b
--- /dev/null
+++ b/tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+/** Tolerance for float operations */
+RelativeTolerance<half>  tolerance_f16(half(0.2));
+RelativeTolerance<float> tolerance_f32(0.001f);
+
+TEST_SUITE(CL)
+TEST_SUITE(DYNAMIC_FUSION)
+TEST_SUITE(SOFTMAX)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Mismatching data types
+                                                       TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Mismatching shapes
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::S32), // Unsupported data type
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+               }),
+               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U), 1, DataType::F16),
+                                                       TensorInfo(TensorShape(27U, 11U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM16), // Unsupported data type
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+                                                       TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
+               })),
+               framework::dataset::make("beta", { 1.0,
+                                                  2.0,
+                                                  2.0,
+                                                  1.0,
+                                                  1.0,
+                                                  1.0,
+                                                  1.0,
+                                                  1.0,
+                                                  1.0,
+                                                  1.0,
+               })),
+               framework::dataset::make("axis", {
+                                                  0,
+                                                  0,
+                                                  1,  // Invalid as axis != 0
+                                                  0,
+                                                  0,
+                                                  0,
+                                                  -3, // Invalid as axis != 0
+                                                  2,  // Invalid as axis != 0
+                                                  1,  // Invalid as axis != 0
+                                                  -1, // Invalid as axis != 0
+               })),
+               framework::dataset::make("Expected", { false, false, false, true, false, false, false, false, false, false })),
+               input_info, output_info, beta, axis, expected)
+{
+    // Create a new workload sketch
+    CLCompileContext   cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+    GpuWorkloadContext gpu_ctx        = GpuWorkloadContext{ &cl_compile_ctx };
+    GpuWorkloadSketch  sketch{ &gpu_ctx };
+
+    SoftmaxAttributes softmax_attr{};
+    softmax_attr.axis(axis).beta(beta).is_log_softmax(false);
+    TensorInfo src_info = sketch.create_tensor_info(input_info);
+    TensorInfo dst_info = sketch.create_tensor_info(output_info);
+    const bool res = static_cast<bool>(GpuSoftmax::validate_op(sketch, &src_info, &dst_info, softmax_attr));
+    ARM_COMPUTE_EXPECT(res == expected, framework::LogLevel::ERRORS);
+}
+
+template <typename T>
+using DynamicFusionSoftmaxLayerFixture = DynamicFusionSoftmaxValidationFixture<CLTensor, CLAccessor, GpuSoftmax, T>;
+
+TEST_SUITE(FLOAT)
+TEST_SUITE(FP32)
+
+FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionSoftmaxLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
+                                                                                                               framework::dataset::make("DataType", DataType::F32)),
+                                                                                                               framework::dataset::make("Beta", { 1.0f, 2.0f })),
+                                                                                                               framework::dataset::make("Axis", { 0 })),
+                                                                                                               framework::dataset::make("is_log", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, DynamicFusionSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
+                                                                                                               framework::dataset::make("DataType", DataType::F32)),
+                                                                                                               framework::dataset::make("Beta", { 1.0f, 2.0f })),
+                                                                                                               framework::dataset::make("Axis", { 0 })),
+                                                                                                               framework::dataset::make("is_log", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(Run4D, DynamicFusionSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
+                                                                                                               framework::dataset::make("DataType", DataType::F32)),
+                                                                                                               framework::dataset::make("Beta", { 1.0f, 2.0f })),
+                                                                                                               framework::dataset::make("Axis", { 0 })),
+                                                                                                               framework::dataset::make("is_log", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE(FP16)
+
+FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionSoftmaxLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
+                                                                                                               framework::dataset::make("DataType", DataType::F16)),
+                                                                                                               framework::dataset::make("Beta", { 1.0f, 2.0f })),
+                                                                                                               framework::dataset::make("Axis", { 0 })),
+                                                                                                               framework::dataset::make("is_log", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, DynamicFusionSoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f16); +} + + +FIXTURE_DATA_TEST_CASE(Run4D, DynamicFusionSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SoftmaxLayer4DShapes(), + framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f16); +} +TEST_SUITE_END() // FP16 +TEST_SUITE_END() // FLOAT + +TEST_SUITE_END() // SOFTMAX +TEST_SUITE_END() // DYNAMIC_FUSION +TEST_SUITE_END() // CL + +} // namespace validation +} // namespace test +} // namespace arm_compute -- cgit v1.2.1