From 002e6530f6218b00a28aef9be8b21efb08cf3602 Mon Sep 17 00:00:00 2001 From: Ramy Elgammal Date: Wed, 11 Jan 2023 18:48:04 +0000 Subject: Implement dynamic fusion softmax operator - Return aux tensorInfo by get_aux_tensors() at runtime to init the aux tensor with the right size. - Keep softmax unfusable for this commit - Hence, added Tensor3D to template writer arguments declaration, for the sake of keeping dynamic fusion softmax components' kernels matching their cl counterparts. Resolves: COMPMID-5523 Change-Id: I667f39545db925f667036ef448302c79a0330373 Signed-off-by: Ramy Elgammal Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/483924 Tested-by: bsgcomp Reviewed-by: Gunes Bayir Comments-Addressed: bsgcomp Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8986 Comments-Addressed: Arm Jenkins Reviewed-by: Jakub Sujak Tested-by: Arm Jenkins Benchmark: Arm Jenkins --- Android.bp | 3 + .../runtime/gpu/cl/ClWorkloadRuntime.h | 6 +- .../dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h | 2 +- .../sketch/gpu/operators/GpuSoftmax.h | 95 ++++++++++ filelist.json | 3 + .../runtime/gpu/cl/ClKernelRuntime.cpp | 2 +- .../runtime/gpu/cl/ClWorkloadRuntime.cpp | 8 +- .../sketch/gpu/GpuKernelComponentGraph.cpp | 9 +- .../sketch/gpu/GpuWorkloadSourceCode.h | 2 +- .../cl/ClComponentLogits1DMaxShiftExpSum.cpp | 16 +- .../cl/ClComponentLogits1DMaxShiftExpSum.h | 8 +- .../gpu/components/cl/ClComponentLogits1DNorm.cpp | 95 ++++++++++ .../gpu/components/cl/ClComponentLogits1DNorm.h | 128 +++++++++++++ .../sketch/gpu/operators/GpuSoftmax.cpp | 198 +++++++++++++++++++++ .../cl/ClTemplateLogits1DMaxShiftExpSum.cpp | 67 +++---- .../template_writer/cl/ClTemplateLogits1DNorm.cpp | 185 +++++++++++++++++++ .../template_writer/cl/ClTemplateLogits1DNorm.h | 106 +++++++++++ .../gpu/template_writer/cl/ClTemplateWriter.cpp | 16 +- .../validation/dynamic_fusion/gpu/Integration.cpp | 21 ++- tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp | 198 
+++++++++++++++++++++ .../dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h | 9 +- .../dynamic_fusion/gpu/cl/DirectConv2dFixture.h | 16 +- .../gpu/cl/ElementwiseBinaryFixture.h | 9 +- .../fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h | 11 +- .../dynamic_fusion/operators/CastFixture.h | 9 +- .../dynamic_fusion/operators/ReshapeFixture.h | 7 +- .../dynamic_fusion/operators/ResizeFixture.h | 9 +- .../dynamic_fusion/operators/SoftmaxFixture.h | 161 +++++++++++++++++ 28 files changed, 1295 insertions(+), 104 deletions(-) create mode 100644 arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h create mode 100644 src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.cpp create mode 100644 src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.h create mode 100644 src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp create mode 100644 src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.cpp create mode 100644 src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.h create mode 100644 tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp create mode 100644 tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h diff --git a/Android.bp b/Android.bp index f426f2fbc2..34f722f6fe 100644 --- a/Android.bp +++ b/Android.bp @@ -609,6 +609,7 @@ cc_library_static { "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentDirectConv2d.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.cpp", + "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentReshape.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp", @@ -622,6 +623,7 @@ cc_library_static { "src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp", 
"src/dynamic_fusion/sketch/gpu/operators/GpuReshape.cpp", "src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp", + "src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp", "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateActivation.cpp", @@ -630,6 +632,7 @@ cc_library_static { "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDirectConv2d.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DMaxShiftExpSum.cpp", + "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplatePool2d.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateReshape.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp", diff --git a/arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h b/arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h index 326880f721..b5af589cd2 100644 --- a/arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h +++ b/arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,8 +24,8 @@ #ifndef ARM_COMPUTE_DYNAMIC_FUSION_RUNTIME_GPU_CL_CLWORKLOADRUNTIME #define ARM_COMPUTE_DYNAMIC_FUSION_RUNTIME_GPU_CL_CLWORKLOADRUNTIME +#include "arm_compute/core/TensorInfo.h" #include "arm_compute/dynamic_fusion/sketch/MemoryDescriptor.h" - #include #include @@ -63,7 +63,7 @@ public: Status run(const std::vector &tensors); /** Get auxiliary tensors of the workload and their memory requirement */ - std::vector> get_auxiliary_tensors(); + std::vector> get_auxiliary_tensors(); private: /** Enqueue prepare workload diff --git a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h index 422edb35f1..155df293bf 100644 --- a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h +++ b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2023 Arm Limited. + * Copyright (c) 2022 Arm Limited. * * SPDX-License-Identifier: MIT * diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h new file mode 100644 index 0000000000..e86ef91e6a --- /dev/null +++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUSOFTMAX +#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUSOFTMAX + +#include "arm_compute/core/ITensorInfo.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h" + +namespace arm_compute +{ +namespace experimental +{ +namespace dynamic_fusion +{ +/** Forward declaration */ +class GpuWorkloadContext; +class GpuWorkloadSketch; + +/** Operator interface. */ +class GpuSoftmax final +{ +public: + /** Attributes are a set of backend-agnostic parameters that define what an operator does */ + using Attributes = SoftmaxAttributes; + + /** Create an operator and fuse it into the workload sketch. + * @note If @ref validate_op() fails, the creation also fails and may throw an error. + * @note If @ref validate_op() fails, @p sketch remains unchanged and valid. + * + * Valid data type configurations: + * |src |dst | + * |:--------------|:--------------| + * |F16 |F16 | + * |F32 |F32 | + * + * Valid data layouts: + * - All + * + * @param[in,out] sketch Workload sketch into which the operator will be fused + * @param[in] src Source tensor info. + * @param[in] dst Destination tensor info. 
+ * @param[in] attributes Operator attributes + */ + static void create_op(GpuWorkloadSketch &sketch, + ITensorInfo *src, + ITensorInfo *dst, + const Attributes &attributes); + /** Check if the operator configuration is supported, irrespective of fusion + * + * @param[in] context Workload context within which the operator is running + * @param[in] src Source tensor info. + * @param[in] dst Destination tensor info. + * @param[in] attributes Operator attributes + * + * @return Status + */ + static Status is_supported_op(const GpuWorkloadContext &context, + const ITensorInfo *src, + const ITensorInfo *dst, + const Attributes &attributes); + /** Validate the operator and check if the its configuration is supported and if it can be fused into the workload sketch. + * Similar to @ref GpuSoftmax::create_op() + * + * @return a status + */ + static Status validate_op(const GpuWorkloadSketch &sketch, + const ITensorInfo *src, + const ITensorInfo *dst, + const Attributes &attributes); +}; +} // namespace dynamic_fusion +} // namespace experimental +} // namespace arm_compute +#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUSOFTMAX */ diff --git a/filelist.json b/filelist.json index ce45be59af..3cb3a7a76f 100644 --- a/filelist.json +++ b/filelist.json @@ -2226,6 +2226,7 @@ "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentPool2d.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.cpp", + "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentReshape.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentResize.cpp", "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.cpp", @@ -2237,6 +2238,7 @@ "src/dynamic_fusion/sketch/gpu/operators/GpuPool2d.cpp", "src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp", 
"src/dynamic_fusion/sketch/gpu/operators/GpuResize.cpp", + "src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp", "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp", "src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateActivation.cpp", @@ -2246,6 +2248,7 @@ "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplatePool2d.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DMaxShiftExpSum.cpp", + "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateReshape.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateResize.cpp", "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateStore.cpp", diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp index 022d4685fe..b3ec39362c 100644 --- a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp +++ b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp @@ -41,7 +41,7 @@ void ClKernelRuntime::configure(const ClCompileContext &compile_ctx, const GpuKe // Create kernel from kernel source string opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get(); _kernel = static_cast(compile_ctx.create_kernel(code.name(), - "" /* Program name: Used to as part of a unique string for built kernel cache. Not needed */, + code.name(), // program name has to be provided to differentiate between different unfusable components' kernels. 
code.code(), klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */, code.build_options().options(), diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp index 7e427fef72..cd21b10180 100644 --- a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp +++ b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -355,12 +355,12 @@ Status ClWorkloadRuntime::run(const std::vector &tensors) return Status{}; } -std::vector> ClWorkloadRuntime::get_auxiliary_tensors() +std::vector> ClWorkloadRuntime::get_auxiliary_tensors() { - std::vector> aux_tensors; + std::vector> aux_tensors; for(const auto &data : _impl->_aux_tensors.get_tensors()) { - aux_tensors.emplace_back(data.tensor, data.memory_info); + aux_tensors.emplace_back(data.tensor, data.tensor_info, data.memory_info); } return aux_tensors; } diff --git a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp index 4cf7a7fece..b70a192775 100644 --- a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp +++ b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp @@ -59,8 +59,13 @@ GpuKernelComponentStream GpuKernelComponentGraph::fuse(const MemoryDescriptorMap { const auto component = _components.at(op.op).get(); const auto success = stream.add_component(component); - ARM_COMPUTE_ERROR_ON(!success); - ARM_COMPUTE_UNUSED(success); + if(!success) // Assume first failure was because the root component is unfusable + { + stream.new_component_group(); + const auto success = stream.add_component(component); + ARM_COMPUTE_ERROR_ON(!success); + ARM_COMPUTE_UNUSED(success); + } } return stream; diff --git a/src/dynamic_fusion/sketch/gpu/GpuWorkloadSourceCode.h b/src/dynamic_fusion/sketch/gpu/GpuWorkloadSourceCode.h index 
2375f5c6c6..d1d0bdf77f 100644 --- a/src/dynamic_fusion/sketch/gpu/GpuWorkloadSourceCode.h +++ b/src/dynamic_fusion/sketch/gpu/GpuWorkloadSourceCode.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. * * SPDX-License-Identifier: MIT * diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.cpp b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.cpp index 8ab1853e84..279c77e227 100644 --- a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.cpp +++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,9 +27,8 @@ #include "arm_compute/core/CL/CLHelpers.h" #include "arm_compute/core/Validate.h" #include "arm_compute/core/utils/misc/ShapeCalculator.h" -#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h" #include "src/core/CL/CLValidate.h" -#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateDepthwiseConv2d.h" #include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DMaxShiftExpSum.h" namespace arm_compute @@ -49,7 +48,9 @@ Status ClComponentLogits1DMaxShiftExpSum::validate( const ITensorInfo *sum = tensors.get_const_tensor(TensorType::ACL_DST_0); const ITensorInfo *dst = tensors.get_const_tensor(TensorType::ACL_DST_1); - ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, sum, dst); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(sum); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(dst); // 1. 
Check validity // All tensor infos are initialized @@ -61,11 +62,6 @@ Status ClComponentLogits1DMaxShiftExpSum::validate( ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst, sum); ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); - const uint32_t src_dimensions = src->tensor_shape().num_dimensions(); - const uint32_t sum_dimensions = sum->tensor_shape().num_dimensions(); - - ARM_COMPUTE_RETURN_ERROR_ON(src_dimensions != sum_dimensions + 1); - // Device requirements are met ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); @@ -94,4 +90,4 @@ const IGpuTemplateComponentWriter *ClComponentLogits1DMaxShiftExpSum::template_w } } // namespace dynamic_fusion } // namespace experimental -} // namespace arm_compute \ No newline at end of file +} // namespace arm_compute diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.h b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.h index af47609a2c..b5db458248 100644 --- a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.h +++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -64,9 +64,9 @@ public: /** Validate the component * - * @param[in] properties Component properties @ref Properties - * @param[in,out] tensors Tensor arguments to the component - * @param[in] attributes Component attributes @ref Attributes + * @param[in] properties Component properties @ref Properties + * @param[in] tensors Tensor arguments to the component + * @param[in] attributes Component attributes @ref Attributes * * @return Status Validation results * diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.cpp b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.cpp new file mode 100644 index 0000000000..7864d56d29 --- /dev/null +++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.cpp @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.h" + +#include "arm_compute/core/CL/CLHelpers.h" +#include "arm_compute/core/Validate.h" +#include "arm_compute/core/utils/misc/ShapeCalculator.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h" +#include "src/core/CL/CLValidate.h" +#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.h" + +namespace arm_compute +{ +namespace experimental +{ +namespace dynamic_fusion +{ +Status ClComponentLogits1DNorm::validate( + const Properties &properties, + const ArgumentPack &tensors, + const Attributes &attributes) +{ + ARM_COMPUTE_UNUSED(properties, attributes); + + const ITensorInfo *src = tensors.get_const_tensor(TensorType::ACL_SRC_0); + const ITensorInfo *sum = tensors.get_const_tensor(TensorType::ACL_SRC_1); + const ITensorInfo *dst = tensors.get_const_tensor(TensorType::ACL_DST_0); + + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(sum); + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(dst); + + // 1. Check validity + // All tensor infos are initialized + ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().total_size() == 0); + ARM_COMPUTE_RETURN_ERROR_ON(sum->tensor_shape().total_size() == 0); + ARM_COMPUTE_RETURN_ERROR_ON(dst->tensor_shape().total_size() == 0); + + // Check for mismatches in shapes and data types + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst, sum); + ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst); + + ARM_COMPUTE_RETURN_ERROR_ON(attributes.is_log_softmax() && !is_data_type_float(src->data_type())); + + // Device requirements are met + ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src); + + // 2. 
Check support level + ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::F16, DataType::F32); + + return Status{}; +} + +ClComponentLogits1DNorm::ClComponentLogits1DNorm(ComponentId id, + const Properties &properties, + const ArgumentPack &tensors, + const Attributes &attributes) + : IGpuKernelComponent{ id, properties, tensors }, + _component_writer{ std::make_unique(id, tensors, attributes) } +{ +} + +ClComponentLogits1DNorm::~ClComponentLogits1DNorm() +{ +} + +const IGpuTemplateComponentWriter *ClComponentLogits1DNorm::template_writer() const +{ + return _component_writer.get(); +} +} // namespace dynamic_fusion +} // namespace experimental +} // namespace arm_compute diff --git a/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.h b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.h new file mode 100644 index 0000000000..5bd350b9bd --- /dev/null +++ b/src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTLOGITS1DNORM +#define SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTLOGITS1DNORM + +#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h" +#include "src/dynamic_fusion/sketch/gpu/components/IGpuKernelComponent.h" + +namespace arm_compute +{ +/** Forward declaration */ +class ITensorInfo; +namespace experimental +{ +namespace dynamic_fusion +{ +/** Forward declaration */ +template +class ArgumentPack; + +/** Forward declaration */ +class ClTemplateLogits1DNorm; + +/** Component to calculate the final step of the Softmax Layer + * where each logit value is multiplied by the inverse of the sum of the logits. + * + * 1D example: + * + * (input) src: [x1 x2 ... xn], shape: (1 x d) + * (input) sum: [x1 + x2 + ... + xn], shape: (1 x 1) + * (output) dst: [x1/sum x2/sum ... xn/sum], shape: (1 x d) + * + * This component is used by the softmax operator to get the final result. 
+*/ +class ClComponentLogits1DNorm final : public IGpuKernelComponent +{ +public: + /** Attributes are a set of backend-agnostic parameters that define what a component does */ + using Attributes = SoftmaxAttributes; + + /** Validate the component + * + * @param[in] properties Component properties @ref Properties + * @param[in] tensors Tensor arguments to the component + * @param[in] attributes Component attributes @ref Attributes + * + * @return Status Validation results + * + * Tensor argument names: + * - ACL_SRC_0: Input + * - ACL_SRC_1: Input + * - ACL_DST_0: Output + * + * Tensor argument constness: + * - ACL_SRC_0: Const + * - ACL_SRC_1: Const + * - ACL_DST_0: Const + * + * Valid data layouts: + * - All + * + ** Valid data type configurations: + * |ACL_SRC_0 |ACL_SRC_1 |ACL_DST_0 | + * |:----------|:----------|:----------| + * |F16 | F16 | F16 | + * |F32 | F32 | F32 | + */ + static Status validate( + const Properties &properties, + const ArgumentPack &tensors, + const Attributes &attributes); + + /** Constructor + * + * Similar to @ref ClComponentLogits1DNorm::validate() + */ + ClComponentLogits1DNorm(ComponentId id, + const Properties &properties, + const ArgumentPack &tensors, + const Attributes &attributes); + + /** Destructor */ + ~ClComponentLogits1DNorm() override; + /** Prevent instances of this class from being copy constructed */ + ClComponentLogits1DNorm(const ClComponentLogits1DNorm &component) = delete; + /** Prevent instances of this class from being copied */ + ClComponentLogits1DNorm &operator=(const ClComponentLogits1DNorm &component) = delete; + /** Allow instances of this class to be move constructed */ + ClComponentLogits1DNorm(ClComponentLogits1DNorm &&component) = default; + /** Allow instances of this class to be moved */ + ClComponentLogits1DNorm &operator=(ClComponentLogits1DNorm &&component) = default; + /** Get template writer for the component */ + const IGpuTemplateComponentWriter *template_writer() const override; + /** Get 
component type */ + GpuComponentType type() const override + { + return GpuComponentType::Unfusable; + } + +private: + std::unique_ptr _component_writer; +}; +} // namespace dynamic_fusion +} // namespace experimental +} // namespace arm_compute + +#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_CL_CLCOMPONENTLOGITS1DNORM */ diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp new file mode 100644 index 0000000000..db74538f91 --- /dev/null +++ b/src/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.cpp @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h" +#include "arm_compute/core/Error.h" +#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DMaxShiftExpSum.h" +#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.h" + +#include "src/common/utils/Log.h" +#include "src/core/helpers/AutoConfiguration.h" +#include "src/dynamic_fusion/sketch/ArgumentPack.h" +#include "src/dynamic_fusion/sketch/gpu/GpuOperatorProperties.h" +#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h" + +namespace arm_compute +{ +namespace experimental +{ +namespace dynamic_fusion +{ +namespace +{ +GpuOperatorType operator_type = GpuOperatorType::Unfusable; +} // namespace + +Status GpuSoftmax::is_supported_op(const GpuWorkloadContext &context, + const ITensorInfo *src, + const ITensorInfo *dst, + const Attributes &attributes) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + TensorInfo dst_info_to_validate; + + // Auto initialize dst tensor info + if(dst != nullptr) + { + dst_info_to_validate = *dst; + } + else + { + auto_init_if_empty(dst_info_to_validate, *src->clone()); + } + // Check components + if(context.gpu_language() == GpuLanguage::OpenCL) + { + const auto cl_compile_ctx = context.cl_compile_context(); + ARM_COMPUTE_RETURN_ERROR_ON(cl_compile_ctx == nullptr); + const KernelProperties properties = IGpuKernelComponent::Properties().stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run }); + + TensorShape logits_sum_shape = src->tensor_shape(); + TensorInfo logits(src->clone()->set_tensor_shape(logits_sum_shape)); + + // The sum tensor dim0 only need one element + logits_sum_shape.set(0, 1); + TensorInfo sum(src->clone()->set_tensor_shape(logits_sum_shape)); + + // Validate Component + ArgumentPack arguments_exp_sum; + ArgumentPack arguments_norm; + + arguments_exp_sum.add_const_tensor(ACL_SRC_0, src); + arguments_exp_sum.add_const_tensor(ACL_DST_0, &sum); + 
arguments_exp_sum.add_const_tensor(ACL_DST_1, &logits); + + arguments_norm.add_const_tensor(ACL_SRC_0, &logits); + arguments_norm.add_const_tensor(ACL_SRC_1, &sum); + arguments_norm.add_const_tensor(ACL_DST_0, &dst_info_to_validate); + + ARM_COMPUTE_RETURN_ON_ERROR(ClComponentLogits1DMaxShiftExpSum::validate(properties, arguments_exp_sum, attributes)); + ARM_COMPUTE_RETURN_ON_ERROR(ClComponentLogits1DNorm::validate(properties, arguments_norm, attributes)); + } + else + { + ARM_COMPUTE_RETURN_ERROR_MSG("Unimplemented Gpu language"); + } + + return Status{}; +} + +Status GpuSoftmax::validate_op(const GpuWorkloadSketch &sketch, + const ITensorInfo *src, + const ITensorInfo *dst, + const Attributes &attributes) +{ + ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !dst->has_valid_id()); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->num_dimensions() > 4, "Only up to 4 dimensions are supported"); + ARM_COMPUTE_RETURN_ERROR_ON(attributes.axis() < static_cast(-src->num_dimensions()) || static_cast(src->num_dimensions()) <= attributes.axis()); + + // Auto initialize dst tensor info + TensorInfo dst_info_to_validate = *dst; + auto_init_if_empty(dst_info_to_validate, *src->clone()); + + const size_t actual_axis = static_cast(wrap_around(attributes.axis(), static_cast(src->num_dimensions()))); + const bool needs_permute = actual_axis != 0; + ARM_COMPUTE_RETURN_ERROR_ON_MSG(needs_permute, "Dynamic fusion softmax on axis!=0 not supported yet."); + + // Perform fusion test and check if the operator meets the fusion constraints + ArgumentPack tensors; + tensors.add_const_tensor(ACL_SRC_0, src); + tensors.add_const_tensor(ACL_DST_0, &dst_info_to_validate); + + const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors); + ARM_COMPUTE_RETURN_ERROR_ON_MSG(!sketch.implementation().operator_group().try_add_operator(op), + "Operator fusion test failed. 
This operator cannot be fused into the workload"); + + // Check if configuration is supported + return is_supported_op(*sketch.gpu_context(), src, &dst_info_to_validate, attributes); +} + +void GpuSoftmax::create_op(GpuWorkloadSketch &sketch, + ITensorInfo *src, + ITensorInfo *dst, + const Attributes &attributes) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst); + ARM_COMPUTE_LOG_PARAMS(src, dst, attributes); + TensorShape logits_sum_shape = src->tensor_shape(); + ITensorInfo *logits = sketch.implementation().create_auxiliary_tensor(src->clone()->set_tensor_shape(logits_sum_shape)); + logits_sum_shape.set(0, 1); + ITensorInfo *sum = sketch.implementation().create_auxiliary_tensor(src->clone()->set_tensor_shape(logits_sum_shape)); + + // Auto initialize dst tensor info and the auxiliary tensor infos as well + auto_init_if_empty(*dst, *src->clone()); + + // Assert validation + ARM_COMPUTE_ERROR_THROW_ON(GpuSoftmax::validate_op(sketch, src, dst, attributes)); + ARM_COMPUTE_ERROR_ON_NULLPTR(logits, sum); + + // Translate into components and add to component graph + auto &comp_graph = sketch.implementation().component_graph(); + const auto sketch_ctx = sketch.implementation().context(); + + if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL) + { + const auto cl_compile_ctx = sketch_ctx->cl_compile_context(); + ARM_COMPUTE_ERROR_ON(cl_compile_ctx == nullptr); + + // Add Direct Conv2d Component + { + auto properties = IGpuKernelComponent::Properties(); + properties.stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run }); + + ArgumentPack arguments_exp_sum; + ArgumentPack arguments_norm; + + arguments_exp_sum.add_const_tensor(ACL_SRC_0, src); + arguments_exp_sum.add_const_tensor(ACL_DST_0, sum); + arguments_exp_sum.add_const_tensor(ACL_DST_1, logits); + + arguments_norm.add_const_tensor(ACL_SRC_0, logits); + arguments_norm.add_const_tensor(ACL_SRC_1, sum); + arguments_norm.add_const_tensor(ACL_DST_0, dst); + + comp_graph.add_new_component(properties, arguments_exp_sum, 
attributes); + comp_graph.add_new_component(properties, arguments_norm, attributes); + } + } + else + { + ARM_COMPUTE_ERROR("Unimplemented Gpu language"); + } + + // Set up fusion test by adding to the Operator Group + // Note this has to be performed after all the components have been successfully added to the component graph + + // Pack tensor infos + ArgumentPack tensors; + tensors.add_const_tensor(ACL_SRC_0, src); + tensors.add_const_tensor(ACL_DST_0, dst); + + const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors); + sketch.implementation().operator_group().add_operator(op); +} + +} // namespace dynamic_fusion +} // namespace experimental +} // namespace arm_compute diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DMaxShiftExpSum.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DMaxShiftExpSum.cpp index 8f1ed95351..6de0ba7617 100644 --- a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DMaxShiftExpSum.cpp +++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DMaxShiftExpSum.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -34,6 +34,10 @@ namespace experimental { namespace dynamic_fusion { +namespace +{ + constexpr unsigned int serial_vector_size = 8; +} // namespace ClTemplateLogits1DMaxShiftExpSum::ClTemplateLogits1DMaxShiftExpSum(ComponentId id, const ArgumentPack &tensors, const Attributes &attributes) @@ -46,7 +50,9 @@ ClTemplateLogits1DMaxShiftExpSum::ClTemplateLogits1DMaxShiftExpSum(ComponentId _src = this->tensors().get_const_tensor(TensorType::ACL_SRC_0); _sum = this->tensors().get_const_tensor(TensorType::ACL_DST_0); _dst = this->tensors().get_const_tensor(TensorType::ACL_DST_1); - ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _sum, _dst); + ARM_COMPUTE_ERROR_ON_NULLPTR(_src); + ARM_COMPUTE_ERROR_ON_NULLPTR(_sum); + ARM_COMPUTE_ERROR_ON_NULLPTR(_dst); } std::string ClTemplateLogits1DMaxShiftExpSum::get_name() const @@ -65,7 +71,6 @@ std::string ClTemplateLogits1DMaxShiftExpSum::get_component_code(const Component { __global uchar *src_addr = {{src}}_ptr + {{src}}_offset_first_element_in_bytes + g_ind_1 * {{src}}_stride_y + g_ind_2 * {{src}}_stride_z; __global uchar *dst_addr = {{dst}}_ptr + {{dst}}_offset_first_element_in_bytes + g_ind_1 * {{dst}}_stride_y + g_ind_2 * {{dst}}_stride_z; - Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT({{sum}}); VEC_TYPE max_val_vec = (VEC_TYPE)({{MINVAL}}); )_"; @@ -139,44 +144,41 @@ std::string ClTemplateLogits1DMaxShiftExpSum::get_component_code(const Component sum1D += data; )_"; } - else - { - code += R"_( + code += R"_( for(uint i = PARTIAL_N0; i < {{SRC_WIDTH}}; i += N0) { VEC_TYPE data = VLOAD(N0)(0, (__global {{DATA_TYPE}} *)(src_addr + i * sizeof({{DATA_TYPE}}))); data -= max_val; )_"; - if(beta_defined) - { - code += R"_( - data *= beta; + if(beta_defined) + { + code += R"_( + data *= beta; )_"; - } + } - if(_attributes.is_log_softmax()) - { - code += R"_( - VSTORE(N0) - (data, 0, (__global {{DATA_TYPE}} *)(dst_addr + i * sizeof({{DATA_TYPE}}))); - data = exp(data); + if(_attributes.is_log_softmax()) + { + 
code += R"_( + VSTORE(N0) + (data, 0, (__global {{DATA_TYPE}} *)(dst_addr + i * sizeof({{DATA_TYPE}}))); + data = exp(data); )_"; - } - else - { - code += R"_( - data = exp(data); - VSTORE(N0) - (data, 0, (__global {{DATA_TYPE}} *)(dst_addr + i * sizeof({{DATA_TYPE}}))); + } + else + { + code += R"_( + data = exp(data); + VSTORE(N0) + (data, 0, (__global {{DATA_TYPE}} *)(dst_addr + i * sizeof({{DATA_TYPE}}))); )_"; - } + } - code += R"_( - sum1D += data; + code += R"_( + sum1D += data; } )_"; - } code += R"_( *((__global {{DATA_TYPE}} *)sum.ptr) = SUM_REDUCE(sum1D, N0); @@ -192,19 +194,19 @@ void ClTemplateLogits1DMaxShiftExpSum::declare_variables(GpuKernelVariableTable vtable.declare_variable( comp_group, _src, - GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer), + GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_3D), "src"); vtable.declare_variable( comp_group, _sum, - GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer), + GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_3D), "sum"); vtable.declare_variable( comp_group, _dst, - GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_4D_t_Buffer), + GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_3D), "dst"); } @@ -237,7 +239,6 @@ CLBuildOptions ClTemplateLogits1DMaxShiftExpSum::get_build_options(const Compone ARM_COMPUTE_UNUSED(comp_group); CLBuildOptions build_opts{}; - constexpr unsigned int serial_vector_size = 8; const unsigned int reduction_dim_size = _src->dimension(0); const unsigned int vector_size = adjust_vec_size(serial_vector_size, reduction_dim_size); @@ -261,7 +262,7 @@ std::string ClTemplateLogits1DMaxShiftExpSum::get_config_id() const std::set ClTemplateLogits1DMaxShiftExpSum::get_headers_list() const { - return std::set{ "helpers.h" }; + return std::set{ "helpers.h", "tile_helpers.h" }; } Window ClTemplateLogits1DMaxShiftExpSum::get_window() const diff --git 
a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.cpp new file mode 100644 index 0000000000..0e1c9ef28f --- /dev/null +++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.h" + +#include "src/core/helpers/WindowHelpers.h" +#include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGroup.h" +#include "support/StringSupport.h" + +namespace arm_compute +{ +namespace experimental +{ +namespace dynamic_fusion +{ +ClTemplateLogits1DNorm::ClTemplateLogits1DNorm(ComponentId id, + const ArgumentPack &tensors, + const Attributes &attributes) + : IGpuTemplateComponentWriter{ id, tensors }, + _src{}, + _sum{}, + _dst{}, + _attributes{ attributes } +{ + _src = this->tensors().get_const_tensor(TensorType::ACL_SRC_0); + _sum = this->tensors().get_const_tensor(TensorType::ACL_SRC_1); + _dst = this->tensors().get_const_tensor(TensorType::ACL_DST_0); + ARM_COMPUTE_ERROR_ON_NULLPTR(_src); + ARM_COMPUTE_ERROR_ON_NULLPTR(_sum); + ARM_COMPUTE_ERROR_ON_NULLPTR(_dst); +} + +std::string ClTemplateLogits1DNorm::get_name() const +{ + return "logits_1d_norm"; +} + +std::string ClTemplateLogits1DNorm::get_component_code(const ComponentGroup &comp_group) const +{ + ARM_COMPUTE_UNUSED(comp_group); + + std::string code = R"_( +//------------------ START KERNEL {{meta_kernel_id}} --------------------- +{ + const int x_offs = g_ind_0 * sizeof({{DATA_TYPE}}); + __global uchar *src_addr = {{src}}_ptr + {{src}}_offset_first_element_in_bytes + x_offs + g_ind_1 * {{src}}_stride_y + g_ind_2 * {{src}}_stride_z; + __global uchar *dst_addr = {{dst}}_ptr + {{dst}}_offset_first_element_in_bytes + x_offs + g_ind_1 * {{dst}}_stride_y + g_ind_2 * {{dst}}_stride_z; + Image sum = CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP({{sum}}); +)_"; + // Load max value of 1D logits vector (row) + code += R"_( + {{DATA_TYPE}} sum_val = *((__global {{DATA_TYPE}} *)offset(&sum, 0, g_ind_1)); + VEC_DATA_TYPE({{DATA_TYPE}}, N0) + data0 = VLOAD(N0)(0, (__global {{DATA_TYPE}} *)src_addr); +)_"; + + if(_attributes.is_log_softmax()) + { + code += R"_( + sum_val = log(sum_val); + data0 -= sum_val; +)_"; + } + else + { 
+ code += R"_( + data0 /= sum_val; +)_"; + } + + code += R"_( + STORE_VECTOR_SELECT(data, {{DATA_TYPE}}, dst_addr, N0, PARTIAL_N0, PARTIAL_N0 != 0 && g_ind_0 == 0); +} +//------------------ END KERNEL {{meta_kernel_id}} --------------------- +)_"; + + return code; +} + +void ClTemplateLogits1DNorm::declare_variables(GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const +{ + vtable.declare_variable( + comp_group, + _src, + GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_3D), + "src"); + + vtable.declare_variable( + comp_group, + _sum, + GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_3D), + "sum"); + + vtable.declare_variable( + comp_group, + _dst, + GpuKernelArgumentInfo(GpuKernelArgumentInfo::Type::Tensor_3D), + "dst"); +} + +TagLUT ClTemplateLogits1DNorm::get_tag_lut(const GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const +{ + ARM_COMPUTE_UNUSED(comp_group); + + TagLUT lut{}; + + // Arguments and global shared variables + lut["src"] = vtable.get_variable(_src); + lut["sum"] = vtable.get_variable(_sum); + lut["dst"] = vtable.get_variable(_dst); + + // Local build options + lut["meta_kernel_id"] = id(); + + const DataType data_type = _src->data_type(); + + lut["DATA_TYPE"] = get_cl_type_from_data_type(data_type); + + return lut; +} + +CLBuildOptions ClTemplateLogits1DNorm::get_build_options(const ComponentGroup &comp_group) const +{ + ARM_COMPUTE_UNUSED(comp_group); + CLBuildOptions build_opts{}; + + const auto root_window = comp_group.get_root_component()->template_writer()->get_window(); + const unsigned int n0 = root_window.x().step(); + build_opts.add_option("-DN0=" + support::cpp11::to_string(n0)); + build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string((_src->dimension(0) % n0))); + + return build_opts; +} + +std::string ClTemplateLogits1DNorm::get_config_id() const +{ + std::string config_id = get_name(); + + config_id += "_"; + config_id += support::cpp11::to_string(_src->dimension(0)); 
+ config_id += "_"; + config_id += string_from_data_type(_src->data_type()); + + return config_id; +} + +std::set ClTemplateLogits1DNorm::get_headers_list() const +{ + return std::set{ "helpers.h", "tile_helpers.h" }; +} + +Window ClTemplateLogits1DNorm::get_window() const +{ + ARM_COMPUTE_ERROR_ON_MSG(_dst->tensor_shape().total_size() == 0U, "Destination tensor is not initialized"); + constexpr unsigned int serial_vector_size = 16; + const unsigned int vector_size = adjust_vec_size(serial_vector_size, _src->dimension(0)); + + Window win = calculate_max_window(*_src, Steps(vector_size)); + return win.collapse(win, Window::DimZ); +} + +} // namespace dynamic_fusion +} // namespace experimental +} // namespace arm_compute diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.h b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.h new file mode 100644 index 0000000000..5a74be5842 --- /dev/null +++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateLogits1DNorm.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATELOGITS1DNORM +#define SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATELOGITS1DNORM + +#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentLogits1DNorm.h" +#include "src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.h" +#include "src/dynamic_fusion/sketch/gpu/template_writer/IGpuTemplateComponentWriter.h" + +namespace arm_compute +{ +namespace experimental +{ +namespace dynamic_fusion +{ +class ClTemplateLogits1DNorm final : public IGpuTemplateComponentWriter +{ +public: + using Attributes = ClComponentLogits1DNorm::Attributes; + + /** Constructor + * + * @param[in] id Component id + * @param[in] tensors Tensor arguments to the components + * @param[in] attributes Component attributes + */ + ClTemplateLogits1DNorm(ComponentId id, const ArgumentPack &tensors, const Attributes &attributes); + /** Prevent instances of this class from being copy constructed */ + ClTemplateLogits1DNorm(const ClTemplateLogits1DNorm &) = delete; + /** Prevent instances of this class from being copied */ + ClTemplateLogits1DNorm &operator=(const ClTemplateLogits1DNorm &) = delete; + /** Allow instances of this class to be move constructed */ + ClTemplateLogits1DNorm(ClTemplateLogits1DNorm &&) = default; + /** Allow instances of this class to be moved */ + ClTemplateLogits1DNorm &operator=(ClTemplateLogits1DNorm &&) = default; + /** Generate kernel component name */ + std::string get_name() const override; + /** Generate kernel component code template + * + * @param[in] comp_group Component group of which the component is a part of + * + * @return std::string Component code + */ + std::string 
get_component_code(const ComponentGroup &comp_group) const override; + /** Declare all variables used by the component in the @p vtable + * + * @param[out] vtable Variable table + * @param[in] comp_group Component group of which the component is a part of + */ + void declare_variables(GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override; + /** Generate the tag look-up table used to instantiate the component code. + * + * @param[in] vtable Variable table + * @param[in] comp_group Component group of which the component is a part of + * + * @return TagLUT Tag lookup table + */ + TagLUT get_tag_lut(const GpuKernelVariableTable &vtable, const ComponentGroup &comp_group) const override; + /** Generate the build options used in the component + * + * @param[in] comp_group Component group of which the component is a part of + * + * @return CLBuildOptions Build options + */ + CLBuildOptions get_build_options(const ComponentGroup &comp_group) const override; + /** Generate the component config id string used for tuning */ + std::string get_config_id() const override; + /** Generate the header list used in the component */ + std::set get_headers_list() const override; + /** Generate the execution window for the component */ + Window get_window() const override; + +private: + const ITensorInfo *_src; // exponentiated input + const ITensorInfo *_sum; // exponentiated and summed input + const ITensorInfo *_dst; // normalization of input with _sum + + Attributes _attributes; +}; +} // namespace dynamic_fusion +} // namespace experimental +} // namespace arm_compute + +#endif /* SRC_DYNAMIC_FUSION_SKETCH_GPU_TEMPLATE_WRITER_CL_CLTEMPLATELOGITS1DNORM */ diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp index 2ab6316947..eda15f1d95 100644 --- a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp +++ 
b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022 Arm Limited. + * Copyright (c) 2022-2023 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -180,7 +180,8 @@ std::string ClTemplateWriter::write_code() } auto arguments = _components.get_argument_tensors(); - std::sort(arguments.begin(), arguments.end(), [](const ITensorInfo *l, const ITensorInfo *r) { + std::sort(arguments.begin(), arguments.end(), [](const ITensorInfo * l, const ITensorInfo * r) + { return l->id() < r->id(); }); code += write_kernel_signature(_vtable.get_variable_list(arguments)); @@ -192,16 +193,16 @@ std::string ClTemplateWriter::write_code() code += " //------------------ END KERNEL_BUILDER_COORDINATE ---------------------\n"; { - const auto tiles = _components.get_tiles(); + const auto tiles = _components.get_tiles(); std::stringstream tiles_ss; tiles_ss << " //------------------ START TILE DECLARATION ---------------------\n"; for(auto tile : tiles) { - const auto var = _vtable.get_variable(tile); + const auto var = _vtable.get_variable(tile); const auto data_type = get_cl_type_from_data_type(tile->data_type()); - const auto var_name = var.uniq_name; + const auto var_name = var.uniq_name; tiles_ss << " TILE(" << data_type << ", M0, N0, " << var_name << ");\n"; } @@ -276,6 +277,11 @@ std::string ClTemplateWriter::write_argument_declaration(const GpuKernelVariable code += "\n TENSOR4D_T(" + var.uniq_name + ", IMAGE)"; break; } + case GpuKernelArgumentInfo::Type::Tensor_3D: + { + code += "\n TENSOR3D_DECLARATION(" + var.uniq_name + ")"; + break; + } default: { ARM_COMPUTE_ERROR("Unsupported declaration generation for GpuKernelArgumentInfo::Type"); diff --git a/tests/validation/dynamic_fusion/gpu/Integration.cpp b/tests/validation/dynamic_fusion/gpu/Integration.cpp index 7f2d439183..6a283f8082 100644 --- a/tests/validation/dynamic_fusion/gpu/Integration.cpp +++ b/tests/validation/dynamic_fusion/gpu/Integration.cpp @@ -90,9 +90,10 @@ 
TEST_CASE(Conv2d, framework::DatasetMode::ALL) // Instead of using ACL allocated memory, the user can choose to import memory into the tensors for(auto &data : runtime.get_auxiliary_tensors()) { - CLTensor *tensor = data.first; - AuxMemoryInfo aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); tensor->allocator()->allocate(); // Use ACL allocated memory // auto buf = cl::Buffer(); // tensor->allocator()->import_memory(buf); // Or, import external memory @@ -178,9 +179,10 @@ TEST_CASE(Add_Output_Add_Output, framework::DatasetMode::ALL) // Instead of using ACL allocated memory, the user can choose to import memory into the tensors for(auto &data : runtime.get_auxiliary_tensors()) { - CLTensor *tensor = data.first; - AuxMemoryInfo aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); tensor->allocator()->allocate(); // Use ACL allocated memory // auto buf = cl::Buffer(); // tensor->allocator()->import_memory(buf); // Or, import external memory @@ -282,9 +284,10 @@ TEST_CASE(Add_Output_Add_Cast_Cast_Output, framework::DatasetMode::ALL) // Instead of using ACL allocated memory, the user can choose to import memory into the tensors for(auto &data : runtime.get_auxiliary_tensors()) { - CLTensor *tensor = data.first; - AuxMemoryInfo aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); 
tensor->allocator()->allocate(); // Use ACL allocated memory // auto buf = cl::Buffer(); // tensor->allocator()->import_memory(buf); // Or, import external memory diff --git a/tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp b/tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp new file mode 100644 index 0000000000..d09454e05b --- /dev/null +++ b/tests/validation/dynamic_fusion/gpu/cl/Softmax.cpp @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "arm_compute/core/Types.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuSoftmax.h" + +#include "tests/CL/CLAccessor.h" +#include "tests/datasets/ShapeDatasets.h" +#include "tests/framework/Asserts.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/framework/datasets/Datasets.h" +#include "tests/validation/Validation.h" +#include "tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +/** Tolerance for float operations */ +RelativeTolerance tolerance_f16(half(0.2)); +RelativeTolerance tolerance_f32(0.001f); + +TEST_SUITE(CL) +TEST_SUITE(DYNAMIC_FUSION) +TEST_SUITE(SOFTMAX) + +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Mismatching data types + TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Mismatching shapes + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::S32), // Unsupported data type + TensorInfo(TensorShape(32U, 13U), 1, DataType::F16), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U), 1, DataType::F16), + TensorInfo(TensorShape(27U, 11U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM16), // Unsupported data type + 
TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), + + })), + framework::dataset::make("beta", { 1.0, + 2.0, + 2.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + })), + framework::dataset::make("axis", { + 0, + 0, + 1, // Invalid as axis != 0 + 0, + 0, + 0, + -3, // Invalid as axis != 0 + 2, // Invalid as axis != 0 + 1, // Invalid as axis != 0 + -1, // Invalid as axis != 0 + })), + framework::dataset::make("Expected", { false, false, false, true, false, false, false, false, false, false})), + input_info, output_info, beta, axis, expected) +{ + // Create a new workload sketch + CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx }; + GpuWorkloadSketch sketch{ &gpu_ctx }; + + SoftmaxAttributes softmax_attr{}; + softmax_attr.axis(axis).beta(beta).is_log_softmax(false); + TensorInfo src_info = sketch.create_tensor_info(input_info); + TensorInfo dst_info = sketch.create_tensor_info(output_info); + const bool res = static_cast(GpuSoftmax::validate_op(sketch, &src_info, &dst_info, softmax_attr)); + ARM_COMPUTE_EXPECT(res == expected, framework::LogLevel::ERRORS); +} + +template +using DynamicFusionSoftmaxLayerFixture = DynamicFusionSoftmaxValidationFixture; + +TEST_SUITE(FLOAT) +TEST_SUITE(FP32) + +FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} + + +FIXTURE_DATA_TEST_CASE(RunLarge, 
DynamicFusionSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} + + +FIXTURE_DATA_TEST_CASE(Run4D, DynamicFusionSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SoftmaxLayer4DShapes(), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} +TEST_SUITE_END() // FP32 +TEST_SUITE(FP16) + +FIXTURE_DATA_TEST_CASE(RunSmall, DynamicFusionSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), + framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f16); +} + + +FIXTURE_DATA_TEST_CASE(RunLarge, DynamicFusionSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), + framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f16); +} + + +FIXTURE_DATA_TEST_CASE(Run4D, DynamicFusionSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, 
combine(combine(combine(combine(datasets::SoftmaxLayer4DShapes(), + framework::dataset::make("DataType", DataType::F16)), + framework::dataset::make("Beta", { 1.0f, 2.0f })), + framework::dataset::make("Axis", { 0 })), + framework::dataset::make("is_log", {false, true}))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f16); +} +TEST_SUITE_END() // FP16 +TEST_SUITE_END() // FLOAT + +TEST_SUITE_END() // SOFTMAX +TEST_SUITE_END() // DYNAMIC_FUSION +TEST_SUITE_END() // CL + +} // namespace validation +} // namespace test +} // namespace arm_compute diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h index 235c8602b1..b15de71707 100644 --- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h @@ -145,10 +145,11 @@ protected: // (Important) Allocate auxiliary tensor memory if there are any for(auto &data : runtime.get_auxiliary_tensors()) { - auto tensor = data.first; - const auto aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); - tensor->allocator()->allocate(); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory } // Construct user tensors diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h index e0aecf5ed4..d9ce4dff18 100644 --- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h @@ -133,9 +133,10 @@ protected: // (Important) Allocate auxiliary tensor memory if there are any for(auto &data : 
runtime.get_auxiliary_tensors()) { - auto tensor = data.first; - const auto aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); tensor->allocator()->allocate(); // Use ACL allocated memory } // Construct user tensors @@ -273,10 +274,11 @@ protected: for(auto &data : runtime.get_auxiliary_tensors()) { - auto tensor = data.first; - const auto aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); - tensor->allocator()->allocate(); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory } // Construct user tensors TensorType t_input{}; diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h index e2722a1bdc..faed610874 100644 --- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h @@ -131,10 +131,11 @@ protected: // (Important) Allocate auxiliary tensor memory if there are any for(auto &data : runtime.get_auxiliary_tensors()) { - TensorType *tensor = data.first; - AuxMemoryInfo aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); - tensor->allocator()->allocate(); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory } // Construct user tensors diff --git 
a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h index efb67f8b11..efb5cf1e74 100644 --- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/Pool2dFixture.h @@ -109,9 +109,10 @@ protected: // (Important) Allocate auxiliary tensor memory if there are any for(auto &data : runtime.get_auxiliary_tensors()) { - auto tensor = data.first; - const auto aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); tensor->allocator()->allocate(); // Use ACL allocated memory } // Construct user tensors @@ -142,8 +143,8 @@ protected: return reference::pooling_layer(src, pool_info, QuantizationInfo(), nullptr, DataLayout::NCHW); } - TensorType _target{}; - SimpleTensor _reference{}; + TensorType _target{}; + SimpleTensor _reference{}; }; template diff --git a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h index bd999027b3..cd39ec0a06 100644 --- a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h @@ -132,10 +132,11 @@ protected: // (Important) Allocate auxiliary tensor memory if there are any for(auto &data : runtime.get_auxiliary_tensors()) { - auto tensor = data.first; - const auto aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); - tensor->allocator()->allocate(); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use 
ACL allocated memory } // Construct user tensors diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h index 0d3b1f0296..e0b62d093f 100644 --- a/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/operators/ReshapeFixture.h @@ -90,9 +90,10 @@ protected: // (Important) Allocate auxiliary tensor memory if there are any for(auto &data : runtime.get_auxiliary_tensors()) { - auto tensor = data.first; - const auto aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); tensor->allocator()->allocate(); // Use ACL allocated memory } diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h index 7eb820e0eb..581a3e8947 100644 --- a/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h +++ b/tests/validation/fixtures/dynamic_fusion/operators/ResizeFixture.h @@ -158,10 +158,11 @@ protected: // (Important) Allocate auxiliary tensor memory if there are any for(auto &data : runtime.get_auxiliary_tensors()) { - auto tensor = data.first; - const auto aux_mem_req = data.second; - tensor->allocator()->init(*data.first->info(), aux_mem_req.alignment); - tensor->allocator()->allocate(); + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory } // Construct user tensors diff --git a/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h 
b/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h new file mode 100644 index 0000000000..38177114e6 --- /dev/null +++ b/tests/validation/fixtures/dynamic_fusion/operators/SoftmaxFixture.h @@ -0,0 +1,161 @@ +/* +* Copyright (c) 2023 Arm Limited. + * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE +#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE + +#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h" +#include "arm_compute/dynamic_fusion/sketch/attributes/SoftmaxAttributes.h" +#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h" + +#include "tests/SimpleTensor.h" +#include "tests/framework/Fixture.h" +#include "tests/framework/Macros.h" +#include "tests/validation/Validation.h" +#include "tests/validation/reference/SoftmaxLayer.h" + +using namespace arm_compute::experimental::dynamic_fusion; + +namespace arm_compute +{ +namespace test +{ +namespace validation +{ +template +class DynamicFusionSoftmaxValidationGenericFixture : public framework::Fixture +{ +public: + template + void setup(TensorShape shape, DataType data_type, float beta, size_t axis, bool is_log) + { + _reference = compute_reference(shape, data_type, beta, axis, is_log); + _target = compute_target(shape, data_type, beta, axis, is_log); + } + +protected: + template + void fill(U &&tensor) + { + if(tensor.data_type() == DataType::F32) + { + std::uniform_real_distribution distribution(-10.0f, 10.0f); + library->fill(tensor, distribution, 0); + } + else if(tensor.data_type() == DataType::F16) + { + arm_compute::utils::uniform_real_distribution_16bit distribution{ -10.0f, 10.0f }; + library->fill(tensor, distribution, 0); + } + else if(!is_data_type_quantized(tensor.data_type())) + { + std::uniform_int_distribution<> distribution(0, 100); + library->fill(tensor, distribution, 0); + } + else + { + library->fill_tensor_uniform(tensor, 0); + } + } + + TensorType compute_target(const TensorShape &shape, DataType data_type, float beta, int32_t axis, bool is_log) + { + // Create a new workload sketch + CLCompileContext cl_compile_ctx = CLKernelLibrary::get().get_compile_context(); + GpuWorkloadContext gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx }; + 
GpuWorkloadSketch sketch{ &gpu_ctx }; + + SoftmaxAttributes softmax_attr{}; + softmax_attr.axis(axis).beta(beta).is_log_softmax(is_log); + TensorInfo src_info = sketch.create_tensor_info(shape, 1, data_type); + TensorInfo dst_info = sketch.create_tensor_info(shape, 1, data_type); + FunctionType::create_op(sketch, &src_info, &dst_info, softmax_attr); + + // Configure runtime + ClWorkloadRuntime runtime; + runtime.configure(sketch); + + // (Important) Allocate auxiliary tensor memory if there are any + // Instead of using ACL allocated memory, the user can choose to import memory into the tensors + for(auto &data : runtime.get_auxiliary_tensors()) + { + CLTensor *tensor = std::get<0>(data); + TensorInfo info = std::get<1>(data); + AuxMemoryInfo aux_mem_req = std::get<2>(data); + tensor->allocator()->init(info, aux_mem_req.alignment); + tensor->allocator()->allocate(); // Use ACL allocated memory + } + // Construct user tensors + TensorType src{}; + TensorType dst{}; + + // Initialize user tensors + src.allocator()->init(src_info); + dst.allocator()->init(dst_info); + + // Allocate and fill user tensors + src.allocator()->allocate(); + dst.allocator()->allocate(); + fill(AccessorType(src)); + + // Run runtime + runtime.run({ &src, &dst }); + + return dst; + } + + SimpleTensor compute_reference(const TensorShape &shape, DataType data_type, float beta, int32_t axis, bool is_log) + { + // Create reference + SimpleTensor src{ shape, data_type, 1 }; + + // Fill reference + fill(src); + + return reference::softmax_layer(src, beta, axis, is_log); + } + + TensorType _target{}; + SimpleTensor _reference{}; +}; + +template +class DynamicFusionSoftmaxValidationFixture : public DynamicFusionSoftmaxValidationGenericFixture +{ +public: + template + void setup(TensorShape shape, DataType data_type, float beta, size_t axis, bool is_log) + { + DynamicFusionSoftmaxValidationGenericFixture::setup(shape, + data_type, + beta, + axis, + is_log); + } +}; + +} // namespace validation +} // 
namespace test +} // namespace arm_compute + +#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_SOFTMAXFIXTURE */ -- cgit v1.2.1