author     SiCong Li <sicong.li@arm.com>    2023-07-18 17:56:49 +0100
committer  SiCong Li <sicong.li@arm.com>    2023-07-28 15:29:15 +0000
commit     16b37527906c68885f81a8db35f9d6040d73efec (patch)
tree       9669b5ebda00b3e3b1ac55992c144b09324b5997
parent     9129549110527fd53655d3e6b61e8e59bed6f97f (diff)
Port ElementwiseBinary to CKW part 2
* Add fp16 support
* Implement broadcasting to elementwise binary
* Implement kernel name and kernel config id
* Always use explicit cast in ckw unary, binary and ternary elementwise
  functions. This is to address the accidental use of double literals, with
  other benefits.
* Refactor TypeConverter for smaller includes

Resolves COMPMID-6260

Change-Id: I26b726746f8c0dd7b5942ad379d56f4d7642d15f
Signed-off-by: SiCong Li <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9999
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
-rw-r--r--  arm_compute/core/utils/StringUtils.h  10
-rw-r--r--  compute_kernel_writer/prototype/src/Prototype.h  67
-rw-r--r--  src/core/utils/StringUtils.cpp  21
-rw-r--r--  src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp  1
-rw-r--r--  src/dynamic_fusion/sketch/gpu/IGpuKernelWriter.h  5
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.cpp  48
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.h  4
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.cpp  2
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/IGpuCkwComponentDriver.h  21
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwCast.cpp  2
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.cpp  128
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.h  5
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.cpp  6
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.h  1
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/WriterHelper.h  115
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h (renamed from src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/TypeConverter.h)  28
-rw-r--r--  src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/ElementwiseBinary.h  61
-rw-r--r--  src/dynamic_fusion/sketch/gpu/components/utils/type_printer/ElementwiseBinary.h  77
-rw-r--r--  tests/validation/dynamic_fusion/gpu/Integration.cpp  25
19 files changed, 459 insertions, 168 deletions
diff --git a/arm_compute/core/utils/StringUtils.h b/arm_compute/core/utils/StringUtils.h
index a91ef2387d..41f29b0901 100644
--- a/arm_compute/core/utils/StringUtils.h
+++ b/arm_compute/core/utils/StringUtils.h
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_CORE_UTILS_STRINGUTILS_H
#include <string>
+#include <vector>
namespace arm_compute
{
@@ -51,5 +52,14 @@ std::string upper_string(const std::string &val);
* @return String with the floating point value.
*/
std::string float_to_string_with_full_precision(float val);
+
+/** Join a sequence of strings with separator @p sep
+ *
+ * @param[in] strings Strings to join
+ * @param[in] sep Separator to join consecutive strings in the sequence
+ *
+ * @return The joined string
+ */
+std::string join(const std::vector<std::string> strings, const std::string &sep);
}
#endif /*ARM_COMPUTE_CORE_UTILS_STRINGUTILS_H */
diff --git a/compute_kernel_writer/prototype/src/Prototype.h b/compute_kernel_writer/prototype/src/Prototype.h
index 05c7306e3a..a8dc7fbfdb 100644
--- a/compute_kernel_writer/prototype/src/Prototype.h
+++ b/compute_kernel_writer/prototype/src/Prototype.h
@@ -2194,10 +2194,34 @@ struct GpuKernel
std::vector<std::pair<int32_t, TensorComponentType>> list_tensor_components; // List of tensor components (width, stride,..), required for the dispatch stage)
};
+// Generate all extension pragmas (hardcoded for now)
+inline std::string generate_extensions()
+{
+ std::string ext = R"(
+#if defined(cl_khr_fp16)
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#endif // defined(cl_khr_fp16)
+
+#if defined(cl_arm_integer_dot_product_int8)
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable
+#endif // defined(cl_arm_integer_dot_product_int8)
+
+#if defined(cl_arm_integer_dot_product_accumulate_int8)
+#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable
+#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
+
+#if defined(cl_arm_printf)
+#pragma OPENCL EXTENSION cl_arm_printf : enable
+#endif // defined(cl_arm_printf)
+)";
+ return ext;
+}
+
// This function should produce an object with the source
inline std::string generate_code(GpuKernelWriterDataHolder &in, const std::string &name)
{
std::string code;
+ code += generate_extensions();
code += "__kernel void ";
code += name;
code += "(\n";
@@ -2783,6 +2807,8 @@ private:
case TensorSamplerAddressModeY::SkipMinEdgeOnly:
_writer->compound_statement_end();
break;
+ case TensorSamplerAddressModeY::None:
+ break;
default:
assert(false);
@@ -2799,6 +2825,8 @@ private:
_writer->write_text(" = 0.0f;\n");
_writer->compound_statement_end();
break;
+ case TensorSamplerAddressModeY::None:
+ break;
default:
assert(false);
@@ -2857,6 +2885,8 @@ private:
case TensorSamplerAddressModeZ::SkipMaxEdgeOnly:
_writer->compound_statement_end();
break;
+ case TensorSamplerAddressModeZ::None:
+ break;
default:
assert(false);
@@ -3622,14 +3652,15 @@ public:
const IVectorTile *src = operands.unpack(src_name);
const IVectorTile *dst = operands.unpack(dst_name);
- const int32_t dst_w = dst->format().w;
const int32_t dst_h = dst->format().h;
- const int32_t src_w = src->format().w;
const std::string dt = dst->underlying_source_variables()[0].type.str;
- const bool broadcast_src_x = dst_w != 1 && src_w == 1;
-
- const std::string src_prefix = broadcast_src_x ? "(" + dt + ")" : "";
+ // Always perform an explicit cast. This automatically covers at least the 2 scenarios:
+ // 1. Widen a scalar into a vector type. This enables scalar-vector broadcasting
+ // 2. Ensure non-ambiguity over function overloads.
+ // E.g. a constant tile may be accidentally initialized with a double literal. By casting it to single float,
+ // it avoids ambiguous function calls
+ const std::string src_prefix = "(" + dt + ")";
// Broadcasting on Y is automatic
for(int32_t y = 0; y < dst_h; ++y)
@@ -3679,18 +3710,13 @@ public:
const IVectorTile *second = operands.unpack(second_name);
const IVectorTile *dst = operands.unpack(dst_name);
- const int32_t dst_w = dst->format().w;
const int32_t dst_h = dst->format().h;
- const int32_t first_w = first->format().w;
- const int32_t second_w = second->format().w;
const auto datatype = dst->underlying_source_variables()[0].type;
const std::string datatype_str = datatype.str;
- const bool broadcast_first_x = dst_w != 1 && first_w == 1;
- const bool broadcast_second_x = dst_w != 1 && second_w == 1;
-
- const std::string first_prefix = broadcast_first_x ? "(" + datatype_str + ")" : "";
- const std::string second_prefix = broadcast_second_x ? "(" + datatype_str + ")" : "";
+ // Always perform an explicit cast. See similar comments in op_unary_elementwise_function
+ const std::string first_prefix = "(" + datatype_str + ")";
+ const std::string second_prefix = "(" + datatype_str + ")";
const bool is_float = (datatype.dt == DataType::Fp32 || datatype.dt == DataType::Fp16);
@@ -3727,20 +3753,13 @@ public:
const IVectorTile *third = operands.unpack(third_name);
const IVectorTile *dst = operands.unpack(dst_name);
- const int32_t dst_w = dst->format().w;
const int32_t dst_h = dst->format().h;
- const int32_t first_w = first->format().w;
- const int32_t second_w = second->format().w;
- const int32_t third_w = third->format().w;
const std::string dt = dst->underlying_source_variables()[0].type.str;
- const bool broadcast_first_x = dst_w != 1 && first_w == 1;
- const bool broadcast_second_x = dst_w != 1 && second_w == 1;
- const bool broadcast_third_x = dst_w != 1 && third_w == 1;
-
- const std::string first_prefix = broadcast_first_x ? "(" + dt + ")" : "";
- const std::string second_prefix = broadcast_second_x ? "(" + dt + ")" : "";
- const std::string third_prefix = broadcast_third_x ? "(" + dt + ")" : "";
+ // Always perform an explicit cast. See similar comments in op_unary_elementwise_function
+ const std::string first_prefix = "(" + dt + ")";
+ const std::string second_prefix = "(" + dt + ")";
+ const std::string third_prefix = "(" + dt + ")";
// Broadcasting on Y is automatic
for(int32_t y = 0; y < dst_h; ++y)
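To illustrate the explicit-cast comment above: a minimal standalone C++ sketch (the cast_operand helper and the operand strings are hypothetical, not part of this patch) of how prefixing every operand with "(dtype)" both widens a scalar to the destination vector type and pins a literal that would otherwise be parsed as a double in the emitted OpenCL:

#include <iostream>
#include <string>

// Hypothetical emitter helper: always prefix an operand with an explicit cast
// to the destination element/vector type, mirroring what the writer does above.
std::string cast_operand(const std::string &dst_type, const std::string &operand)
{
    return "(" + dst_type + ")" + operand;
}

int main()
{
    // The scalar constant is broadcast into a half8 vector and pinned to half,
    // instead of being treated as an fp64 literal by the OpenCL compiler.
    std::cout << "dst = max(" << cast_operand("half8", "src") << ", "
              << cast_operand("half8", "0.5") << ");\n";
    // Prints: dst = max((half8)src, (half8)0.5);
}

Without the cast, an expression such as max(src, 0.5) on a half vector can trip overload resolution or silently pull in double arithmetic, which is exactly the accidental double-literal issue this patch addresses.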
diff --git a/src/core/utils/StringUtils.cpp b/src/core/utils/StringUtils.cpp
index 938ee09232..6d05c9b64e 100644
--- a/src/core/utils/StringUtils.cpp
+++ b/src/core/utils/StringUtils.cpp
@@ -23,13 +23,14 @@
*/
#include "arm_compute/core/utils/StringUtils.h"
-#include <limits>
-#include <sstream>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <fstream>
+#include <limits>
#include <map>
+#include <numeric>
+#include <sstream>
#include <string>
namespace arm_compute
@@ -61,4 +62,20 @@ std::string float_to_string_with_full_precision(float val)
return ss.str();
}
+
+std::string join(const std::vector<std::string> strings, const std::string &sep)
+{
+ if(strings.empty())
+ {
+ return "";
+ }
+ return std::accumulate(
+ std::next(strings.begin()),
+ strings.end(),
+ strings.at(0),
+ [&sep](const std::string & a, const std::string & b)
+ {
+ return a + sep + b;
+ });
+}
}
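A brief usage sketch of the join() helper added above (assuming the header is on the include path and the translation unit links against arm_compute): the separator only appears between consecutive elements, and an empty input yields an empty string.

#include <iostream>
#include <string>
#include <vector>

#include "arm_compute/core/utils/StringUtils.h"

int main()
{
    const std::vector<std::string> parts = { "elementwise_binary", "op", "add", "dt", "f32" };
    // Prints: elementwise_binary_op_add_dt_f32
    std::cout << arm_compute::join(parts, "_") << '\n';
    // Empty input returns an empty string
    std::cout << '"' << arm_compute::join({}, "_") << '"' << '\n';
    return 0;
}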
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
index 92ca8557f1..15a5632d0b 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
@@ -45,6 +45,7 @@ void ClKernelRuntime::configure(const ClCompileContext &compile_ctx, const GpuKe
opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
_kernel = static_cast<cl::Kernel>(compile_ctx.create_kernel(code.name(),
code.name(), // program name has to be provided to differentiate between different unfusable components' kernels.
+ // Each program contains exactly one kernel
code.code(),
klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */,
code.build_options().options(),
diff --git a/src/dynamic_fusion/sketch/gpu/IGpuKernelWriter.h b/src/dynamic_fusion/sketch/gpu/IGpuKernelWriter.h
index 28e5432224..1d8b231efd 100644
--- a/src/dynamic_fusion/sketch/gpu/IGpuKernelWriter.h
+++ b/src/dynamic_fusion/sketch/gpu/IGpuKernelWriter.h
@@ -53,7 +53,10 @@ public:
/** Generate kernel code */
virtual std::string get_code() = 0;
/** Generate build options */
- virtual CLBuildOptions get_build_options() = 0;
+ virtual CLBuildOptions get_build_options()
+ {
+ return {};
+ }
/** Generate config id string of the entire kernel. This is used for tuning */
virtual std::string get_config_id() = 0;
/** Generate execution window */
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.cpp b/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.cpp
index d78956f835..a24a172d77 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.cpp
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.cpp
@@ -30,7 +30,7 @@
#include "arm_compute/core/Window.h"
#include "src/common/utils/Log.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.h"
-#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/TypeConverter.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwKernelWriter.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwScopedKernelWriter.h"
@@ -43,46 +43,52 @@ namespace experimental
namespace dynamic_fusion
{
GpuCkwDriver::GpuCkwDriver(const GpuKernelComponentGroup &components)
- : _components{ components }, _kernel{ GpuTargetLanguage::OpenCL }
+ : _components{ components }, _kernel{ GpuTargetLanguage::OpenCL }, _code{}
{
-}
-
-std::string GpuCkwDriver::get_name()
-{
- ARM_COMPUTE_LOG_PARAMS(std::string("[V1] TODO"));
- return "unnamed";
-}
+ // Generate kernel name
+ std::string name = "";
+ for(auto &comp : _components)
+ {
+ auto ckw_driver = comp->ckw_component_driver();
+ ARM_COMPUTE_ERROR_ON(ckw_driver == nullptr);
+ name += ckw_driver->get_name(_components) + "__";
+ }
-std::string GpuCkwDriver::get_code()
-{
- _kernel.name(get_name());
+ // Generate kernel code
+ _kernel.name(name);
GpuCkwKernelWriter root_writer(_kernel);
GpuCkwScopedKernelWriter writer(&root_writer);
GpuCkwVariableTable vtable{};
- // Global Kernel Writer Driver code
for(auto &comp : _components)
{
auto ckw_driver = comp->ckw_component_driver();
ARM_COMPUTE_ERROR_ON(ckw_driver == nullptr);
ckw_driver->write_component_code(_components, vtable, writer);
}
+ _code = root_writer.generate_code();
+}
- std::string code = root_writer.generate_code();
-
- return code;
+std::string GpuCkwDriver::get_name()
+{
+ return _kernel.name();
}
-CLBuildOptions GpuCkwDriver::get_build_options()
+std::string GpuCkwDriver::get_code()
{
- ARM_COMPUTE_LOG_PARAMS(std::string("[V1] TO REMOVE"));
- return CLBuildOptions{};
+ return _code;
}
std::string GpuCkwDriver::get_config_id()
{
- ARM_COMPUTE_LOG_PARAMS(std::string("[V1] TODO"));
- return "";
+ std::string id = "";
+ for(auto &comp : _components)
+ {
+ auto ckw_driver = comp->ckw_component_driver();
+ ARM_COMPUTE_ERROR_ON(ckw_driver == nullptr);
+ id += ckw_driver->get_tuner_id(_components) + "__";
+ }
+ return id;
}
Window GpuCkwDriver::get_window() const
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.h
index c6e03f6e03..19db575fea 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.h
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwDriver.h
@@ -37,7 +37,6 @@ namespace arm_compute
{
/** Forward declarations */
class Window;
-class CLBuildOptions;
namespace experimental
{
@@ -62,8 +61,6 @@ public:
std::string get_name() override;
/** Generate kernel code */
std::string get_code() override;
- /** Generate build options */
- CLBuildOptions get_build_options() override;
/** Generate config id string of the entire kernel. This is used for tuning */
std::string get_config_id() override;
/** Generate execution window */
@@ -74,6 +71,7 @@ public:
private:
GpuKernelComponentGroup _components{};
ckw::Kernel _kernel;
+ std::string _code;
};
} // namespace dynamic_fusion
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.cpp b/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.cpp
index 6f3eca711d..37c27cd116 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.cpp
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.cpp
@@ -27,7 +27,7 @@
#include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGroup.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwKernelWriter.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwScopedKernelWriter.h"
-#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/TypeConverter.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h"
#include <sstream>
namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/IGpuCkwComponentDriver.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/IGpuCkwComponentDriver.h
index 62255f1cf6..14086f785e 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/IGpuCkwComponentDriver.h
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/IGpuCkwComponentDriver.h
@@ -100,6 +100,27 @@ public:
{
return Window{};
}
+ /** Generate the name of the component
+ *
+ * This will be concatenated with other components' names to form the name of the kernel
+ */
+ virtual std::string get_name(const ComponentGroup &comp_group) const
+ {
+ ARM_COMPUTE_UNUSED(comp_group);
+ return "unnamed";
+ }
+ /** Generate the tuner id of the component
+ * This id should capture all the parameters that distinguish one kernel's lws tuning from another.
+ * e.g. two components that are identical in every other way but have different output tensor dimensions should
+ * have different tuner ids, because the lws of one may not be optimal on the other.
+ *
+ * This will be concatenated with other components' tuner id to form the tuner id of the kernel
+ */
+ virtual std::string get_tuner_id(const ComponentGroup &comp_group) const
+ {
+ ARM_COMPUTE_UNUSED(comp_group);
+ return "";
+ }
/** Get component id */
ComponentId id() const
{
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwCast.cpp b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwCast.cpp
index 8d7e6a8c37..6ecf2bac44 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwCast.cpp
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwCast.cpp
@@ -33,7 +33,7 @@
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwKernelWriter.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwScopedKernelWriter.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.h"
-#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/TypeConverter.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h"
#include <string>
using namespace ckw;
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.cpp b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.cpp
index 15e32e26d5..c8bf999261 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.cpp
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/StringUtils.h"
#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "ckw/TensorTileSampler.h"
#include "ckw/types/TensorSamplerTypes.h"
@@ -35,6 +36,11 @@
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwScopedKernelWriter.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwVariableTable.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/WriterHelper.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h"
+#include "src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/ElementwiseBinary.h"
+#include "src/dynamic_fusion/sketch/gpu/components/utils/type_printer/ElementwiseBinary.h"
+#include "support/StringSupport.h"
+#include <algorithm>
#include <string>
using namespace ckw;
@@ -44,57 +50,15 @@ namespace experimental
{
namespace dynamic_fusion
{
-namespace
-{
-/** Create a simple sampler from tile of dimension [m0, n0]
- */
-inline TensorTileSampler create_simple_sampler(GpuCkwScopedKernelWriter &writer, int32_t m0, int32_t n0)
-{
- TensorTileSampler sampler;
-
- auto &gid_0 = writer->declare_tile("gid_0", ckw::DataType::Int32);
- auto &gid_1 = writer->declare_tile("gid_1", ckw::DataType::Int32);
- auto &gid_2 = writer->declare_tile("gid_2", ckw::DataType::Int32);
-
- writer->op_get_global_id(gid_0, 0);
- writer->op_get_global_id(gid_1, 1);
- writer->op_get_global_id(gid_2, 2);
-
- auto &x_coord = writer->declare_tile("x_coord", ckw::DataType::Int32);
- auto &y_coord = writer->declare_tile("y_coord", ckw::DataType::Int32);
- auto &m0_t = writer->declare_tile("m0", m0);
- auto &n0_t = writer->declare_tile("n0", n0);
- writer->op_binary_expression(x_coord, gid_0, ckw::BinaryOp::Mul, n0_t);
- writer->op_binary_expression(y_coord, gid_1, ckw::BinaryOp::Mul, m0_t);
-
- sampler.x(x_coord);
- sampler.y(y_coord);
- auto &const_0 = writer->declare_tile("0", 0);
- sampler.z(const_0); // 3rd dimension collapsed with 2nd dimension
- sampler.b(gid_2);
-
- sampler.width(n0);
- sampler.height(m0);
-
- sampler.format(TensorSamplerFormat::C_WH_1); // 3rd dimension collapsed with 2nd dimension
- sampler.address_mode_x(TensorSamplerAddressModeX::None);
- sampler.address_mode_y(TensorSamplerAddressModeY::ClampToBorder);
- sampler.address_mode_z(TensorSamplerAddressModeZ::Skip); // Dimensions higher than 3 not supported yet
-
- return sampler;
-}
-} // namespace
-
GpuCkwElementwiseBinary::GpuCkwElementwiseBinary(ComponentId id,
const ArgumentPack<ITensorInfo> &tensors,
const Attributes &attributes)
: IGpuCkwComponentDriver{ id, tensors },
_lhs{},
_rhs{},
- _dst{}
+ _dst{},
+ _attributes{ attributes }
{
- ARM_COMPUTE_UNUSED(attributes);
-
_lhs = this->tensors().get_const_tensor(TensorType::ACL_SRC_0);
_rhs = this->tensors().get_const_tensor(TensorType::ACL_SRC_1);
_dst = this->tensors().get_const_tensor(TensorType::ACL_DST_0);
@@ -103,32 +67,60 @@ GpuCkwElementwiseBinary::GpuCkwElementwiseBinary(ComponentId
void GpuCkwElementwiseBinary::write_component_code(const ComponentGroup &comp_group, GpuCkwVariableTable &vtable, GpuCkwScopedKernelWriter writer) const
{
- const auto root_window = comp_group.get_root_component()->ckw_component_driver()->get_window();
- const unsigned int n0 = root_window.x().step();
- const unsigned int m0 = root_window.y().step();
+ const auto root_window = comp_group.get_root_component()->ckw_component_driver()->get_window();
+ const auto n0 = static_cast<int32_t>(root_window.x().step());
+ const auto m0 = static_cast<int32_t>(root_window.y().step());
GpuCkwComponentArgument *lhs = vtable.declare_variable(comp_group, writer, _lhs, TensorStorageType::ClBufferUint8Ptr, "lhs");
GpuCkwComponentArgument *rhs = vtable.declare_variable(comp_group, writer, _rhs, TensorStorageType::ClBufferUint8Ptr, "rhs");
GpuCkwComponentArgument *dst = vtable.declare_variable(comp_group, writer, _dst, TensorStorageType::ClBufferUint8Ptr, "dst");
- // Load the LHS and RHS tiles and prepare the tensor sampler.
- load_lhs_rhs_tiles_and_prepare_sampler(writer, lhs, rhs, m0, n0, create_simple_sampler);
+ auto &gid_0 = writer->declare_tile("gid_0", ckw::DataType::Int32);
+ auto &gid_1 = writer->declare_tile("gid_1", ckw::DataType::Int32);
+ auto &gid_2 = writer->declare_tile("gid_2", ckw::DataType::Int32);
+
+ writer->op_get_global_id(gid_0, 0);
+ writer->op_get_global_id(gid_1, 1);
+ writer->op_get_global_id(gid_2, 2);
+
+ auto &const_0 = writer->declare_tile("0", 0);
+
+ // Load the LHS and RHS tiles
+ if(!lhs->has_tile())
+ {
+ auto sampler = create_boundary_aware_2d_sampler(writer, gid_0, gid_1, _lhs->dimension(0), _lhs->dimension(1), n0, m0, "lhs_", const_0);
+ sampler.format(TensorSamplerFormat::C_WH_1); // 3rd dimension collapsed with 2nd dimension
+ sampler.z(const_0);
+ sampler.b(gid_2);
+ writer->op_load_once(lhs, sampler);
+ }
+ if(!rhs->has_tile())
+ {
+ auto sampler = create_boundary_aware_2d_sampler(writer, gid_0, gid_1, _rhs->dimension(0), _rhs->dimension(1), n0, m0, "rhs_", const_0);
+ sampler.format(TensorSamplerFormat::C_WH_1); // 3rd dimension collapsed with 2nd dimension
+ sampler.z(const_0);
+ sampler.b(gid_2);
+ writer->op_load_once(rhs, sampler);
+ }
- auto &lhs_tile = lhs->tile();
- auto &rhs_tile = rhs->tile();
- const auto &sampler = lhs->tile_sampler();
+ auto dst_sampler = create_boundary_aware_2d_sampler(writer, gid_0, gid_1, _dst->dimension(0), _dst->dimension(1), n0, m0, "dst_", const_0);
+ dst_sampler.format(TensorSamplerFormat::C_WH_1); // 3rd dimension collapsed with 2nd dimension
+ dst_sampler.z(const_0);
+ dst_sampler.b(gid_2);
// Prepare the output tile.
if(!dst->has_tile())
{
- auto &tile = writer->declare_tile("dst_tile", lhs_tile.tile_info());
- dst->init_virtual_tensor(tile, sampler);
+ auto &tile = writer->declare_tile("dst_tile", ckw::TileInfo(to_ckw(_dst->data_type()), dst_sampler.height(), dst_sampler.width()));
+ dst->init_virtual_tensor(tile, dst_sampler);
}
+ auto &lhs_tile = lhs->tile();
+ auto &rhs_tile = rhs->tile();
auto &dst_tile = dst->tile();
// Perform the operation.
- writer->op_binary_expression(dst_tile, lhs_tile, BinaryOp::Add, rhs_tile);
+ writer->op_binary_expression(dst_tile, lhs_tile, to_ckw(_attributes), rhs_tile);
}
Window GpuCkwElementwiseBinary::get_window() const
@@ -146,6 +138,32 @@ Window GpuCkwElementwiseBinary::get_window() const
return win;
}
+std::string GpuCkwElementwiseBinary::get_name(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+ const std::vector<std::string> build_params =
+ {
+ "elementwise_binary",
+ "op", to_string(_attributes.operation()),
+ "dt", lower_string(string_from_data_type(_dst->data_type())),
+ };
+ return join(build_params, "_");
+}
+
+std::string GpuCkwElementwiseBinary::get_tuner_id(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+ /// NOTE: Hardcoded for now, the parameters should ideally be exported by ckw (a selection of constant tiles)
+ std::vector<std::string> build_params =
+ {
+ "elementwise_binary",
+ "op", to_string(_attributes.operation()),
+ "dt", lower_string(string_from_data_type(_dst->data_type())),
+ "dst_dim0", support::cpp11::to_string(_dst->dimension(0)),
+ "dst_dim1", support::cpp11::to_string(_dst->dimension(1)),
+ };
+ return join(build_params, "_");
+}
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
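For concreteness, a standalone sketch of the strings the two methods above would produce for a hypothetical F32 ADD component with a 32x16 destination, and of how GpuCkwDriver then concatenates component names with "__". The local join() mirrors arm_compute::join() so the snippet is self-contained; the concrete operation and shapes are assumptions.

#include <iostream>
#include <iterator>
#include <numeric>
#include <string>
#include <vector>

// Same joining behaviour as the arm_compute::join() helper used above.
static std::string join(const std::vector<std::string> &strings, const std::string &sep)
{
    if(strings.empty())
    {
        return "";
    }
    return std::accumulate(std::next(strings.begin()), strings.end(), strings.front(),
                           [&sep](const std::string &a, const std::string &b) { return a + sep + b; });
}

int main()
{
    // Hypothetical component: ADD on F32 with a 32x16 destination
    const std::string name     = join({ "elementwise_binary", "op", "add", "dt", "f32" }, "_");
    const std::string tuner_id = join({ "elementwise_binary", "op", "add", "dt", "f32", "dst_dim0", "32", "dst_dim1", "16" }, "_");

    // GpuCkwDriver appends each component's name followed by "__",
    // e.g. an elementwise binary component fused with a store component:
    const std::string kernel_name = name + "__" + "store" + "__";

    std::cout << name << '\n';        // elementwise_binary_op_add_dt_f32
    std::cout << tuner_id << '\n';    // elementwise_binary_op_add_dt_f32_dst_dim0_32_dst_dim1_16
    std::cout << kernel_name << '\n'; // elementwise_binary_op_add_dt_f32__store__
}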
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.h
index 963b92baf9..e9c41530f8 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.h
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwElementwiseBinary.h
@@ -54,12 +54,15 @@ public:
~GpuCkwElementwiseBinary() override = default;
// Inherited methods overriden:
virtual void write_component_code(const ComponentGroup &comp_group, GpuCkwVariableTable &vtable, GpuCkwScopedKernelWriter writer) const override;
- Window get_window() const override;
+ Window get_window() const override;
+ std::string get_name(const ComponentGroup &comp_group) const override;
+ std::string get_tuner_id(const ComponentGroup &comp_group) const override;
private:
const ITensorInfo *_lhs;
const ITensorInfo *_rhs;
const ITensorInfo *_dst;
+ Attributes _attributes;
};
} // namespace dynamic_fusion
} // namespace experimental
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.cpp b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.cpp
index 247d1b834f..8917391537 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.cpp
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.cpp
@@ -53,6 +53,12 @@ void GpuCkwStore::write_component_code(const ComponentGroup &comp_group, GpuCkwV
writer->op_store(dst_tensor, src_tile, sampler);
}
+
+std::string GpuCkwStore::get_name(const ComponentGroup &comp_group) const
+{
+ ARM_COMPUTE_UNUSED(comp_group);
+ return "store";
+}
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.h
index 5728ff9f49..8e35651caf 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.h
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/GpuCkwStore.h
@@ -49,6 +49,7 @@ public:
~GpuCkwStore() override = default;
// Inherited methods overriden:
virtual void write_component_code(const ComponentGroup &comp_group, GpuCkwVariableTable &vtable, GpuCkwScopedKernelWriter writer) const override;
+ std::string get_name(const ComponentGroup &comp_group) const override;
private:
const ITensorInfo *_src;
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/WriterHelper.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/WriterHelper.h
index 46c0f4ed8c..f4a056c5a0 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/WriterHelper.h
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/WriterHelper.h
@@ -24,10 +24,12 @@
#ifndef ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_WRITERHELPER
#define ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_WRITERHELPER
+#include "arm_compute/core/utils/misc/Utility.h"
+#include "ckw/TensorTileSampler.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwComponentArgument.h"
#include "src/dynamic_fusion/sketch/gpu/ckw_driver/GpuCkwScopedKernelWriter.h"
-#include "ckw/TensorTileSampler.h"
+#include <algorithm>
#include <functional>
using namespace ckw;
@@ -39,29 +41,6 @@ namespace dynamic_fusion
{
using SamplerCreator = std::function<TensorTileSampler(GpuCkwScopedKernelWriter &, int32_t /* m0 */, int32_t /* n0 */)>;
-/** Load lhs and rhs tiles of dimension [m0, n0] only when not loaded and prepare the sampler
- */
-inline void load_lhs_rhs_tiles_and_prepare_sampler(GpuCkwScopedKernelWriter &writer, GpuCkwComponentArgument *lhs, GpuCkwComponentArgument *rhs, int32_t m0, int32_t n0, SamplerCreator create_sampler)
-{
- if(!lhs->has_tile() && !rhs->has_tile())
- {
- const auto sampler = create_sampler(writer, m0, n0);
-
- writer->op_load_once(lhs, sampler);
- writer->op_load_once(rhs, sampler);
- }
- else if(lhs->has_tile())
- {
- const auto &sampler = lhs->tile_sampler();
- writer->op_load_once(rhs, sampler);
- }
- else
- {
- const auto &sampler = rhs->tile_sampler();
- writer->op_load_once(lhs, sampler);
- }
-}
-
/** Load src and dst tiles of dimension [m0, n0] only when not loaded and prepare the sampler
*/
inline void load_src_dst_tiles_and_prepare_sampler(GpuCkwScopedKernelWriter &writer, GpuCkwComponentArgument *src, GpuCkwComponentArgument *dst, int32_t m0, int32_t n0, SamplerCreator create_sampler)
@@ -88,6 +67,94 @@ inline void load_src_dst_tiles_and_prepare_sampler(GpuCkwScopedKernelWriter &wri
}
}
+/** Get boundary aware coordinate along one axis. Load and store of size step_v at the coordinate will not be out of bound
+ *
+ * @param[in,out] writer Writer
+ * @param[out] coord Resultant coordinate
+ * @param[in] gid Global work item id
+ * @param[in] step_v Step size / vector size
+ * @param[in] leftover_step_v Leftover step size at the boundary
+ * @param[in] prefix Prefix to all the tiles declared within this function
+ * @param[in] const_0 Constant tile of value 0
+ */
+inline void get_coord(GpuCkwScopedKernelWriter writer, TileOperand &coord, TileOperand &gid, int32_t step_v, int32_t leftover_step_v, const std::string &prefix, TileOperand &const_0)
+{
+ auto &step = writer->declare_tile(prefix + "step", step_v);
+ auto &leftover_step = writer->declare_tile(prefix + "leftover_step", leftover_step_v);
+
+ // step - leftover_step
+ auto &step_minus_leftover = writer->declare_tile(prefix + "step_minus_leftover", ckw::DataType::Int32);
+ writer->op_binary_expression(step_minus_leftover, step, ckw::BinaryOp::Sub, leftover_step);
+
+ // (step - leftover_step) % step
+ auto &coord_correction = writer->declare_tile(prefix + "coord_correction", ckw::DataType::Int32);
+ writer->op_binary_expression(coord_correction, step_minus_leftover, ckw::BinaryOp::Mod, step);
+
+ // (gid * step)
+ auto &raw_coord = writer->declare_tile(prefix + "raw_coord", ckw::DataType::Int32);
+ writer->op_binary_expression(raw_coord, gid, ckw::BinaryOp::Mul, step);
+
+ // (gid * step) - (step - leftover_step) % step
+ auto &corrected_coord = writer->declare_tile(prefix + "corrected_coord", ckw::DataType::Int32);
+ writer->op_binary_expression(corrected_coord, raw_coord, ckw::BinaryOp::Sub, coord_correction);
+
+ // max((gid * step) - (step - leftover_step) % step, 0)
+ writer->op_binary_elementwise_function(coord, ckw::BinaryFunction::Max, corrected_coord, const_0);
+}
+
+/** Declare coordinate tiles "{prefix}_dim0_coord" and "{prefix}_dim1_coord", and create a boundary-aware sampler from tile of size [n0, m0], against the overall dimensions [dim0, dim1]
+ * The load and store of tile [n0, m0] will never be out of bound of [dim0, dim1]
+ *
+ * @param[in,out] writer Writer
+ * @param[in] gid_0 Global work item id 0
+ * @param[in] gid_1 Global work item id 1
+ * @param[in] dim0_v Dimension 0
+ * @param[in] dim1_v Dimension 1
+ * @param[in] n0_v Tile size dimension 0
+ * @param[in] m0_v Tile size dimension 1
+ * @param[in] prefix Prefix to all the tiles declared within this function
+ * @param[in] const_0 Constant tile of value 0
+ *
+ * @return TensorTileSampler
+ */
+inline TensorTileSampler create_boundary_aware_2d_sampler(GpuCkwScopedKernelWriter writer, TileOperand &gid_0, TileOperand &gid_1, int32_t dim0_v, int32_t dim1_v, int32_t n0_v, int32_t m0_v,
+ const std::string prefix, TileOperand &const_0)
+{
+ // Clamp tile size [n0, m0] against dimension [dim0, dim1]
+ // This is needed to:
+ // * Guard against tile sizes are bigger than the tensor dimensions
+ // * Handle broadcasting tiles (e.g. src tensor is of size 1 in one of the dimensions)
+ n0_v = utility::clamp(n0_v, 1, dim0_v);
+ m0_v = utility::clamp(m0_v, 1, dim1_v);
+ const int32_t partial_n0_v = dim0_v % n0_v;
+ const int32_t partial_m0_v = dim1_v % m0_v;
+
+ // Declare #prefix_dim0_coord and #prefix_dim1_coord
+ auto &dim0_coord = writer->declare_tile(prefix + "dim0_coord", ckw::DataType::Int32);
+ get_coord(writer, dim0_coord, gid_0, n0_v, partial_n0_v, prefix + "dim0_", const_0);
+ auto &dim1_coord = writer->declare_tile(prefix + "dim1_coord", ckw::DataType::Int32);
+ get_coord(writer, dim1_coord, gid_1, m0_v, partial_m0_v, prefix + "dim1_", const_0);
+
+ // Set sampler
+ // Only set fields related to boundary aware loading/storing. Other info (e.g. format) is not responsibility of this function
+ TensorTileSampler sampler;
+
+ sampler.x(dim0_coord);
+ sampler.y(dim1_coord);
+
+ sampler.width(n0_v);
+ sampler.height(m0_v);
+
+ sampler.address_mode_x(TensorSamplerAddressModeX::None);
+ sampler.address_mode_y(TensorSamplerAddressModeY::None);
+ sampler.address_mode_z(TensorSamplerAddressModeZ::None);
+
+ return sampler;
+}
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
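As a worked example of the boundary-aware coordinate arithmetic in get_coord() above (the concrete sizes are assumptions for illustration): with dim0 = 10 and n0 = 4, leftover_step = 10 % 4 = 2, so every work item after the first starts (4 - 2) % 4 = 2 elements earlier and a 4-wide access never crosses the tensor edge. The same arithmetic as a standalone C++ sketch:

#include <algorithm>
#include <cstdio>

int main()
{
    const int dim0          = 10;          // tensor extent along dimension 0
    const int step          = 4;           // n0: tile / vector width
    const int leftover_step = dim0 % step; // 2: partial tile at the boundary

    for(int gid = 0; gid < 3; ++gid)
    {
        // coord = max(gid * step - (step - leftover_step) % step, 0)
        const int coord = std::max(gid * step - (step - leftover_step) % step, 0);
        std::printf("gid=%d -> coord=%d, accesses [%d, %d]\n", gid, coord, coord, coord + step - 1);
    }
    // gid=0 -> [0, 3], gid=1 -> [2, 5], gid=2 -> [6, 9]: shifted tiles overlap
    // their left neighbour instead of reading or writing out of bounds.
}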
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/TypeConverter.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h
index 8a38d67d80..34b1283add 100644
--- a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/TypeConverter.h
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/Common.h
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPECONVERTER
-#define ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPECONVERTER
+#ifndef ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPE_CONVERTER_COMMON
+#define ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPE_CONVERTER_COMMON
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/TensorShape.h"
@@ -106,46 +106,32 @@ inline TensorComponentType from_ckw(const ckw::TensorComponentType &component)
{
case ckw::TensorComponentType::OffsetFirstElement:
return TensorComponentType::OffsetFirstElement;
- break;
case ckw::TensorComponentType::Stride0:
return TensorComponentType::Stride0;
- break;
case ckw::TensorComponentType::Stride1:
return TensorComponentType::Stride1;
- break;
case ckw::TensorComponentType::Stride2:
return TensorComponentType::Stride2;
- break;
case ckw::TensorComponentType::Stride3:
return TensorComponentType::Stride3;
- break;
case ckw::TensorComponentType::Stride4:
return TensorComponentType::Stride4;
- break;
case ckw::TensorComponentType::Dim0:
return TensorComponentType::Dim0;
- break;
case ckw::TensorComponentType::Dim1:
return TensorComponentType::Dim1;
- break;
case ckw::TensorComponentType::Dim2:
return TensorComponentType::Dim2;
- break;
case ckw::TensorComponentType::Dim3:
return TensorComponentType::Dim3;
- break;
case ckw::TensorComponentType::Dim4:
return TensorComponentType::Dim4;
- break;
case ckw::TensorComponentType::Dim1xDim2:
return TensorComponentType::Dim1xDim2;
- break;
case ckw::TensorComponentType::Dim2xDim3:
return TensorComponentType::Dim2xDim3;
- break;
case ckw::TensorComponentType::Dim1xDim2xDim3:
return TensorComponentType::Dim1xDim2xDim3;
- break;
case ckw::TensorComponentType::Unknown:
return TensorComponentType::Unknown;
default:
@@ -160,16 +146,12 @@ inline ckw::TensorStorageType to_ckw(const TensorStorageType &storage)
{
case TensorStorageType::ClBufferUint8Ptr:
return ckw::TensorStorageType::BufferUint8Ptr;
- break;
case TensorStorageType::ClImage2dReadOnly:
return ckw::TensorStorageType::Texture2dReadOnly;
- break;
case TensorStorageType::ClImage2dWriteOnly:
return ckw::TensorStorageType::Texture2dWriteOnly;
- break;
case TensorStorageType::Unknown:
return ckw::TensorStorageType::Unknown;
- break;
default:
ARM_COMPUTE_ERROR("Unknown tensor storage type");
return ckw::TensorStorageType::Unknown;
@@ -181,16 +163,12 @@ inline TensorStorageType from_ckw(const ckw::TensorStorageType &storage)
{
case ckw::TensorStorageType::BufferUint8Ptr:
return TensorStorageType::ClBufferUint8Ptr;
- break;
case ckw::TensorStorageType::Texture2dReadOnly:
return TensorStorageType::ClImage2dReadOnly;
- break;
case ckw::TensorStorageType::Texture2dWriteOnly:
return TensorStorageType::ClImage2dWriteOnly;
- break;
case ckw::TensorStorageType::Unknown:
return TensorStorageType::Unknown;
- break;
default:
ARM_COMPUTE_ERROR("Unknown CKW tensor storage type");
return TensorStorageType::Unknown;
@@ -199,4 +177,4 @@ inline TensorStorageType from_ckw(const ckw::TensorStorageType &storage)
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
-#endif /* ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPECONVERTER */
+#endif /* ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPE_CONVERTER_COMMON */
diff --git a/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/ElementwiseBinary.h b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/ElementwiseBinary.h
new file mode 100644
index 0000000000..9cb022fc10
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/ckw_driver/components/utils/type_converter/ElementwiseBinary.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPE_CONVERTER_ELEMENTWISEBINARY
+#define ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPE_CONVERTER_ELEMENTWISEBINARY
+
+#include "ckw/types/Operators.h"
+#include "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+inline ckw::BinaryOp to_ckw(const ElementwiseBinaryCommonAttributes &attributes)
+{
+ switch(attributes.operation())
+ {
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Add:
+ return ckw::BinaryOp::Add;
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Sub:
+ return ckw::BinaryOp::Sub;
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Div:
+ return ckw::BinaryOp::Div;
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Mul:
+ return ckw::BinaryOp::Mul;
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Min:
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Max:
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Power:
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::Prelu:
+ case ElementwiseBinaryCommonAttributes::ElementwiseOp::SquaredDiff:
+ default:
+ ARM_COMPUTE_ERROR("Cannot convert ElementwiseBinaryCommonAttributes to corresponding ckw::BinaryOp");
+ }
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_CKW_DRIVER_COMPONENTS_UTILS_TYPE_CONVERTER_ELEMENTWISEBINARY */
diff --git a/src/dynamic_fusion/sketch/gpu/components/utils/type_printer/ElementwiseBinary.h b/src/dynamic_fusion/sketch/gpu/components/utils/type_printer/ElementwiseBinary.h
new file mode 100644
index 0000000000..bc7133f4df
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/components/utils/type_printer/ElementwiseBinary.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_UTILS_TYPE_PRINTER_ELEMENTWISEBINARY
+#define ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_UTILS_TYPE_PRINTER_ELEMENTWISEBINARY
+
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentElementwiseBinary.h"
+
+#include <map>
+#include <ostream>
+#include <sstream>
+#include <string>
+
+namespace arm_compute
+{
+/** Type printers for all types related to the component @ref ClComponentElementwiseBinary
+ */
+
+using namespace experimental::dynamic_fusion;
+
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ClComponentElementwiseBinary::Attributes::ElementwiseOp type.
+ *
+ * @param[out] os Output stream.
+ * @param[in] op arm_compute::experimental::dynamic_fusion::ClComponentElementwiseBinary::Attributes::ElementwiseOp type to output.
+ *
+ * @return Modified output stream.
+ */
+inline ::std::ostream &operator<<(::std::ostream &os, const ClComponentElementwiseBinary::Attributes::ElementwiseOp &op)
+{
+ const std::map<ClComponentElementwiseBinary::Attributes::ElementwiseOp, std::string> op_name =
+ {
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Add, "add" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Div, "div" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Max, "max" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Min, "min" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Mul, "mul" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Power, "power" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Prelu, "prelu" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::SquaredDiff, "squareddiff" },
+ { ClComponentElementwiseBinary::Attributes::ElementwiseOp::Sub, "sub" }
+ };
+ os << op_name.at(op);
+ return os;
+}
+/** Formatted output of the arm_compute::experimental::dynamic_fusion::ClComponentElementwiseBinary::Attributes::ElementwiseOp type.
+ *
+ * @param[in] op arm_compute::experimental::dynamic_fusion::ClComponentElementwiseBinary::Attributes::ElementwiseOp type to output.
+ *
+ * @return Formatted string.
+ */
+inline std::string to_string(const ClComponentElementwiseBinary::Attributes::ElementwiseOp &op)
+{
+ std::stringstream str;
+ str << op;
+ return str.str();
+}
+} // namespace arm_compute
+#endif /* ACL_SRC_DYNAMIC_FUSION_SKETCH_GPU_COMPONENTS_UTILS_TYPE_PRINTER_ELEMENTWISEBINARY */
diff --git a/tests/validation/dynamic_fusion/gpu/Integration.cpp b/tests/validation/dynamic_fusion/gpu/Integration.cpp
index 3a915779c1..89cca5cd66 100644
--- a/tests/validation/dynamic_fusion/gpu/Integration.cpp
+++ b/tests/validation/dynamic_fusion/gpu/Integration.cpp
@@ -63,6 +63,7 @@ namespace validation
TEST_SUITE(CL)
TEST_SUITE(INTEGRATION)
TEST_SUITE(DYNAMIC_FUSION)
+#ifndef ACL_INTERNAL_TEST_CKW_IN_DF // Conv2d is not ported to ckw yet. COMPMID-6259
TEST_CASE(Conv2d, framework::DatasetMode::ALL)
{
/* Computation:
@@ -152,6 +153,7 @@ TEST_CASE(Conv2d, framework::DatasetMode::ALL)
RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
validate(CLAccessor(t_dst), ref_t_dst_nchw, tolerance_f32);
}
+#endif // ACL_INTERNAL_TEST_CKW_IN_DF
TEST_CASE(Add_Output_Add_Output, framework::DatasetMode::ALL)
{
/* Computation:
@@ -358,6 +360,7 @@ TEST_CASE(Add_Output_Add_Cast_Cast_Output, framework::DatasetMode::ALL)
validate(CLAccessor(t_out_1), ref_t_out_1, tolerance_cast_f32);
}
+#ifndef ACL_INTERNAL_TEST_CKW_IN_DF // Conv2d is not ported to ckw yet. COMPMID-6259
TEST_CASE(Conv2d_Sigmoid_DepthwiseConv2d_Mul, framework::DatasetMode::ALL)
{
// (tensor0)
@@ -422,7 +425,7 @@ TEST_CASE(Conv2d_Sigmoid_DepthwiseConv2d_Mul, framework::DatasetMode::ALL)
// Initialize the context.
CLScheduler::get().default_reinit();
- auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+ auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
GpuWorkloadContext context(&cl_compile_ctx);
auto tensor0_info = context.create_tensor_info(conv2d_src_shape, 1, DataType::F32, DataLayout::NHWC);
@@ -431,8 +434,8 @@ TEST_CASE(Conv2d_Sigmoid_DepthwiseConv2d_Mul, framework::DatasetMode::ALL)
GpuWorkloadSketch sketch0(&context);
Conv2dAttributes conv2d_attr;
- auto tensor1_info = context.create_tensor_info(conv2d_wei_shape, 1, DataType::F32, DataLayout::NHWC);
- auto tensor2_info = context.create_tensor_info(conv2d_bia_shape, 1, DataType::F32, DataLayout::NHWC);
+ auto tensor1_info = context.create_tensor_info(conv2d_wei_shape, 1, DataType::F32, DataLayout::NHWC);
+ auto tensor2_info = context.create_tensor_info(conv2d_bia_shape, 1, DataType::F32, DataLayout::NHWC);
ARM_COMPUTE_EXPECT(GpuConv2d::validate_op(sketch0, &tensor0_info, &tensor1_info, &tensor2_info, conv2d_attr), framework::LogLevel::ERRORS);
auto ans_info = GpuConv2d::create_op(sketch0, &tensor0_info, &tensor1_info, &tensor2_info, conv2d_attr);
@@ -440,8 +443,8 @@ TEST_CASE(Conv2d_Sigmoid_DepthwiseConv2d_Mul, framework::DatasetMode::ALL)
ans_info = GpuSigmoid::create_op(sketch0, ans_info);
DepthwiseConv2dAttributes dwc_attr;
- auto tensor3_info = context.create_tensor_info(dwc_wei_shape, 1, DataType::F32, DataLayout::NHWC);
- auto tensor4_info = context.create_tensor_info(dwc_bia_shape, 1, DataType::F32, DataLayout::NHWC);
+ auto tensor3_info = context.create_tensor_info(dwc_wei_shape, 1, DataType::F32, DataLayout::NHWC);
+ auto tensor4_info = context.create_tensor_info(dwc_bia_shape, 1, DataType::F32, DataLayout::NHWC);
ARM_COMPUTE_EXPECT(!GpuDepthwiseConv2d::validate_op(sketch0, ans_info, &tensor3_info, &tensor4_info, dwc_attr), framework::LogLevel::ERRORS);
auto tensor5_info = context.create_tensor_info();
@@ -497,9 +500,9 @@ TEST_CASE(Conv2d_Sigmoid_DepthwiseConv2d_Mul, framework::DatasetMode::ALL)
// Allocate the auxiliary tensors.
for(auto &data : runtime0.get_auxiliary_tensors())
{
- auto tensor = std::get<0>(data);
+ auto tensor = std::get<0>(data);
auto &tensor_info = std::get<1>(data);
- auto mem_req = std::get<2>(data);
+ auto mem_req = std::get<2>(data);
tensor->allocator()->init(tensor_info, mem_req.alignment);
tensor->allocator()->allocate();
@@ -507,9 +510,9 @@ TEST_CASE(Conv2d_Sigmoid_DepthwiseConv2d_Mul, framework::DatasetMode::ALL)
for(auto &data : runtime1.get_auxiliary_tensors())
{
- auto tensor = std::get<0>(data);
+ auto tensor = std::get<0>(data);
auto &tensor_info = std::get<1>(data);
- auto mem_req = std::get<2>(data);
+ auto mem_req = std::get<2>(data);
tensor->allocator()->init(tensor_info, mem_req.alignment);
tensor->allocator()->allocate();
@@ -556,11 +559,13 @@ TEST_CASE(Conv2d_Sigmoid_DepthwiseConv2d_Mul, framework::DatasetMode::ALL)
const auto ref_dwc_bia_nchw = reference::permute(ref_dwc_bia, nhwc_to_nchw);
const auto ref_dwc_dst_nchw = reference::depthwise_convolution(ref_sigmoid_dst_nchw, ref_dwc_wei_nchw, ref_dwc_bia_nchw, dwc_dst_shape_nchw, PadStrideInfo(), 1);
- const auto ref_mul_dst_nchw = reference::pixel_wise_multiplication<float, float, float>(ref_dwc_dst_nchw, ref_conv2d_bia_nchw, 1.0, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP, DataType::F32);
+ const auto ref_mul_dst_nchw = reference::pixel_wise_multiplication<float, float, float>(ref_dwc_dst_nchw, ref_conv2d_bia_nchw, 1.0, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP,
+ DataType::F32);
constexpr RelativeTolerance<float> tolerance(0.001f);
validate(CLAccessor(tensor6), ref_mul_dst_nchw, tolerance);
}
+#endif // ACL_INTERNAL_TEST_CKW_IN_DF
TEST_SUITE(Invalid_Fusion_Should_Fail)
TEST_CASE(Multiple_Complex_Ops_0, framework::DatasetMode::ALL)