path: root/compute_kernel_writer
author    Gunes Bayir <gunes.bayir@arm.com>    2023-08-17 11:04:02 +0100
committer Gunes Bayir <gunes.bayir@arm.com>    2023-08-18 15:34:49 +0000
commit    47a396e3aae96f2dcad44f4e0d6cb6b87b368395 (patch)
tree      454123cb8c47a90d7baf57b0f296c2baa18615de /compute_kernel_writer
parent    580ecd750ed76c72d59a8b8d23566686e6aa9c7b (diff)
download  ComputeLibrary-47a396e3aae96f2dcad44f4e0d6cb6b87b368395.tar.gz
Implement load/store API functions
Add KernelWriter API functions for loading and storing tiles with and without dilations.

Resolves: COMPMID-5791, COMPMID-6389
Change-Id: I9b1f5b2f081fa54e7bda488aac69ed8d43d1d35c
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10152
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
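As a usage sketch (not part of the patch; it mirrors the cases exercised by the new CLKernelWriterOpLoadStoreTest below, and the tile/tensor names are illustrative), the new API is driven roughly as follows:

    // Minimal sketch, assuming a default-constructible CLKernelWriter as it is
    // used (via KernelWriterInterceptor<CLKernelWriter>) in the validation test.
    CLKernelWriter writer;

    TileOperand tile  = writer.declare_tile("tile", TileInfo(DataType::Fp32, 2, 3));
    TileOperand x     = writer.declare_tile("x", TileInfo(DataType::Int32));
    TileOperand y     = writer.declare_tile("y", TileInfo(DataType::Int32));
    TileOperand z     = writer.declare_tile("z", TileInfo(DataType::Int32));
    TileOperand batch = writer.declare_tile("b", TileInfo(DataType::Int32));

    TensorShape   tensor_shape{10, 10, 10, 10};
    TensorInfo    tensor_info(DataType::Fp32, tensor_shape, TensorDataLayout::Nhwc, 0 /* id */);
    TensorOperand tensor = writer.declare_tensor_argument("tensor", tensor_info);
    TensorSampler sampler(TensorStorageType::BufferUint8Ptr, TensorSamplerFormat::Dim0_Dim1_Dim2,
                          TensorSamplerAddressModeX::None, TensorSamplerAddressModeY::None,
                          TensorSamplerAddressModeZ::None);

    writer.op_load(tile, tensor, sampler, x, y, z, batch);   // tensor memory -> tile
    writer.op_store(tensor, tile, sampler, x, y, z, batch);  // tile -> tensor memory

The dilated variants take two extra int32 tile operands for the x and y dilations, as shown in the test configurations further down.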
Diffstat (limited to 'compute_kernel_writer')
-rw-r--r--  compute_kernel_writer/include/ckw/KernelWriter.h                          41
-rw-r--r--  compute_kernel_writer/src/cl/CLKernelWriter.cpp                          126
-rw-r--r--  compute_kernel_writer/src/cl/CLKernelWriter.h                             55
-rw-r--r--  compute_kernel_writer/validation/Validation.cpp                           17
-rw-r--r--  compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h  313
5 files changed, 544 insertions, 8 deletions
diff --git a/compute_kernel_writer/include/ckw/KernelWriter.h b/compute_kernel_writer/include/ckw/KernelWriter.h
index 2a347e9ae0..f77798e2ab 100644
--- a/compute_kernel_writer/include/ckw/KernelWriter.h
+++ b/compute_kernel_writer/include/ckw/KernelWriter.h
@@ -38,7 +38,9 @@ class Kernel;
/** Forward Declarations */
class TensorInfo;
+class TensorSampler;
class TileInfo;
+
enum class TargetArchitecture;
enum class TargetLanguage;
@@ -128,6 +130,45 @@ public:
*/
virtual void op_write_raw_code(const std::string &raw_code) = 0;
+ /** Load the data from the tensor memory to the tile using the sampling information.
+ *
+ * @param[in] tile_op The tile to be loaded.
+ * @param[in] tensor_op The tensor to be read.
+ * @param[in] sampler The tensor sampling information.
+ * @param[in] x x-coordinate
+ * @param[in] y y-coordinate
+ * @param[in] z z-coordinate
+ * @param[in] batch batch offset
+ */
+ virtual void op_load(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch) = 0;
+
+ /** Load the data from the tensor memory to the tile in a dilated way using the sampling information.
+ *
+ * Similar to @ref KernelWriter::op_load(), but with dilations applied in the x and y dimensions while reading.
+ *
+ * @param[in] dilation_x Dilation while reading in x-dimension
+ * @param[in] dilation_y Dilation while reading in y-dimension
+ */
+ virtual void op_load_dilated(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const TileOperand &dilation_x, const TileOperand &dilation_y) = 0;
+
+ /** Store the data to the tensor memory from the tile using the sampling information.
+ *
+ * Similar to @ref KernelWriter::op_load()
+ */
+ virtual void op_store(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch) = 0;
+
+ /** Store the data to the tensor memory from the tile in a dilated way using the sampling information.
+ *
+ * Similar to @ref KernelWriter::op_load_dilated()
+ */
+ virtual void op_store_dilated(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const TileOperand &dilation_x, const TileOperand &dilation_y) = 0;
+
protected:
int32_t id_space() const;
diff --git a/compute_kernel_writer/src/cl/CLKernelWriter.cpp b/compute_kernel_writer/src/cl/CLKernelWriter.cpp
index 88ada37d71..b4df5c5f50 100644
--- a/compute_kernel_writer/src/cl/CLKernelWriter.cpp
+++ b/compute_kernel_writer/src/cl/CLKernelWriter.cpp
@@ -25,12 +25,18 @@
#include "src/cl/CLKernelWriter.h"
#include "ckw/Error.h"
#include "ckw/Kernel.h"
+#include "ckw/TensorSampler.h"
#include "ckw/TileOperand.h"
+#include "ckw/types/MemoryOperation.h"
#include "ckw/types/TargetLanguage.h"
#include "src/ITensorComponent.h"
#include "src/cl/CLHelpers.h"
#include "src/cl/CLTensorArgument.h"
#include "src/cl/CLTile.h"
+#include "src/cl/helpers/CLMemoryOpBufferHelper.h"
+#include "src/cl/helpers/CLMemoryOpImage2dHelper.h"
+#include "src/cl/helpers/ICLMemoryOpHelper.h"
+
#include <cstdint>
namespace ckw
@@ -160,4 +166,124 @@ void CLKernelWriter::op_write_raw_code(const std::string &raw_code)
append_code(raw_code);
}
+const CLTile &CLKernelWriter::to_cl_tile(const TileOperand &operand)
+{
+ const auto &tile = get_tile(operand);
+#ifdef COMPUTE_KERNEL_WRITER_ASSERTS_ENABLED
+ // Check if the tile is a CLTile created by this kernel writer.
+ {
+ bool found = false;
+ for(const auto &t : _tiles)
+ {
+ if(&tile == t.get())
+ {
+ found = true;
+ break;
+ }
+ }
+ if(!found)
+ {
+ for(const auto &t : _tensors)
+ {
+ const auto components = t->components();
+ for(const auto component : components)
+ {
+ if(&tile == &component->tile())
+ {
+ found = true;
+ break;
+ }
+ }
+ }
+ }
+ CKW_ASSERT_MSG(found, "The tile is not found!");
+ }
+#endif // COMPUTE_KERNEL_WRITER_ASSERTS_ENABLED
+ return static_cast<const CLTile &>(tile);
+}
+
+void CLKernelWriter::op_load(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch)
+{
+ const CLTile dilation_x("1", DataType::Int32);
+ const CLTile dilation_y("1", DataType::Int32);
+
+ op_load_store(MemoryOperation::Load, tile_op, tensor_op, sampler, x, y, z, batch, dilation_x, dilation_y);
+}
+
+void CLKernelWriter::op_load_dilated(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const TileOperand &dilation_x, const TileOperand &dilation_y)
+{
+ const auto &dil_x_tile = to_cl_tile(dilation_x);
+ const auto &dil_y_tile = to_cl_tile(dilation_y);
+
+ op_load_store(MemoryOperation::Load, tile_op, tensor_op, sampler, x, y, z, batch, dil_x_tile, dil_y_tile);
+}
+
+void CLKernelWriter::op_store(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch)
+{
+ const CLTile dilation_x("1", DataType::Int32);
+ const CLTile dilation_y("1", DataType::Int32);
+
+ op_load_store(MemoryOperation::Store, tile_op, tensor_op, sampler, x, y, z, batch, dilation_x, dilation_y);
+}
+
+void CLKernelWriter::op_store_dilated(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const TileOperand &dilation_x, const TileOperand &dilation_y)
+{
+ const auto &dil_x_tile = to_cl_tile(dilation_x);
+ const auto &dil_y_tile = to_cl_tile(dilation_y);
+
+ op_load_store(MemoryOperation::Store, tile_op, tensor_op, sampler, x, y, z, batch, dil_x_tile, dil_y_tile);
+}
+
+void CLKernelWriter::op_load_store(MemoryOperation op, const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const CLTile &dilation_x, const CLTile &dilation_y)
+{
+ CKW_UNUSED(dilation_x);
+ CKW_ASSERT(dilation_x.scalar(0,0).str == "1"); // Dilation in x dimension is not implemented yet
+
+ ITensor &tensor = get_tensor(tensor_op);
+
+ std::unique_ptr<ICLMemoryOpHelper> helper;
+ switch(sampler.storage())
+ {
+ case TensorStorageType::BufferUint8Ptr:
+ helper = std::make_unique<CLMemoryOpBufferHelper>(this, &tensor, &sampler, op);
+ break;
+ case TensorStorageType::Texture2dReadOnly:
+ case TensorStorageType::Texture2dWriteOnly:
+ helper = std::make_unique<CLMemoryOpImage2dHelper>(this, &tensor, &sampler, op);
+ break;
+ default:
+ CKW_THROW_MSG("Unsupported tensor storage");
+ }
+
+ const auto &tile = to_cl_tile(tile_op);
+ const auto &x_tile = to_cl_tile(x);
+ const auto &y_tile = to_cl_tile(y);
+ const auto &z_tile = to_cl_tile(z);
+ const auto &batch_tile = to_cl_tile(batch);
+
+ helper->initialize(&tile, &x_tile, &z_tile, &batch_tile);
+
+ for(int row = 0; row < tile.info().height(); ++row)
+ {
+ std::string coord_y = y_tile.scalar(0, 0).str + " + " + std::to_string(row);
+
+ if(dilation_y.scalar(0, 0).str != "1")
+ {
+ coord_y += " * " + dilation_y.scalar(0, 0).str;
+ }
+
+ helper->write_row(row, coord_y);
+ }
+
+ helper->finalize();
+}
+
} // namespace ckw
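For reference, a standalone sketch of how the row loop in op_load_store above composes the per-row y coordinate string (names are illustrative; the real code takes the strings from CLTile::scalar(0, 0).str and hands them to the storage-specific helper's write_row):

    // Illustrative only: reproduces the coordinate strings seen in the expected
    // outputs of CLKernelWriterOpLoadStoreTest for a height-2 tile with a y tile
    // named "y" and a y dilation tile named "y_dilation".
    #include <iostream>
    #include <string>

    int main()
    {
        const std::string y          = "y";
        const std::string dilation_y = "y_dilation"; // a constant "1" skips the multiplication
        for(int row = 0; row < 2; ++row)
        {
            std::string coord_y = y + " + " + std::to_string(row);
            if(dilation_y != "1")
            {
                // In the generated OpenCL, "*" binds tighter than "+", so only the
                // row offset is scaled: "y + 1 * y_dilation" == y + (1 * y_dilation).
                coord_y += " * " + dilation_y;
            }
            std::cout << coord_y << "\n"; // prints "y + 0 * y_dilation", "y + 1 * y_dilation"
        }
        return 0;
    }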
diff --git a/compute_kernel_writer/src/cl/CLKernelWriter.h b/compute_kernel_writer/src/cl/CLKernelWriter.h
index 5df148da7b..a40698d7bb 100644
--- a/compute_kernel_writer/src/cl/CLKernelWriter.h
+++ b/compute_kernel_writer/src/cl/CLKernelWriter.h
@@ -36,6 +36,11 @@ namespace ckw
class CLTile;
class CLTensorArgument;
+class TensorSampler;
+class TileOperand;
+class TensorOperand;
+
+enum class MemoryOperation;
/** OpenCL kernel writer. */
class CLKernelWriter : public KernelWriter
@@ -76,9 +81,43 @@ public:
/** Declare a tile given name and tile information
*
* Similar to @ref KernelWriter::declare_tile()
- */
+ */
TileOperand declare_tile(const std::string &name, const TileInfo &tile_info) override;
+ // =============================================================================================
+ // Memory Operations
+ // =============================================================================================
+
+ /** Load the data from the tensor memory to the tile using the sampling information.
+ *
+ * Similar to @ref KernelWriter::op_load()
+ */
+ void op_load(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch) override;
+
+ /** Load the data from the tensor memory to the tile in a dilated way using the sampling information.
+ *
+ * Similar to @ref KernelWriter::op_load_dilated()
+ */
+ void op_load_dilated(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const TileOperand &dilation_x, const TileOperand &dilation_y) override;
+
+ /** Store the data to the tensor memory from the tile using the sampling information.
+ *
+ * Similar to @ref KernelWriter::op_store()
+ */
+ void op_store(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch) override;
+
+ /** Store the data to the tensor memory from the tile in a dilated way using the sampling information.
+ *
+ * Similar to @ref KernelWriter::op_store_dilated()
+ */
+ void op_store_dilated(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const TileOperand &dilation_x, const TileOperand &dilation_y) override;
+
protected:
/** Append the specified code to the kernel body source code. */
template <typename T, typename... TArgs>
@@ -98,6 +137,20 @@ protected:
/** Get the current kernel body source code. */
const std::string &body_source_code() const;
+// For helper functions
+private:
+ /** Return @ref CLTile object from the @ref TileOperand object.
+ *
+ * This function performs the appropriate checks before type casting.
+ */
+ const CLTile &to_cl_tile(const TileOperand &operand);
+
+ /** Helper function to consolidate all load/store logic in this class */
+ void op_load_store(MemoryOperation op, const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const CLTile &dilation_x, const CLTile &dilation_y);
+
+// For attributes
private:
/** This string contains the kernel body source code, not the full CL source code.
* The full source code will only be generated when the user calls @ref KernelWriter::emit_kernel.
diff --git a/compute_kernel_writer/validation/Validation.cpp b/compute_kernel_writer/validation/Validation.cpp
index f8ee27cee0..3755986cf4 100644
--- a/compute_kernel_writer/validation/Validation.cpp
+++ b/compute_kernel_writer/validation/Validation.cpp
@@ -22,14 +22,15 @@
* SOFTWARE.
*/
-#include "tests/CLConstantTileTest.hpp"
-#include "tests/CLKernelWriterCommentTest.h"
-#include "tests/CLKernelWriterDeclareTileTest.h"
-#include "tests/CLTensorArgumentTest.h"
-#include "tests/CLTileTest.hpp"
-#include "tests/TensorBitMaskTest.h"
-#include "tests/UtilsTest.h"
+#include "validation/tests/CLConstantTileTest.hpp"
+#include "validation/tests/CLKernelWriterCommentTest.h"
+#include "validation/tests/CLKernelWriterDeclareTileTest.h"
+#include "validation/tests/CLTensorArgumentTest.h"
+#include "validation/tests/CLTileTest.hpp"
+#include "validation/tests/TensorBitMaskTest.h"
+#include "validation/tests/UtilsTest.h"
#include "validation/tests/CLKernelWriterDeclareTensorTest.h"
+#include "validation/tests/CLKernelWriterOpLoadStoreTest.h"
#include <memory>
#include <vector>
@@ -75,6 +76,7 @@ int32_t main()
const auto test22 = std::make_unique<CLTensorArgumentStoragesUsedTest>();
const auto test23 = std::make_unique<CLTensorArgumentComponentsUsedPassByValueTrueDynamicDimTrueTest>();
const auto test24 = std::make_unique<CLKernelWriterDeclareTensorTest>();
+ const auto test25 = std::make_unique<CLKernelWriterOpLoadStoreTest>();
tests.push_back(test3.get());
tests.push_back(test4.get());
@@ -100,6 +102,7 @@ int32_t main()
tests.push_back(test22.get());
tests.push_back(test23.get());
tests.push_back(test24.get());
+ tests.push_back(test25.get());
#endif /* COMPUTE_KERNEL_WRITER_OPENCL_ENABLED */
bool all_test_passed = true;
diff --git a/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h b/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h
new file mode 100644
index 0000000000..0f4afc8bf3
--- /dev/null
+++ b/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef CKW_VALIDATION_TESTS_CLKERNELWRITEROPLOADSTORETEST_H
+#define CKW_VALIDATION_TESTS_CLKERNELWRITEROPLOADSTORETEST_H
+
+#include "ckw/TileInfo.h"
+#include "ckw/types/DataType.h"
+#include "src/cl/CLKernelWriter.h"
+#include "validation/tests/common/KernelWriterInterceptor.h"
+#include "validation/tests/common/Common.h"
+
+#include "ckw/TensorSampler.h"
+#include "ckw/types/MemoryOperation.h"
+#include "ckw/types/TensorSamplerTypes.h"
+
+#include <vector>
+
+namespace ckw
+{
+
+class CLKernelWriterOpLoadStoreTest : public ITest
+{
+private:
+ using AddressModeX = TensorSamplerAddressModeX;
+ using AddressModeY = TensorSamplerAddressModeY;
+ using AddressModeZ = TensorSamplerAddressModeZ;
+ using Format = TensorSamplerFormat;
+ using Storage = TensorStorageType;
+
+ struct Coordinates
+ {
+ Coordinates(std::string x, std::string y, std::string z, std::string batch)
+ : x(x), y(y), z(z), batch(batch)
+ {
+ }
+
+ std::string x;
+ std::string y;
+ std::string z;
+ std::string batch;
+ };
+
+ struct SamplerData
+ {
+ SamplerData(Format format, AddressModeX mode_x, AddressModeY mode_y, AddressModeZ mode_z)
+ : format(format), mode_x(mode_x), mode_y(mode_y), mode_z(mode_z)
+ {
+ }
+
+ Format format;
+ AddressModeX mode_x;
+ AddressModeY mode_y;
+ AddressModeZ mode_z;
+ };
+
+ struct Dilations
+ {
+ Dilations(std::string dilation_x, std::string dilation_y)
+ : dilation_x(dilation_x), dilation_y(dilation_y)
+ {
+ }
+
+ std::string dilation_x;
+ std::string dilation_y;
+ };
+
+ using CLKernelWriterOpLoadStoreConfig = std::tuple<MemoryOperation, TileInfo, TensorStorageType, SamplerData, Coordinates, Dilations, std::string>;
+
+public:
+ CLKernelWriterOpLoadStoreTest()
+ {
+ // Cases
+ const std::string load_fp_2x3_tile = R"_(
+tile_0 = vload3(0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (y + 0) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (b) * G0__tensor_stride3));
+tile_1 = vload3(0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (y + 1) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (b) * G0__tensor_stride3));
+)_";
+ const std::string load_half_2x4_tile_image_clamp_y = R"_(
+tile_0 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((x) >> 2, (y + 0 + (z) * G0__tensor_dim1 + (b) * G0__tensor_dim1 * G0__tensor_dim2)));
+tile_1 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((x) >> 2, (y + 1 + (z) * G0__tensor_dim1 + (b) * G0__tensor_dim1 * G0__tensor_dim2)));
+)_";
+ const std::string store_fp_2x3_tile = R"_(
+vstore3(tile_0, 0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (y + 0) * G0__tensor_stride1 + (b) * G0__tensor_stride3));
+vstore3(tile_1, 0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (y + 1) * G0__tensor_stride1 + (b) * G0__tensor_stride3));
+)_";
+ const std::string store_int8_4x4_y_dilation_batch_eq_0 = R"_(
+vstore4(tile_0, 0, (__global char*)(G0__tensor_ptr + (1) * sizeof(char) + (y + 0 * y_dilation) * G0__tensor_stride1 + (z) * G0__tensor_stride2));
+vstore4(tile_1, 0, (__global char*)(G0__tensor_ptr + (1) * sizeof(char) + (y + 1 * y_dilation) * G0__tensor_stride1 + (z) * G0__tensor_stride2));
+vstore4(tile_2, 0, (__global char*)(G0__tensor_ptr + (1) * sizeof(char) + (y + 2 * y_dilation) * G0__tensor_stride1 + (z) * G0__tensor_stride2));
+vstore4(tile_3, 0, (__global char*)(G0__tensor_ptr + (1) * sizeof(char) + (y + 3 * y_dilation) * G0__tensor_stride1 + (z) * G0__tensor_stride2));
+)_";
+ // tensor dimension is 10
+ const std::string load_fp_2x3_tile_x_overlapping_min_y_eq_0_batch_eq_1 = R"_(
+if(x > 0)
+{
+tile_0 = vload3(0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (0 + 0) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (1) * G0__tensor_stride3));
+tile_1 = vload3(0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (0 + 1) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (1) * G0__tensor_stride3));
+}
+else
+{
+tile_0.s0 = *((__global float*)(G0__tensor_ptr + (x + 0) * sizeof(float) + (0 + 0) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (1) * G0__tensor_stride3));
+tile_1.s0 = *((__global float*)(G0__tensor_ptr + (x + 0) * sizeof(float) + (0 + 1) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (1) * G0__tensor_stride3));
+}
+)_";
+ const std::string store_fp_2x3_tile_x_overlapping_min_y_clamp_to_border_max_only = R"_(
+if(x > 0)
+{
+if(y + 0 < G0__tensor_dim1)
+{
+vstore3(tile_0, 0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (y + 0) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (b) * G0__tensor_stride3));
+}
+else
+{
+tile_0 = 0.0f;
+}
+if(y + 1 < G0__tensor_dim1)
+{
+vstore3(tile_1, 0, (__global float*)(G0__tensor_ptr + (x) * sizeof(float) + (y + 1) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (b) * G0__tensor_stride3));
+}
+else
+{
+tile_1 = 0.0f;
+}
+}
+else
+{
+if(y + 0 < G0__tensor_dim1)
+{
+*((__global float*)(G0__tensor_ptr + (x + 0) * sizeof(float) + (y + 0) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (b) * G0__tensor_stride3)) = tile_0.s0;
+}
+else
+{
+tile_0.s0 = 0.0f;
+}
+if(y + 1 < G0__tensor_dim1)
+{
+*((__global float*)(G0__tensor_ptr + (x + 0) * sizeof(float) + (y + 1) * G0__tensor_stride1 + (z) * G0__tensor_stride2 + (b) * G0__tensor_stride3)) = tile_1.s0;
+}
+else
+{
+tile_1.s0 = 0.0f;
+}
+}
+)_";
+ const std::string store_half_2x4_tile_x_image_y_dilation = R"_(
+write_imageh(G0__tensor_img2d, (int2)((x) >> 2, (0 + 0 * y_dilation + (z) * G0__tensor_dim1 + (1) * G0__tensor_dim1 * G0__tensor_dim2)), tile_0);
+write_imageh(G0__tensor_img2d, (int2)((x) >> 2, (0 + 1 * y_dilation + (z) * G0__tensor_dim1 + (1) * G0__tensor_dim1 * G0__tensor_dim2)), tile_1);
+)_";
+
+ // Configs Bundled
+ _configs = {
+ // op, tile, storage, sampler, coordinates, dilation, expected
+ {
+ MemoryOperation::Load,
+ TileInfo(DataType::Fp32, 2, 3),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::None, AddressModeY::None, AddressModeZ::None),
+ Coordinates("x", "y", "z", "b"),
+ Dilations("1", "1"),
+ load_fp_2x3_tile
+ },
+ {
+ MemoryOperation::Load,
+ TileInfo(DataType::Fp16, 2, 4),
+ TensorStorageType::Texture2dReadOnly,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::None, AddressModeY::ClampToBorderMaxOnly, AddressModeZ::None),
+ Coordinates("x", "y", "z", "b"),
+ Dilations("1", "1"),
+ load_half_2x4_tile_image_clamp_y
+ },
+ {
+ MemoryOperation::Store,
+ TileInfo(DataType::Fp32, 2, 3),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1xDim2_1,AddressModeX::None, AddressModeY::None, AddressModeZ::None),
+ Coordinates("x", "y", "z", "b"),
+ Dilations("1", "1"),
+ store_fp_2x3_tile
+ },
+ {
+ MemoryOperation::Store,
+ TileInfo(DataType::Int8, 4, 4),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::None, AddressModeY::None, AddressModeZ::None),
+ Coordinates("1", "y", "z", "0"),
+ Dilations("1", "y_dilation"),
+ store_int8_4x4_y_dilation_batch_eq_0
+ },
+ {
+ MemoryOperation::Load,
+ TileInfo(DataType::Fp32, 2, 3),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::OverlappingMin, AddressModeY::None, AddressModeZ::None),
+ Coordinates("x", "0", "z", "1"),
+ Dilations("1", "1"),
+ load_fp_2x3_tile_x_overlapping_min_y_eq_0_batch_eq_1
+ },
+ {
+ MemoryOperation::Store,
+ TileInfo(DataType::Fp32, 2, 3),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::OverlappingMin, AddressModeY::ClampToBorderMaxOnly, AddressModeZ::None),
+ Coordinates("x", "y", "z", "b"),
+ Dilations("1", "1"),
+ store_fp_2x3_tile_x_overlapping_min_y_clamp_to_border_max_only
+ },
+ {
+ MemoryOperation::Store,
+ TileInfo(DataType::Fp16, 2, 4),
+ TensorStorageType::Texture2dWriteOnly,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::None, AddressModeY::None, AddressModeZ::None),
+ Coordinates("x", "0", "z", "1"),
+ Dilations("1", "y_dilation"),
+ store_half_2x4_tile_x_image_y_dilation
+ }
+ };
+ }
+
+ bool run() override
+ {
+ bool all_tests_passed = true;
+ int32_t test_idx = 0;
+
+ for(auto _config: _configs)
+ {
+ KernelWriterInterceptor<CLKernelWriter> writer;
+
+ const MemoryOperation op = std::get<0>(_config);
+ const TileInfo tile_info = std::get<1>(_config);
+ const Storage storage = std::get<2>(_config);
+ const SamplerData sampler_data = std::get<3>(_config);
+ const Coordinates coord = std::get<4>(_config);
+ const Dilations dilations = std::get<5>(_config);
+ const std::string expected_code = std::get<6>(_config).substr(1); // ignore initial newline, which was added for convenience
+
+ TileOperand tile_op = writer.declare_tile("tile", tile_info);
+ TileOperand x_op = writer.declare_tile(coord.x, TileInfo(DataType::Int32));
+ TileOperand y_op = writer.declare_tile(coord.y, TileInfo(DataType::Int32));
+ TileOperand z_op = writer.declare_tile(coord.z, TileInfo(DataType::Int32));
+ TileOperand batch_op = writer.declare_tile(coord.batch, TileInfo(DataType::Int32));
+ TileOperand dil_x_op = writer.declare_tile(dilations.dilation_x, TileInfo(DataType::Int32));
+ TileOperand dil_y_op = writer.declare_tile(dilations.dilation_y, TileInfo(DataType::Int32));
+
+ TensorShape tensor_shape {10, 10, 10, 10};
+ TensorInfo tensor_info(tile_info.data_type(), tensor_shape, TensorDataLayout::Nhwc, 0 /* id */);
+ TensorOperand tensor_op = writer.declare_tensor_argument("tensor", tensor_info);
+ TensorSampler sampler(storage, sampler_data.format, sampler_data.mode_x, sampler_data.mode_y, sampler_data.mode_z);
+
+ const bool no_dilation = (dilations.dilation_x == "1" && dilations.dilation_y == "1");
+
+ writer.start_capture_code();
+ if(op == MemoryOperation::Load)
+ {
+ if(no_dilation)
+ {
+ writer.op_load(tile_op, tensor_op, sampler, x_op, y_op, z_op, batch_op);
+ }
+ else
+ {
+ writer.op_load_dilated(tile_op, tensor_op, sampler, x_op, y_op, z_op, batch_op, dil_x_op, dil_y_op);
+ }
+ }
+ else
+ {
+ if(no_dilation)
+ {
+ writer.op_store(tensor_op, tile_op, sampler, x_op, y_op, z_op, batch_op);
+ }
+ else
+ {
+ writer.op_store_dilated(tensor_op, tile_op, sampler, x_op, y_op, z_op, batch_op, dil_x_op, dil_y_op);
+ }
+ }
+
+ VALIDATE_TEST(writer.check_added_code(expected_code), all_tests_passed, test_idx++);
+ }
+
+ return all_tests_passed;
+ }
+
+ std::string name() override
+ {
+ return "CLKernelWriterOpLoadStoreTest";
+ }
+
+private:
+ std::vector<CLKernelWriterOpLoadStoreConfig> _configs {};
+};
+
+} // namespace ckw
+
+#endif // CKW_VALIDATION_TESTS_CLKERNELWRITEROPLOADSTORETEST_H