authorGunes Bayir <gunes.bayir@arm.com>2023-08-17 11:04:02 +0100
committerGunes Bayir <gunes.bayir@arm.com>2023-08-30 15:45:59 +0000
commitd5f9a1cf9f0340f3e6bf9ff00156fc2adb1fdca9 (patch)
treeaf23cff1cb3a504ee51676cd9bfc74b75934fef2 /compute_kernel_writer
parent91cb7336400acc857e20086a23692f99fe11be9c (diff)
downloadComputeLibrary-d5f9a1cf9f0340f3e6bf9ff00156fc2adb1fdca9.tar.gz
Implement indirect load for buffer and CLImage
Add KernelWriter API functions for loading from an indirect buffer

Resolves: COMPMID-6390
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Change-Id: I45dbf88b25ec5caf2b458657ef20aacac9924745
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10192
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
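The new entry point is KernelWriter::op_load_indirect(), which takes a (height, 1) tile of per-row y indices in place of a scalar y coordinate. A minimal usage sketch, mirroring the validation test added below (the helper function, setup and tile shapes are illustrative, not part of this patch):

#include "ckw/TensorSampler.h"
#include "ckw/TileInfo.h"
#include "ckw/types/DataType.h"
#include "ckw/types/TensorSamplerTypes.h"
#include "src/cl/CLKernelWriter.h"

using namespace ckw;

void emit_indirect_load(CLKernelWriter &writer, const TensorOperand &tensor)
{
    // Destination tile: 2 rows x 3 columns of fp32.
    TileOperand tile = writer.declare_tile("tile", TileInfo(DataType::Fp32, 2, 3));
    // One indirect row index per destination row: a (2, 1) int32 tile.
    TileOperand indirect_addr = writer.declare_tile("indirect_addr", TileInfo(DataType::Int32, 2, 1));
    // Scalar x/z/batch coordinates.
    TileOperand x = writer.declare_tile("x", TileInfo(DataType::Int32));
    TileOperand z = writer.declare_tile("z", TileInfo(DataType::Int32));
    TileOperand b = writer.declare_tile("b", TileInfo(DataType::Int32));

    TensorSampler sampler(TensorStorageType::BufferUint8Ptr, TensorSamplerFormat::Dim0_Dim1_Dim2,
                          TensorSamplerAddressModeX::None, TensorSamplerAddressModeY::SkipLessThanZero,
                          TensorSamplerAddressModeZ::None);

    // y is the index tile: row r of the loaded tile comes from tensor row indirect_addr[r].
    writer.op_load_indirect(tile, tensor, sampler, x, indirect_addr, z, b);
}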
Diffstat (limited to 'compute_kernel_writer')
-rw-r--r--compute_kernel_writer/include/ckw/KernelWriter.h15
-rw-r--r--compute_kernel_writer/include/ckw/types/TensorSamplerTypes.h10
-rw-r--r--compute_kernel_writer/src/cl/CLKernelWriter.cpp52
-rw-r--r--compute_kernel_writer/src/cl/CLKernelWriter.h26
-rw-r--r--compute_kernel_writer/src/cl/helpers/CLMemoryOpBufferHelper.cpp6
-rw-r--r--compute_kernel_writer/src/cl/helpers/CLMemoryOpImage2dHelper.cpp7
-rw-r--r--compute_kernel_writer/validation/Validation.cpp4
-rw-r--r--compute_kernel_writer/validation/tests/CLKernelWriterOpLoadIndirectTest.h216
-rw-r--r--compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h36
9 files changed, 318 insertions, 54 deletions
diff --git a/compute_kernel_writer/include/ckw/KernelWriter.h b/compute_kernel_writer/include/ckw/KernelWriter.h
index 0c8f3de0a1..93ae8aecd6 100644
--- a/compute_kernel_writer/include/ckw/KernelWriter.h
+++ b/compute_kernel_writer/include/ckw/KernelWriter.h
@@ -267,7 +267,7 @@ public:
* @param[in] x x-coordinate
* @param[in] y y-coordinate
* @param[in] z z-coordinate
- * @param[in] batch batch offset
+ * @param[in] batch batch
*/
virtual void op_load(
const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
@@ -302,6 +302,19 @@ public:
const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
const TileOperand &dilation_x, const TileOperand &dilation_y) = 0;
+ /** Load the data from the tensor memory to the tile using the indirect buffer approach and respecting the sampling information.
+ *
+ * @param[in] tile_op The tile to be loaded.
+ * @param[in] tensor_op The tensor to be read.
+ * @param[in] sampler The tensor sampling information.
+ * @param[in] x x-coordinate
+ * @param[in] y y-coordinate
+ * @param[in] z z-coordinate
+ * @param[in] batch batch
+ */
+ virtual void op_load_indirect(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch_op) = 0;
+
protected:
// =============================================================================================
// ID space management
diff --git a/compute_kernel_writer/include/ckw/types/TensorSamplerTypes.h b/compute_kernel_writer/include/ckw/types/TensorSamplerTypes.h
index 3a9f4f5722..43dce1d4e4 100644
--- a/compute_kernel_writer/include/ckw/types/TensorSamplerTypes.h
+++ b/compute_kernel_writer/include/ckw/types/TensorSamplerTypes.h
@@ -22,8 +22,8 @@
* SOFTWARE.
*/
-#ifndef CKW_INCLUDE_CKW_TENSORSAMPLERTYPES_H
-#define CKW_INCLUDE_CKW_TENSORSAMPLERTYPES_H
+#ifndef CKW_INCLUDE_CKW_TYPES_TENSORSAMPLERTYPES_H
+#define CKW_INCLUDE_CKW_TYPES_TENSORSAMPLERTYPES_H
#include <cstdint>
@@ -47,6 +47,7 @@ enum class TensorSamplerFormat : int32_t
* Leftover elements can be handled using overlapping. This involves processing some of the elements in the array twice.
* ClampToBorderMaxOnly : Clamp to max value allowed in the corresponding dimension, and construct an if/else guard to prevent out of bound access,
* e.g. if( y < size-of-dimension-y ){ <do the operation> }
+ * SkipLessThanZero : Skip loading/storing if the index is less than 0
*
 * Individual dimensions choose which address mode to implement in their respective enum classes.
*/
@@ -65,7 +66,8 @@ enum class TensorSamplerAddressModeY : int32_t
Unknown = 0,
None = 1,
OverlappingMin = 2,
- ClampToBorderMaxOnly = 3
+ ClampToBorderMaxOnly = 3,
+ SkipLessThanZero = 4
};
/**
@@ -79,4 +81,4 @@ enum class TensorSamplerAddressModeZ : int32_t
} // namespace ckw
-#endif //CKW_INCLUDE_CKW_TENSORSAMPLERTYPES_H
+#endif // CKW_INCLUDE_CKW_TYPES_TENSORSAMPLERTYPES_H
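In the generated OpenCL, the new SkipLessThanZero mode wraps each row access in a sign guard. One guarded buffer row, quoted from the expected output in the new CLKernelWriterOpLoadIndirectTest:

if(G0__indirect_addr__0 >= 0)
{
G0__tile__0 = vload4(0, (__global int*)(G0__tensor_ptr + (G0__x) * sizeof(int) + (G0__indirect_addr__0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
}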
diff --git a/compute_kernel_writer/src/cl/CLKernelWriter.cpp b/compute_kernel_writer/src/cl/CLKernelWriter.cpp
index a946b989d7..4074da7912 100644
--- a/compute_kernel_writer/src/cl/CLKernelWriter.cpp
+++ b/compute_kernel_writer/src/cl/CLKernelWriter.cpp
@@ -42,6 +42,7 @@
#include <algorithm>
#include <cstdint>
+#include <vector>
namespace ckw
{
@@ -628,7 +629,7 @@ void CLKernelWriter::op_load(const TileOperand &tile_op, const TensorOperand &te
const CLTile dilation_x({ { "1" } }, DataType::Int32);
const CLTile dilation_y({ { "1" } }, DataType::Int32);
- op_load_store(MemoryOperation::Load, tile_op, tensor_op, sampler, x, y, z, batch, dilation_x, dilation_y);
+ op_load_store(MemoryOperation::Load, tile_op, tensor_op, sampler, x, y, z, batch, dilation_x, dilation_y, false /* indirect buffer */);
}
void CLKernelWriter::op_load_dilated(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
@@ -638,7 +639,7 @@ void CLKernelWriter::op_load_dilated(const TileOperand &tile_op, const TensorOpe
const auto &dil_x_tile = to_cl_tile(dilation_x);
const auto &dil_y_tile = to_cl_tile(dilation_y);
- op_load_store(MemoryOperation::Load, tile_op, tensor_op, sampler, x, y, z, batch, dil_x_tile, dil_y_tile);
+ op_load_store(MemoryOperation::Load, tile_op, tensor_op, sampler, x, y, z, batch, dil_x_tile, dil_y_tile, false /* indirect buffer */);
}
void CLKernelWriter::op_store(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
@@ -647,7 +648,7 @@ void CLKernelWriter::op_store(const TensorOperand &tensor_op, const TileOperand
const CLTile dilation_x({ { "1" } }, DataType::Int32);
const CLTile dilation_y({ { "1" } }, DataType::Int32);
- op_load_store(MemoryOperation::Store, tile_op, tensor_op, sampler, x, y, z, batch, dilation_x, dilation_y);
+ op_load_store(MemoryOperation::Store, tile_op, tensor_op, sampler, x, y, z, batch, dilation_x, dilation_y, false /* indirect buffer */);
}
void CLKernelWriter::op_store_dilated(const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
@@ -657,16 +658,32 @@ void CLKernelWriter::op_store_dilated(const TensorOperand &tensor_op, const Tile
const auto &dil_x_tile = to_cl_tile(dilation_x);
const auto &dil_y_tile = to_cl_tile(dilation_y);
- op_load_store(MemoryOperation::Store, tile_op, tensor_op, sampler, x, y, z, batch, dil_x_tile, dil_y_tile);
+ op_load_store(MemoryOperation::Store, tile_op, tensor_op, sampler, x, y, z, batch, dil_x_tile, dil_y_tile, false /* indirect buffer */);
+}
+
+void CLKernelWriter::op_load_indirect(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch)
+{
+ const CLTile dilation_x({ { "1" } }, DataType::Int32);
+ const CLTile dilation_y({ { "1" } }, DataType::Int32);
+
+ op_load_store(MemoryOperation::Load, tile_op, tensor_op, sampler, x, y, z, batch, dilation_x, dilation_y, true /* indirect buffer */);
}
void CLKernelWriter::op_load_store(MemoryOperation op, const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
- const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
- const CLTile &dilation_x, const CLTile &dilation_y)
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
+ const CLTile &dilation_x, const CLTile &dilation_y, bool indirect_buffer)
{
CKW_UNUSED(dilation_x);
+ CKW_ASSERT(dilation_x.is_scalar());
+ CKW_ASSERT(dilation_y.is_scalar());
CKW_ASSERT(dilation_x.scalar(0, 0).str == "((int)(1))"); // Dilation in x dimension is not implemented yet
+ if(indirect_buffer)
+ {
+ CKW_ASSERT(dilation_y.scalar(0,0).str == "((int)(1))" && dilation_x.scalar(0,0).str == "((int)(1))");
+ }
+
ITensor &tensor = get_tensor(tensor_op);
std::unique_ptr<ICLMemoryOpHelper> helper;
@@ -689,18 +706,31 @@ void CLKernelWriter::op_load_store(MemoryOperation op, const TileOperand &tile_o
const auto &z_tile = to_cl_tile(z);
const auto &batch_tile = to_cl_tile(batch);
+ CKW_ASSERT(x_tile.is_scalar());
+ CKW_ASSERT(z_tile.is_scalar());
+ CKW_ASSERT_IF(indirect_buffer, y_tile.info().width() == 1);
+ CKW_ASSERT_IF(!indirect_buffer, y_tile.is_scalar());
+ CKW_ASSERT(batch_tile.is_scalar());
+
helper->initialize(&tile, &x_tile, &z_tile, &batch_tile);
for(int row = 0; row < tile.info().height(); ++row)
{
- std::string coord_y = y_tile.scalar(0, 0).str + " + " + std::to_string(row);
+ if(!indirect_buffer)
+ {
+ std::string coord_y = y_tile.scalar(0, 0).str + " + " + std::to_string(row);
+
+ if(dilation_y.scalar(0, 0).str != "((int)(1))")
+ {
+ coord_y += " * " + dilation_y.scalar(0, 0).str;
+ }
- if(dilation_y.scalar(0, 0).str != "1")
+ helper->write_row(row, coord_y);
+ }
+ else
{
- coord_y += " * " + dilation_y.scalar(0, 0).str;
+ helper->write_row(row, y_tile.scalar(row, 0).str);
}
-
- helper->write_row(row, coord_y);
}
helper->finalize();
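Two things are worth noting in this hunk. First, the dilation comparison is fixed: a constant scalar tile stringifies as "((int)(1))", not "1", so the old check was always true and appended a redundant * ((int)(1)) factor to every y coordinate; the CLKernelWriterOpLoadStoreTest expectations below are updated accordingly. Second, the per-row y coordinate now differs by mode. For row 1, the coordinate fragments from the tests' expected strings look like this (surrounding address arithmetic elided):

/* Direct load: scalar y plus the row offset */
(G0__y + 1) * G0__tensor_stride1
/* Indirect load: row 1 of the (height, 1) index tile */
(G0__indirect_addr__1) * G0__tensor_stride1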
diff --git a/compute_kernel_writer/src/cl/CLKernelWriter.h b/compute_kernel_writer/src/cl/CLKernelWriter.h
index c494847944..1e2e5dc910 100644
--- a/compute_kernel_writer/src/cl/CLKernelWriter.h
+++ b/compute_kernel_writer/src/cl/CLKernelWriter.h
@@ -131,40 +131,27 @@ public:
// Memory Operations
// =============================================================================================
- /** Load the data from the tensor memory to the tile using the sampling information.
- *
- * Similar to @ref KernelWriter::op_load()
- */
void op_load(
const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch) override;
- /** Load the data from the tensor memory to the tile in a dilated way using the sampling information.
- *
- * Similar to @ref KernelWriter::op_load_dilated()
- */
void op_load_dilated(
const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
const TileOperand &dilation_x, const TileOperand &dilation_y) override;
- /** Store the data to the tensor memory from the tile using the sampling information.
- *
- * Similar to @ref KernelWriter::op_store()
- */
void op_store(
const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch) override;
- /** Store the data to the tensor memory from the tile in a dilated way using the sampling information.
- *
- * Similar to @ref KernelWriter::op_store_dilated()
- */
void op_store_dilated(
const TensorOperand &tensor_op, const TileOperand &tile_op, TensorSampler &sampler,
const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
const TileOperand &dilation_x, const TileOperand &dilation_y) override;
+ void op_load_indirect(const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch) override;
+
protected:
/** Return @ref CLTile object from the @ref TileOperand object.
*
@@ -192,11 +179,10 @@ protected:
// For helper functions
private:
- /** Helper function to consolidate all load/store logic in this class */
- void op_load_store(
- MemoryOperation op, const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
+ /** Helper method to consolidate all load/store logic in this class */
+ void op_load_store(MemoryOperation op, const TileOperand &tile_op, const TensorOperand &tensor_op, TensorSampler &sampler,
const TileOperand &x, const TileOperand &y, const TileOperand &z, const TileOperand &batch,
- const CLTile &dilation_x, const CLTile &dilation_y);
+ const CLTile &dilation_x, const CLTile &dilation_y, bool indirect_buffer);
/** This function is the generic function to write both `if` and `else if` blocks.
*
diff --git a/compute_kernel_writer/src/cl/helpers/CLMemoryOpBufferHelper.cpp b/compute_kernel_writer/src/cl/helpers/CLMemoryOpBufferHelper.cpp
index e50418711e..f906bcd4b1 100644
--- a/compute_kernel_writer/src/cl/helpers/CLMemoryOpBufferHelper.cpp
+++ b/compute_kernel_writer/src/cl/helpers/CLMemoryOpBufferHelper.cpp
@@ -198,6 +198,9 @@ void CLMemoryOpBufferHelper::out_of_bound_initialize_y(const std::string &coord)
max = _mapper->dim_y().str;
_writer->op_write_raw_code("if(" + coord + " < " + max + ")\n{\n");
break;
+ case TensorSamplerAddressModeY::SkipLessThanZero:
+ _writer->op_write_raw_code("if(" + coord + " >= 0)\n{\n");
+ break;
case TensorSamplerAddressModeY::None:
break;
default:
@@ -216,6 +219,9 @@ void CLMemoryOpBufferHelper::out_of_bound_finalize_y(const std::string &dst)
_writer->op_write_raw_code(dst);
_writer->op_write_raw_code(" = 0.0f;\n}\n");
break;
+ case TensorSamplerAddressModeY::SkipLessThanZero:
+ _writer->op_write_raw_code("}\n");
+ break;
case TensorSamplerAddressModeY::None:
break;
default:
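The finalize step also distinguishes the two guarded modes: SkipLessThanZero simply closes the brace and leaves the tile row untouched, whereas ClampToBorderMaxOnly zero-fills it in an else branch. Both shapes appear in the tests' expected strings (load/store bodies elided):

/* SkipLessThanZero */
if(G0__indirect_addr__0 >= 0)
{
/* ...load... */
}

/* ClampToBorderMaxOnly */
if(G0__y + 0 < G0__tensor_dim1)
{
/* ...store... */
}
else
{
G0__tile__0 = 0.0f;
}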
diff --git a/compute_kernel_writer/src/cl/helpers/CLMemoryOpImage2dHelper.cpp b/compute_kernel_writer/src/cl/helpers/CLMemoryOpImage2dHelper.cpp
index a5f0c17c16..55f88f4136 100644
--- a/compute_kernel_writer/src/cl/helpers/CLMemoryOpImage2dHelper.cpp
+++ b/compute_kernel_writer/src/cl/helpers/CLMemoryOpImage2dHelper.cpp
@@ -104,6 +104,9 @@ void CLMemoryOpImage2dHelper::out_of_bound_initialize_y(const std::string &coord
const TensorSamplerAddressModeY address_mode_y = _sampler->address_mode_y();
switch(address_mode_y)
{
+ case TensorSamplerAddressModeY::SkipLessThanZero:
+ _writer->op_write_raw_code("if(" + coord + " >= 0)\n{\n");
+ break;
case TensorSamplerAddressModeY::ClampToBorderMaxOnly:
case TensorSamplerAddressModeY::None:
break;
@@ -117,6 +120,9 @@ void CLMemoryOpImage2dHelper::out_of_bound_finalize_y()
const TensorSamplerAddressModeY address_mode_y = _sampler->address_mode_y();
switch(address_mode_y)
{
+ case TensorSamplerAddressModeY::SkipLessThanZero:
+ _writer->op_write_raw_code("}\n");
+ break;
case TensorSamplerAddressModeY::ClampToBorderMaxOnly:
case TensorSamplerAddressModeY::None:
break;
@@ -153,6 +159,7 @@ std::string CLMemoryOpImage2dHelper::to_ls_image2d_sampler() const
{
case TensorSamplerAddressModeY::None:
return "CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST";
+ case TensorSamplerAddressModeY::SkipLessThanZero:
case TensorSamplerAddressModeY::ClampToBorderMaxOnly:
return "CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST";
default:
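For image reads, SkipLessThanZero reuses the CLK_ADDRESS_CLAMP sampler of ClampToBorderMaxOnly and additionally emits the sign guard around the read. Combining the guard with the image coordinate form from the new test gives a sketch of the expected shape (this exact combination is not exercised by a test in this patch):

if(G0__indirect_addr__0 >= 0)
{
G0__tile__0 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((G0__x) >> 2, (G0__indirect_addr__0 + (G0__b) * G0__tensor_dim1xdim2 * 1)));
}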
diff --git a/compute_kernel_writer/validation/Validation.cpp b/compute_kernel_writer/validation/Validation.cpp
index 06af610456..7031fe80a9 100644
--- a/compute_kernel_writer/validation/Validation.cpp
+++ b/compute_kernel_writer/validation/Validation.cpp
@@ -33,6 +33,7 @@
#include "validation/tests/CLKernelWriterForTest.h"
#include "validation/tests/CLKernelWriterGetGlobalIdTest.h"
#include "validation/tests/CLKernelWriterIfTest.h"
+#include "validation/tests/CLKernelWriterOpLoadIndirectTest.h"
#include "validation/tests/CLKernelWriterOpLoadStoreTest.h"
#include "validation/tests/CLKernelWriterPrintTest.h"
#include "validation/tests/CLKernelWriterReturnTest.h"
@@ -43,6 +44,7 @@
#include "validation/tests/TensorBitMaskTest.h"
#include "validation/tests/UtilsTest.h"
+#include <cstdint>
#include <memory>
#include <vector>
@@ -99,6 +101,7 @@ int32_t main()
const auto test34 = std::make_unique<CLKernelWriterReturnTest>();
const auto test35 = std::make_unique<CLKernelWriterGetGlobalIdTest>();
const auto test36 = std::make_unique<CLKernelWriterPrintTest>();
+ const auto test37 = std::make_unique<CLKernelWriterOpLoadIndirectTest>();
tests.push_back(test3.get());
tests.push_back(test4.get());
@@ -136,6 +139,7 @@ int32_t main()
tests.push_back(test34.get());
tests.push_back(test35.get());
tests.push_back(test36.get());
+ tests.push_back(test37.get());
#endif /* COMPUTE_KERNEL_WRITER_OPENCL_ENABLED */
bool all_test_passed = true;
diff --git a/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadIndirectTest.h b/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadIndirectTest.h
new file mode 100644
index 0000000000..dacf3cd435
--- /dev/null
+++ b/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadIndirectTest.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef CKW_VALIDATION_TESTS_CLKERNELWRITEROPLOADINDIRECTTEST_H
+#define CKW_VALIDATION_TESTS_CLKERNELWRITEROPLOADINDIRECTTEST_H
+
+#include "ckw/TileInfo.h"
+#include "ckw/types/DataType.h"
+#include "ckw/TensorSampler.h"
+#include "ckw/types/MemoryOperation.h"
+#include "ckw/types/TensorSamplerTypes.h"
+#include "src/cl/CLKernelWriter.h"
+#include "validation/tests/common/KernelWriterInterceptor.h"
+#include "validation/tests/common/Common.h"
+
+#include <vector>
+
+namespace ckw
+{
+
+class CLKernelWriterOpLoadIndirectTest : public ITest
+{
+private:
+ using AddressModeX = TensorSamplerAddressModeX;
+ using AddressModeY = TensorSamplerAddressModeY;
+ using AddressModeZ = TensorSamplerAddressModeZ;
+ using Format = TensorSamplerFormat;
+ using Storage = TensorStorageType;
+
+ struct Coordinates
+ {
+ Coordinates(std::string x, std::string y, std::string z, std::string batch)
+ : x(x), y(y), z(z), batch(batch)
+ {
+ }
+
+ std::string x;
+ std::string y;
+ std::string z;
+ std::string batch;
+ };
+
+ struct SamplerData
+ {
+ SamplerData(Format format, AddressModeX mode_x, AddressModeY mode_y, AddressModeZ mode_z)
+ : format(format), mode_x(mode_x), mode_y(mode_y), mode_z(mode_z)
+ {
+ }
+
+ Format format;
+ AddressModeX mode_x;
+ AddressModeY mode_y;
+ AddressModeZ mode_z;
+ };
+
+ using CLKernelWriterOpLoadIndirectConfig = std::tuple<TileInfo, TensorStorageType, SamplerData, Coordinates, std::string>;
+
+public:
+ CLKernelWriterOpLoadIndirectTest()
+ {
+ const std::string fp_2x3_tile = R"_(
+G0__tile__0 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__indirect_addr__0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+G0__tile__1 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__indirect_addr__1) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+)_";
+
+ const std::string half_2x4_yz_collapsed_y_clamped_to_border_max_only_image = R"_(
+G0__tile__0 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((G0__x) >> 2, (G0__indirect_addr__0 + (G0__b) * G0__tensor_dim1xdim2 * 1)));
+G0__tile__1 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((G0__x) >> 2, (G0__indirect_addr__1 + (G0__b) * G0__tensor_dim1xdim2 * 1)));
+)_";
+
+ const std::string int_2x4_y_skip_less_than_zero = R"_(
+if(G0__indirect_addr__0 >= 0)
+{
+G0__tile__0 = vload4(0, (__global int*)(G0__tensor_ptr + (G0__x) * sizeof(int) + (G0__indirect_addr__0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+}
+if(G0__indirect_addr__1 >= 0)
+{
+G0__tile__1 = vload4(0, (__global int*)(G0__tensor_ptr + (G0__x) * sizeof(int) + (G0__indirect_addr__1) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+}
+)_";
+
+ // tensor shape in x-dim is 10 (thus the 8, 2 vloads in if, else blocks respectively)
+ const std::string uint16_3x8_yz_collapsed_b_eq_0_x_overlapping_min_y_skip_less_than_zero = R"_(
+if(G0__x > 0)
+{
+if(G0__indirect_addr__0 >= 0)
+{
+G0__tile__0 = vload8(0, (__global ushort*)(G0__tensor_ptr + (G0__x) * sizeof(ushort) + (G0__indirect_addr__0) * G0__tensor_stride1 + (G0__0) * G0__tensor_stride3));
+}
+if(G0__indirect_addr__1 >= 0)
+{
+G0__tile__1 = vload8(0, (__global ushort*)(G0__tensor_ptr + (G0__x) * sizeof(ushort) + (G0__indirect_addr__1) * G0__tensor_stride1 + (G0__0) * G0__tensor_stride3));
+}
+if(G0__indirect_addr__2 >= 0)
+{
+G0__tile__2 = vload8(0, (__global ushort*)(G0__tensor_ptr + (G0__x) * sizeof(ushort) + (G0__indirect_addr__2) * G0__tensor_stride1 + (G0__0) * G0__tensor_stride3));
+}
+}
+else
+{
+if(G0__indirect_addr__0 >= 0)
+{
+G0__tile__0.s01 = vload2(0, (__global ushort*)(G0__tensor_ptr + (G0__x + 0) * sizeof(ushort) + (G0__indirect_addr__0) * G0__tensor_stride1 + (G0__0) * G0__tensor_stride3));
+}
+if(G0__indirect_addr__1 >= 0)
+{
+G0__tile__1.s01 = vload2(0, (__global ushort*)(G0__tensor_ptr + (G0__x + 0) * sizeof(ushort) + (G0__indirect_addr__1) * G0__tensor_stride1 + (G0__0) * G0__tensor_stride3));
+}
+if(G0__indirect_addr__2 >= 0)
+{
+G0__tile__2.s01 = vload2(0, (__global ushort*)(G0__tensor_ptr + (G0__x + 0) * sizeof(ushort) + (G0__indirect_addr__2) * G0__tensor_stride1 + (G0__0) * G0__tensor_stride3));
+}
+}
+)_";
+
+ // Configs Bundled
+ _configs = {
+ {
+ TileInfo(DataType::Fp32, 2, 3),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::None, AddressModeY::None, AddressModeZ::None),
+ Coordinates("x", "y", "z", "b"),
+ fp_2x3_tile
+ },
+ {
+ TileInfo(DataType::Fp16, 2, 4),
+ TensorStorageType::Texture2dReadOnly,
+ SamplerData(Format::Dim0_Dim1xDim2_1, AddressModeX::None, AddressModeY::ClampToBorderMaxOnly, AddressModeZ::None),
+ Coordinates("x", "y", "z", "b"),
+ half_2x4_yz_collapsed_y_clamped_to_border_max_only_image
+ },
+ {
+ TileInfo(DataType::Int32, 2, 4),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1_Dim2, AddressModeX::None, AddressModeY::SkipLessThanZero, AddressModeZ::None),
+ Coordinates("x", "y", "z", "b"),
+ int_2x4_y_skip_less_than_zero
+ },
+ {
+ TileInfo(DataType::Uint16, 3, 8),
+ TensorStorageType::BufferUint8Ptr,
+ SamplerData(Format::Dim0_Dim1xDim2_1, AddressModeX::OverlappingMin, AddressModeY::SkipLessThanZero, AddressModeZ::None),
+ Coordinates("x", "y", "z", "0"),
+ uint16_3x8_yz_collapsed_b_eq_0_x_overlapping_min_y_skip_less_than_zero
+ }
+ };
+ }
+
+ bool run() override
+ {
+ bool all_tests_passed = true;
+ int32_t test_idx = 0;
+
+ for(auto _config: _configs)
+ {
+ KernelWriterInterceptor<CLKernelWriter> writer;
+
+ const TileInfo tile_info = std::get<0>(_config);
+ const Storage storage = std::get<1>(_config);
+ const SamplerData sampler_data = std::get<2>(_config);
+ const Coordinates coord = std::get<3>(_config);
+ const std::string expected_code = std::get<4>(_config).substr(1); // ignore initial newline, which was added for convenience
+
+ TileOperand tile_op = writer.declare_tile("tile", TileInfo(tile_info.data_type(), tile_info.height(), tile_info.width()));
+ TileOperand indirect_addr_op = writer.declare_tile("indirect_addr", TileInfo(DataType::Int32, tile_info.height(), 1)); // (M0, 1)
+ TileOperand x_op = writer.declare_tile(coord.x, TileInfo(DataType::Int32));
+ TileOperand z_op = writer.declare_tile(coord.z, TileInfo(DataType::Int32));
+ TileOperand batch_op = writer.declare_tile(coord.batch, TileInfo(DataType::Int32));
+
+ TensorShape tensor_shape {10, 10, 10, 10};
+ TensorInfo tensor_info(tile_info.data_type(), tensor_shape, TensorDataLayout::Nhwc, 0 /* id */);
+ TensorOperand tensor_op = writer.declare_tensor_argument("tensor", tensor_info);
+ TensorSampler sampler(storage, sampler_data.format, sampler_data.mode_x, sampler_data.mode_y, sampler_data.mode_z);
+
+ writer.start_capture_code();
+ writer.op_load_indirect(tile_op, tensor_op, sampler, x_op, indirect_addr_op, z_op, batch_op);
+
+ VALIDATE_TEST(writer.check_added_code(expected_code), all_tests_passed, test_idx++);
+ }
+
+ return all_tests_passed;
+ }
+
+ std::string name() override
+ {
+ return "CLKernelWriterOpLoadIndirectTest";
+ }
+
+private:
+ std::vector<CLKernelWriterOpLoadIndirectConfig> _configs {};
+};
+
+} // namespace ckw
+
+#endif // CKW_VALIDATION_TESTS_CLKERNELWRITEROPLOADINDIRECTTEST_H
diff --git a/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h b/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h
index 5702f19ce5..870e80ee9a 100644
--- a/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h
+++ b/compute_kernel_writer/validation/tests/CLKernelWriterOpLoadStoreTest.h
@@ -93,16 +93,16 @@ public:
{
// Cases
const std::string load_fp_2x3_tile = R"_(
-G0__tile__0 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 0 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
-G0__tile__1 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 1 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+G0__tile__0 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+G0__tile__1 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 1) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
)_";
const std::string load_half_2x4_tile_image_clamp_y = R"_(
-G0__tile__0 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((G0__x) >> 2, (G0__y + 0 * ((int)(1)) + (G0__z) * G0__tensor_dim1 + (G0__b) * G0__tensor_dim1 * G0__tensor_dim2)));
-G0__tile__1 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((G0__x) >> 2, (G0__y + 1 * ((int)(1)) + (G0__z) * G0__tensor_dim1 + (G0__b) * G0__tensor_dim1 * G0__tensor_dim2)));
+G0__tile__0 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((G0__x) >> 2, (G0__y + 0 + (G0__z) * G0__tensor_dim1 + (G0__b) * G0__tensor_dim1 * G0__tensor_dim2)));
+G0__tile__1 = read_imageh(G0__tensor_img2d, CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, (int2)((G0__x) >> 2, (G0__y + 1 + (G0__z) * G0__tensor_dim1 + (G0__b) * G0__tensor_dim1 * G0__tensor_dim2)));
)_";
const std::string store_fp_2x3_tile = R"_(
-vstore3(G0__tile__0, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 0 * ((int)(1))) * G0__tensor_stride1 + (G0__b) * G0__tensor_stride3));
-vstore3(G0__tile__1, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 1 * ((int)(1))) * G0__tensor_stride1 + (G0__b) * G0__tensor_stride3));
+vstore3(G0__tile__0, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 0) * G0__tensor_stride1 + (G0__b) * G0__tensor_stride3));
+vstore3(G0__tile__1, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 1) * G0__tensor_stride1 + (G0__b) * G0__tensor_stride3));
)_";
const std::string store_int8_4x4_y_dilation_batch_eq_0 = R"_(
vstore4(G0__tile__0, 0, (__global char*)(G0__tensor_ptr + (((int)(1))) * sizeof(char) + (G0__y + 0 * G0__y_dilation) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(0))) * G0__tensor_stride3));
@@ -114,29 +114,29 @@ vstore4(G0__tile__3, 0, (__global char*)(G0__tensor_ptr + (((int)(1))) * sizeof(
const std::string load_fp_2x3_tile_x_overlapping_min_y_eq_0_batch_eq_1 = R"_(
if(G0__x > 0)
{
-G0__tile__0 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (((int)(0)) + 0 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
-G0__tile__1 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (((int)(0)) + 1 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
+G0__tile__0 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (((int)(0)) + 0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
+G0__tile__1 = vload3(0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (((int)(0)) + 1) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
}
else
{
-G0__tile__0.s0 = *((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (((int)(0)) + 0 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
-G0__tile__1.s0 = *((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (((int)(0)) + 1 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
+G0__tile__0.s0 = *((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (((int)(0)) + 0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
+G0__tile__1.s0 = *((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (((int)(0)) + 1) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (((int)(1))) * G0__tensor_stride3));
}
)_";
const std::string store_fp_2x3_tile_x_overlapping_min_y_clamp_to_border_max_only = R"_(
if(G0__x > 0)
{
-if(G0__y + 0 * ((int)(1)) < G0__tensor_dim1)
+if(G0__y + 0 < G0__tensor_dim1)
{
-vstore3(G0__tile__0, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 0 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+vstore3(G0__tile__0, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
}
else
{
G0__tile__0 = 0.0f;
}
-if(G0__y + 1 * ((int)(1)) < G0__tensor_dim1)
+if(G0__y + 1 < G0__tensor_dim1)
{
-vstore3(G0__tile__1, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 1 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
+vstore3(G0__tile__1, 0, (__global float*)(G0__tensor_ptr + (G0__x) * sizeof(float) + (G0__y + 1) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3));
}
else
{
@@ -145,17 +145,17 @@ G0__tile__1 = 0.0f;
}
else
{
-if(G0__y + 0 * ((int)(1)) < G0__tensor_dim1)
+if(G0__y + 0 < G0__tensor_dim1)
{
-*((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (G0__y + 0 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3)) = G0__tile__0.s0;
+*((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (G0__y + 0) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3)) = G0__tile__0.s0;
}
else
{
G0__tile__0.s0 = 0.0f;
}
-if(G0__y + 1 * ((int)(1)) < G0__tensor_dim1)
+if(G0__y + 1 < G0__tensor_dim1)
{
-*((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (G0__y + 1 * ((int)(1))) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3)) = G0__tile__1.s0;
+*((__global float*)(G0__tensor_ptr + (G0__x + 0) * sizeof(float) + (G0__y + 1) * G0__tensor_stride1 + (G0__z) * G0__tensor_stride2 + (G0__b) * G0__tensor_stride3)) = G0__tile__1.s0;
}
else
{