path: root/tests
author     Georgios Pinitas <georgios.pinitas@arm.com>    2021-04-22 21:13:21 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>    2021-05-18 14:48:39 +0000
commit     856f66e6c61b77d03f754cd0fa8439891f0e4aca (patch)
tree       f9379cd0853ac407109e54c3d53b385ceee066c2 /tests
parent     37f4b2ef1ea225a90ccb563fcb2c08f8fb0fb5d5 (diff)
download   ComputeLibrary-856f66e6c61b77d03f754cd0fa8439891f0e4aca.tar.gz
Port CLGEMM to memory injecting interface
Moves the following kernels:
 - CLGEMMMatrixMultiplyKernel
 - CLGEMMMatrixMultiplyNativeKernel
 - CLGEMMMatrixMultiplyReshapedKernel
 - CLGEMMMatrixMultiplyReshapedOnlyRHSKernel

Moves the following functions:
 - CLGEMM

Introduces facilities for easy handling of auxiliary temporary buffers under the new run interface, namely:
 - CLAuxTensorHandler: allows wrapping of workspace buffer memory into CLBuffer objects
 - Ability to inject a TensorInfo into an allocator without transferring ownership, which reduces copy overhead where needed.

Resolves: COMPMID-4188

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I7055435d831b05b749b26302082e4ac45f26dfb0
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5498
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
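The diff below ports the test helpers and fixtures to this run interface. As a minimal sketch of the resulting pattern (the wrapper, kernel, and pack names are taken from this change; the run_reshape_rhs helper and the assumption that the tensors and rhs_info are already set up as the fixtures do are purely illustrative), a kernel is wrapped with CLSynthetizeOperator, configured on ITensorInfo objects only, and handed its tensors at run time through an ITensorPack:

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "tests/CL/Helper.h"

using namespace arm_compute;
using namespace arm_compute::test;

// Wrap the ported ClGemmReshapeRhsMatrixKernel in a synthetic test operator.
using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<opencl::kernels::ClGemmReshapeRhsMatrixKernel>;

// Illustrative helper: rhs, rhs_reshaped and rhs_info are assumed to be initialised as in the fixtures.
void run_reshape_rhs(CLTensor &rhs, CLTensor &rhs_reshaped, const GEMMRHSMatrixInfo &rhs_info)
{
    CLGEMMReshapeRHSMatrix reshape_rhs;

    // Configuration now takes ITensorInfo only; no tensor ownership is transferred.
    reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);

    // Memory is injected at run time through an ITensorPack.
    ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
    reshape_rhs.run(reshape_rhs_pack);
}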
Diffstat (limited to 'tests')
-rw-r--r--  tests/CL/Helper.h  83
-rw-r--r--  tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp  16
-rw-r--r--  tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp  6
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiply.cpp  23
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp  31
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyNative.cpp  11
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp  27
-rw-r--r--  tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp  13
-rw-r--r--  tests/validation/CL/GEMMReshapeLHSMatrix.cpp  7
-rw-r--r--  tests/validation/CL/GEMMReshapeRHSMatrix.cpp  13
-rw-r--r--  tests/validation/CL/UNIT/DynamicTensor.cpp  3
-rw-r--r--  tests/validation/CL/UNIT/WeightsRetention.cpp  12
-rw-r--r--  tests/validation/fixtures/GEMMFixture.h  200
-rw-r--r--  tests/validation/fixtures/GEMMLowpFixture.h  50
-rw-r--r--  tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h  9
-rw-r--r--  tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h  9
16 files changed, 330 insertions, 183 deletions
diff --git a/tests/CL/Helper.h b/tests/CL/Helper.h
index 5153e98add..b99911e1e6 100644
--- a/tests/CL/Helper.h
+++ b/tests/CL/Helper.h
@@ -29,8 +29,11 @@
#include "arm_compute/runtime/CL/functions/CLFill.h"
#include "arm_compute/runtime/IFunction.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+#include "src/runtime/gpu/cl/operators/ClFill.h"
#include "src/core/CL/ICLKernel.h"
+#include "support/Cast.h"
#include <memory>
@@ -38,6 +41,86 @@ namespace arm_compute
{
namespace test
{
+/** This template synthetizes a simple IOperator which runs the given kernel K */
+template <typename K>
+class CLSynthetizeOperator : public opencl::IClOperator
+{
+public:
+ /** Configure the kernel.
+ *
+ * @param[in] args Configuration arguments.
+ */
+ template <typename... Args>
+ void configure(Args &&... args)
+ {
+ auto k = std::make_unique<K>();
+ k->configure(CLKernelLibrary::get().get_compile_context(), std::forward<Args>(args)...);
+ _kernel = std::move(k);
+ }
+ /** Configure the kernel setting the GPU target as well
+ *
+ * @param[in] gpu_target GPUTarget to set
+ * @param[in] args Configuration arguments.
+ */
+ template <typename... Args>
+ void configure(GPUTarget gpu_target, Args &&... args)
+ {
+ auto k = std::make_unique<K>();
+ k->set_target(gpu_target);
+ k->configure(CLKernelLibrary::get().get_compile_context(), std::forward<Args>(args)...);
+ _kernel = std::move(k);
+ }
+ /** Validate input arguments
+ *
+ * @param[in] args Configuration arguments.
+ */
+ template <typename... Args>
+ static Status validate(Args &&... args)
+ {
+ return K::validate(std::forward<Args>(args)...);
+ }
+};
+
+/** As above, but this also initializes the output tensor to zero */
+template <typename K, int bordersize>
+class CLSynthetizeOperatorInitOutputWithZeroAndWithZeroConstantBorder : public opencl::IClOperator
+{
+public:
+ /** Configure the kernel.
+ *
+ * @param[in] first First input argument.
+ * @param[in] second Second input argument.
+ * @param[in] args Rest of the configuration arguments.
+ */
+ template <typename T, typename... Args>
+ void configure(T first, T second, Args &&... args)
+ {
+ auto cctx = CLKernelLibrary::get().get_compile_context();
+ auto k = std::make_unique<K>();
+ k->set_target(CLScheduler::get().target());
+ k->configure(cctx, first, second, std::forward<Args>(args)...);
+ _kernel = std::move(k);
+ _border_handler.configure(cctx, first, BorderSize(bordersize), BorderMode::CONSTANT, PixelValue());
+ _fill.configure(cctx, second, PixelValue());
+ }
+
+ // Inherited method overridden:
+ void run(ITensorPack &tensors) override final
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(!_kernel, "The CL kernel or function isn't configured");
+
+ ITensorPack fill_pack = { { ACL_SRC, tensors.get_tensor(TensorType::ACL_DST) } };
+ _fill.run(fill_pack);
+ CLScheduler::get().enqueue_op(_border_handler, tensors);
+ CLScheduler::get().enqueue_op(*_kernel, tensors);
+ }
+
+private:
+ opencl::ClFill _fill{}; /**< Kernel to initialize the tensor */
+ CLFillBorderKernel _border_handler{}; /**< Kernel to handle borders */
+ std::unique_ptr<ICLKernel> _kernel{}; /**< Kernel to run */
+};
+
/** This template synthetizes an ICLSimpleFunction which runs the given kernel K */
template <typename K>
class CLSynthetizeFunction : public ICLSimpleFunction
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
index 4873a291ab..68a7d055ad 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshaped.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,8 +24,8 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/framework/Asserts.h"
@@ -42,13 +42,13 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-// Create function for CLGEMMReshapeLHSMatrixKernel
-using CLGEMMReshapeLHSMatrix = CLSynthetizeFunction<CLGEMMReshapeLHSMatrixKernel>;
+// Create function for ClGemmReshapeLhsMatrixKernel
+using CLGEMMReshapeLHSMatrix = CLSynthetizeOperator<opencl::kernels::ClGemmReshapeLhsMatrixKernel>;
-// Create function for CLGEMMReshapeRHSMatrixKernel
-using CLGEMMReshapeRHSMatrix = CLSynthetizeFunction<CLGEMMReshapeRHSMatrixKernel>;
+// Create function for ClGemmReshapeRhsMatrixKernel
+using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<opencl::kernels::ClGemmReshapeRhsMatrixKernel>;
-// Create function for CLGEMMMatrixMultiplyReshapedKernel
+// Create function for CLGEMMLowpMatrixMultiplyReshapedKernel
using CLGEMMLowpMatrixMultiplyReshaped = CLSynthetizeFunction<CLGEMMLowpMatrixMultiplyReshapedKernel>;
// Fixture for CLGEMMLowpMatrixMultiplyReshaped
diff --git a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
index fa256280ca..43b86b51e8 100644
--- a/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMLowpMatrixMultiplyReshapedOnlyRHS.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,7 +26,7 @@
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -46,7 +46,7 @@ namespace validation
using namespace arm_compute::misc::shape_calculator;
// Create function for CLGEMMReshapeRHSMatrixKernel
-using CLGEMMReshapeRHSMatrix = CLSynthetizeFunction<CLGEMMReshapeRHSMatrixKernel>;
+using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<opencl::kernels::ClGemmReshapeRhsMatrixKernel>;
// Create function for CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel
using CLGEMMLowpMatrixMultiplyReshapedOnlyRHS = CLSynthetizeFunction<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>;
diff --git a/tests/validation/CL/GEMMMatrixMultiply.cpp b/tests/validation/CL/GEMMMatrixMultiply.cpp
index fdf7f503ec..21e085087d 100644
--- a/tests/validation/CL/GEMMMatrixMultiply.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiply.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,7 +26,7 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -44,9 +44,10 @@ namespace test
namespace validation
{
using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
// Create function for CLGEMMMatrixMultiplyKernel
-using CLGEMMMatrixMultiplyNative = CLSynthetizeFunction<CLGEMMMatrixMultiplyKernel>;
+using CLGEMMMatrixMultiplyNative = CLSynthetizeOperator<ClGemmMatrixMultiplyKernel>;
// Fixture for GEMMMatrixMultiplyValidationFixture
template <typename T>
@@ -140,7 +141,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const bool is_interleaved_transposed = false;
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -154,7 +155,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const bool is_interleaved_transposed = false;
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -169,7 +170,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = true;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -183,7 +184,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const bool is_interleaved_transposed = false;
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -197,7 +198,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const bool is_interleaved_transposed = false;
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -214,7 +215,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, true);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = false;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -231,7 +232,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = false;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -246,7 +247,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const bool is_interleaved_transposed = false;
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(12, 14, 13, 1, 1, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, nullptr, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
}
diff --git a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
index d6507a06c4..e47518ad7d 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyInterleavedTransposed.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,9 +26,9 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -46,15 +46,16 @@ namespace test
namespace validation
{
using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
-// Create function for CLGEMMReshapeLHSMatrixKernel
-using CLGEMMReshapeLHSMatrix = CLSynthetizeFunction<CLGEMMReshapeLHSMatrixKernel>;
+// Create function for ClGemmReshapeLhsMatrixKernel
+using CLGEMMReshapeLHSMatrix = CLSynthetizeOperator<ClGemmReshapeLhsMatrixKernel>;
-// Create function for CLGEMMReshapeRHSMatrixKernel
-using CLGEMMReshapeRHSMatrix = CLSynthetizeFunction<CLGEMMReshapeRHSMatrixKernel>;
+// Create function for ClGemmReshapeRhsMatrixKernel
+using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<ClGemmReshapeRhsMatrixKernel>;
-// Create function for CLGEMMMatrixMultiplyKernel
-using CLGEMMMatrixMultiplyReshaped = CLSynthetizeFunction<CLGEMMMatrixMultiplyKernel>;
+// Create function for ClGemmMatrixMultiplyKernel
+using CLGEMMMatrixMultiplyReshaped = CLSynthetizeOperator<ClGemmMatrixMultiplyKernel>;
// Fixture for GEMMMatrixMultiplyInterleavedTransposedValidationFixture
template <typename T>
@@ -166,7 +167,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = false;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -183,7 +184,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = false;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -200,7 +201,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, true);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = false;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -217,7 +218,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = false;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
@@ -234,7 +235,7 @@ TEST_CASE(Negative, framework::DatasetMode::ALL)
const GEMMReshapeInfo reshape_info = GEMMReshapeInfo(16, 24, 13, 2, 4, 0, false, false);
const GPUTarget gpu_target = GPUTarget::MIDGARD;
const bool fp_mixed_precision = false;
- const auto status = CLGEMMMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
+ const auto status = ClGemmMatrixMultiplyKernel::validate(&lhs, &rhs, &bias, &out, alpha, beta, is_interleaved_transposed, reshape_info, gpu_target, fp_mixed_precision);
ARM_COMPUTE_EXPECT(bool(status) == false, framework::LogLevel::ERRORS);
}
}
diff --git a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
index ec6b87fbae..a737c687c4 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyNative.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,7 +26,7 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyNativeKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyNativeKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -44,9 +44,10 @@ namespace test
namespace validation
{
using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
-// Create function for CLGEMMMatrixMultiplyNativeKernel
-using CLGEMMMatrixMultiplyNative = CLSynthetizeFunction<CLGEMMMatrixMultiplyNativeKernel>;
+// Create function for ClGemmMatrixMultiplyNativeKernel
+using CLGEMMMatrixMultiplyNative = CLSynthetizeOperator<ClGemmMatrixMultiplyNativeKernel>;
// Fixture for CLGEMMMatrixMultiplyNative
template <typename T>
@@ -184,7 +185,7 @@ void validate_configuration(unsigned int m_value, unsigned int n_value, unsigned
// Create and configure function
CLGEMMMatrixMultiplyNative gemm;
- gemm.configure(&lhs, &rhs, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
+ gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), 1.0f, 1.0f, lhs_info, rhs_info, kernel_info);
}
} // namespace
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
index 52afb716e4..6f368a9650 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshaped.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,9 +26,9 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -46,15 +46,16 @@ namespace test
namespace validation
{
using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
-// Create function for CLGEMMReshapeLHSMatrixKernel
-using CLGEMMReshapeLHSMatrix = CLSynthetizeFunction<CLGEMMReshapeLHSMatrixKernel>;
+// Create function for ClGemmReshapeLhsMatrixKernel
+using CLGEMMReshapeLHSMatrix = CLSynthetizeOperator<ClGemmReshapeLhsMatrixKernel>;
-// Create function for CLGEMMReshapeRHSMatrixKernel
-using CLGEMMReshapeRHSMatrix = CLSynthetizeFunction<CLGEMMReshapeRHSMatrixKernel>;
+// Create function for ClGemmReshapeRhsMatrixKernel
+using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<ClGemmReshapeRhsMatrixKernel>;
-// Create function for CLGEMMMatrixMultiplyReshapedKernel
-using CLGEMMMatrixMultiplyReshaped = CLSynthetizeFunction<CLGEMMMatrixMultiplyReshapedKernel>;
+// Create function for ClGemmMatrixMultiplyReshapedKernel
+using CLGEMMMatrixMultiplyReshaped = CLSynthetizeOperator<ClGemmMatrixMultiplyReshapedKernel>;
// Fixture for CLGEMMMatrixMultiplyReshaped
template <typename T>
@@ -327,7 +328,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
framework::dataset::make("Expected", { true, true, false, false, false, true, true,true})),
input0_info ,input1_info, input2_info, output_info, lhs_info, rhs_info, gemm_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLGEMMMatrixMultiplyReshapedKernel::validate(&input0_info.clone()->set_is_resizable(true),
+ ARM_COMPUTE_EXPECT(bool(ClGemmMatrixMultiplyReshapedKernel::validate(&input0_info.clone()->set_is_resizable(true),
&input1_info.clone()->set_is_resizable(true),
&input2_info.clone()->set_is_resizable(true),
&output_info.clone()->set_is_resizable(true),1.f,1.f,
@@ -562,7 +563,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false})),
input0_info ,input1_info, input2_info, output_info, lhs_info, rhs_info, gemm_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLGEMMMatrixMultiplyReshapedKernel::validate(&input0_info.clone()->set_is_resizable(true),
+ ARM_COMPUTE_EXPECT(bool(ClGemmMatrixMultiplyReshapedKernel::validate(&input0_info.clone()->set_is_resizable(true),
&input1_info.clone()->set_is_resizable(true),
&input2_info.clone()->set_is_resizable(true),
&output_info.clone()->set_is_resizable(true),1.f,1.f,
@@ -933,7 +934,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
false})),
input0_info ,input1_info, input2_info, output_info, lhs_info, rhs_info, gemm_info, expected)
{
- ARM_COMPUTE_EXPECT(bool(CLGEMMMatrixMultiplyReshapedKernel::validate(&input0_info.clone()->set_is_resizable(true),
+ ARM_COMPUTE_EXPECT(bool(ClGemmMatrixMultiplyReshapedKernel::validate(&input0_info.clone()->set_is_resizable(true),
&input1_info.clone()->set_is_resizable(true),
&input2_info.clone()->set_is_resizable(true),
&output_info.clone()->set_is_resizable(true),1.f,1.f,
diff --git a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
index ebcecb8b78..88e99bcfef 100644
--- a/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
+++ b/tests/validation/CL/GEMMMatrixMultiplyReshapedOnlyRHS.cpp
@@ -26,8 +26,8 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmMatrixMultiplyReshapedOnlyRhsKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -45,12 +45,13 @@ namespace test
namespace validation
{
using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
-// Create function for CLGEMMReshapeRHSMatrixKernel
-using CLGEMMReshapeRHSMatrix = CLSynthetizeFunction<CLGEMMReshapeRHSMatrixKernel>;
+// Create function for ClGemmReshapeRhsMatrixKernel
+using CLGEMMReshapeRHSMatrix = CLSynthetizeOperator<ClGemmReshapeRhsMatrixKernel>;
-// Create function for CLGEMMMatrixMultiplyReshapedOnlyRHSKernel
-using CLGEMMMatrixMultiplyReshapedOnlyRHS = CLSynthetizeFunction<CLGEMMMatrixMultiplyReshapedOnlyRHSKernel>;
+// Create function for ClGemmMatrixMultiplyReshapedOnlyRhsKernel
+using CLGEMMMatrixMultiplyReshapedOnlyRHS = CLSynthetizeOperator<ClGemmMatrixMultiplyReshapedOnlyRhsKernel>;
// Fixture for CLGEMMMatrixMultiplyReshapedOnlyRHS
template <typename T>
diff --git a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
index 34c37dffde..f995608308 100644
--- a/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
+++ b/tests/validation/CL/GEMMReshapeLHSMatrix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeLhsMatrixKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -43,9 +43,10 @@ namespace test
namespace validation
{
using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
// Initialize the output tensor with zero and fill the border with zero
-using CLGEMMReshapeLHSMatrix = CLSynthetizeFunctionInitOutputWithZeroAndWithZeroConstantBorder<CLGEMMReshapeLHSMatrixKernel, 16>;
+using CLGEMMReshapeLHSMatrix = CLSynthetizeOperatorInitOutputWithZeroAndWithZeroConstantBorder<ClGemmReshapeLhsMatrixKernel, 16>;
template <typename T>
using CLGEMMReshapeLHSMatrixFixture = GEMMReshapeLHSMatrixValidationFixture<CLTensor, CLAccessor, CLGEMMReshapeLHSMatrix, T, false>;
diff --git a/tests/validation/CL/GEMMReshapeRHSMatrix.cpp b/tests/validation/CL/GEMMReshapeRHSMatrix.cpp
index 14048e81ec..ff1240ea2e 100644
--- a/tests/validation/CL/GEMMReshapeRHSMatrix.cpp
+++ b/tests/validation/CL/GEMMReshapeRHSMatrix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
+#include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
#include "tests/CL/CLAccessor.h"
#include "tests/CL/Helper.h"
#include "tests/PaddingCalculator.h"
@@ -73,9 +73,10 @@ const auto i_values = framework::dataset::make("interleave", { true, false });
} // namespace
using namespace arm_compute::misc::shape_calculator;
+using namespace arm_compute::opencl::kernels;
// Initialize the output tensor with zero and fill the border with zero
-using CLGEMMReshapeRHSMatrix = CLSynthetizeFunctionInitOutputWithZeroAndWithZeroConstantBorder<CLGEMMReshapeRHSMatrixKernel, 16>;
+using CLGEMMReshapeRHSMatrix = CLSynthetizeOperatorInitOutputWithZeroAndWithZeroConstantBorder<ClGemmReshapeRhsMatrixKernel, 16>;
template <typename T>
using CLGEMMReshapeRHSMatrixFixture = GEMMReshapeRHSMatrixValidationFixture<CLTensor, CLAccessor, CLGEMMReshapeRHSMatrix, T>;
@@ -117,7 +118,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
rhs_info.transpose = true;
rhs_info.interleave = true;
- bool has_error = bool(CLGEMMReshapeRHSMatrixKernel::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), rhs_info));
+ bool has_error = bool(ClGemmReshapeRhsMatrixKernel::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), rhs_info));
ARM_COMPUTE_EXPECT(has_error == expected, framework::LogLevel::ERRORS);
}
@@ -158,9 +159,9 @@ DATA_TEST_CASE(ValidatePadding, framework::DatasetMode::ALL, combine(combine(com
padding = round_up_width - output_shape[0];
}
- CLGEMMReshapeRHSMatrixKernel kernel;
+ ClGemmReshapeRhsMatrixKernel kernel;
- kernel.configure(&input, &output, rhs_info);
+ kernel.configure(CLKernelLibrary::get().get_compile_context(), input.info(), output.info(), rhs_info);
ARM_COMPUTE_EXPECT((output.info()->padding().right == padding), framework::LogLevel::ERRORS);
}
diff --git a/tests/validation/CL/UNIT/DynamicTensor.cpp b/tests/validation/CL/UNIT/DynamicTensor.cpp
index 833256039e..ad2d4892ba 100644
--- a/tests/validation/CL/UNIT/DynamicTensor.cpp
+++ b/tests/validation/CL/UNIT/DynamicTensor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,7 +29,6 @@
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "src/core/CL/kernels/CLIm2ColKernel.h"
#include "src/core/CL/kernels/CLL2NormalizeLayerKernel.h"
#include "src/core/CL/kernels/CLReductionOperationKernel.h"
diff --git a/tests/validation/CL/UNIT/WeightsRetention.cpp b/tests/validation/CL/UNIT/WeightsRetention.cpp
index acf795e48b..1965f0f7a5 100644
--- a/tests/validation/CL/UNIT/WeightsRetention.cpp
+++ b/tests/validation/CL/UNIT/WeightsRetention.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,16 +24,6 @@
#include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedKernel.h"
-#include "src/core/CL/kernels/CLGEMMMatrixMultiplyReshapedOnlyRHSKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeLHSMatrixKernel.h"
-#include "src/core/CL/kernels/CLGEMMReshapeRHSMatrixKernel.h"
#include "tests/AssetsLibrary.h"
#include "tests/CL/CLAccessor.h"
#include "tests/Globals.h"
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index 9ad27c782e..c118da66ae 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -175,7 +175,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiplyValidationFixture : public framework::Fixture
{
public:
@@ -226,8 +226,8 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(gpu_arch, &lhs, &rhs, &bias, &dst, alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
+ GEMMOperatorType gemm;
+ gemm.configure(gpu_arch, lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -252,7 +252,12 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -294,7 +299,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiply3DValidationFixture : public framework::Fixture
{
public:
@@ -344,8 +349,8 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(gpu_arch, &lhs, &rhs, &bias, &dst, alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
+ GEMMOperatorType gemm;
+ gemm.configure(gpu_arch, lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, false, reshape_info, fp16_mixed_precision, act_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -370,7 +375,12 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -411,7 +421,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyInterleavedTransposedValidationFixture : public framework::Fixture
{
public:
@@ -478,12 +488,12 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(gpu_arch, &lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(gpu_arch, lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -516,9 +526,16 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -560,7 +577,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyInterleavedTransposed3DValidationFixture : public framework::Fixture
{
public:
@@ -626,12 +643,12 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(gpu_arch, &lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(gpu_arch, lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, true, reshape_info, fp16_mixed_precision, act_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -664,9 +681,16 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -707,7 +731,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType, bool fp_mixed_precision = false>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType, bool fp_mixed_precision = false>
class GEMMMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
@@ -786,9 +810,9 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
validate_result = validate_result || !rhs_info.export_to_cl_image;
@@ -797,9 +821,9 @@ protected:
return nullptr;
}
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -832,9 +856,16 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -884,7 +915,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType, bool fp_mixed_precision = false>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMOperatorType, bool fp_mixed_precision = false>
class GEMMMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
{
public:
@@ -960,9 +991,9 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
validate_result = validate_result || !rhs_info.export_to_cl_image;
@@ -971,9 +1002,9 @@ protected:
return nullptr;
}
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs_reshaped, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs_reshaped.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -1006,9 +1037,16 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs_reshaped },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1057,7 +1095,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
{
public:
@@ -1131,8 +1169,8 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
validate_result = validate_result || !rhs_info.export_to_cl_image;
@@ -1141,8 +1179,8 @@ protected:
return nullptr;
}
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -1173,8 +1211,14 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1217,7 +1261,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename ReshapeRHSOperatorType, typename GEMMOperatorType>
class GEMMMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
{
public:
@@ -1289,8 +1333,8 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
- GEMMFunctionType gemm;
+ ReshapeRHSOperatorType reshape_rhs;
+ GEMMOperatorType gemm;
validate_result = bool(reshape_rhs.validate(rhs.info(), rhs_reshaped.info(), rhs_info));
validate_result = validate_result || !rhs_info.export_to_cl_image;
@@ -1299,8 +1343,8 @@ protected:
return nullptr;
}
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
- gemm.configure(&lhs, &rhs_reshaped, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
+ gemm.configure(lhs.info(), rhs_reshaped.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
if(has_pad_y)
{
@@ -1338,8 +1382,14 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- reshape_rhs.run();
- gemm.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs_reshaped },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1381,7 +1431,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiplyNativeValidationFixture : public framework::Fixture
{
public:
@@ -1445,8 +1495,8 @@ protected:
kernel_info.activation_info = act_info;
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(&lhs, &rhs, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ GEMMOperatorType gemm;
+ gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -1471,7 +1521,12 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
@@ -1513,7 +1568,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename T, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename T, typename GEMMOperatorType>
class GEMMMatrixMultiplyNative3DValidationFixture : public framework::Fixture
{
public:
@@ -1576,8 +1631,8 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- GEMMFunctionType gemm;
- gemm.configure(&lhs, &rhs, &bias, &dst, alpha, beta, lhs_info, rhs_info, kernel_info);
+ GEMMOperatorType gemm;
+ gemm.configure(lhs.info(), rhs.info(), bias.info(), dst.info(), alpha, beta, lhs_info, rhs_info, kernel_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
ARM_COMPUTE_ASSERT(rhs.info()->is_resizable());
@@ -1602,7 +1657,12 @@ protected:
fill(AccessorType(bias), 2);
// Compute GEMM
- gemm.run();
+ ITensorPack gemm_pack({ { ACL_SRC_0, &lhs },
+ { ACL_SRC_1, &rhs },
+ { ACL_SRC_2, &bias },
+ { ACL_DST, &dst }
+ });
+ gemm.run(gemm_pack);
return dst;
}
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index d7fe96cd3d..5cf210bab4 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -869,7 +869,7 @@ protected:
SimpleTensor<int16_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedValidationFixture : public framework::Fixture
{
public:
@@ -939,11 +939,11 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K));
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
@@ -969,8 +969,10 @@ protected:
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
gemm.run();
return dst;
@@ -1017,7 +1019,7 @@ protected:
SimpleTensor<int32_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeLHSFunctionType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename ReshapeLHSOperatorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshaped3DValidationFixture : public framework::Fixture
{
public:
@@ -1091,11 +1093,11 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeLHSFunctionType reshape_lhs;
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeLHSOperatorType reshape_lhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_lhs.configure(&lhs, &lhs_reshaped, lhs_info);
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
+ reshape_lhs.configure(lhs.info(), lhs_reshaped.info(), lhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
gemm.configure(&lhs_reshaped, &rhs_reshaped, &dst, lhs_info, rhs_info, GEMMReshapeInfo(M, N, K, 1, 1, m_h));
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
@@ -1121,8 +1123,10 @@ protected:
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_lhs.run();
- reshape_rhs.run();
+ ITensorPack reshape_lhs_pack = { { ACL_SRC, &lhs }, { ACL_DST, &lhs_reshaped } };
+ reshape_lhs.run(reshape_lhs_pack);
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
gemm.run();
return dst;
@@ -1171,7 +1175,7 @@ protected:
SimpleTensor<int32_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHSValidationFixture : public framework::Fixture
{
public:
@@ -1244,9 +1248,9 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
@@ -1270,7 +1274,8 @@ protected:
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_rhs.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
gemm.run();
return dst;
@@ -1312,7 +1317,7 @@ protected:
SimpleTensor<int32_t> _reference{};
};
-template <typename TensorType, typename AccessorType, typename ReshapeRHSFunctionType, typename GEMMFunctionType>
+template <typename TensorType, typename AccessorType, typename ReshapeRHSOperatorType, typename GEMMFunctionType>
class GEMMLowpMatrixMultiplyReshapedOnlyRHS3DValidationFixture : public framework::Fixture
{
public:
@@ -1389,9 +1394,9 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- ReshapeRHSFunctionType reshape_rhs;
+ ReshapeRHSOperatorType reshape_rhs;
GEMMFunctionType gemm;
- reshape_rhs.configure(&rhs, &rhs_reshaped, rhs_info);
+ reshape_rhs.configure(rhs.info(), rhs_reshaped.info(), rhs_info);
gemm.configure(&lhs, &rhs_reshaped, &dst, gemm_info);
ARM_COMPUTE_ASSERT(lhs.info()->is_resizable());
@@ -1415,7 +1420,8 @@ protected:
fill(AccessorType(rhs), 1);
// Compute GEMM
- reshape_rhs.run();
+ ITensorPack reshape_rhs_pack = { { ACL_SRC, &rhs }, { ACL_DST, &rhs_reshaped } };
+ reshape_rhs.run(reshape_rhs_pack);
gemm.run();
return dst;
diff --git a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
index 70bafcc143..a9d6c9b6aa 100644
--- a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
+++ b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
@@ -46,7 +46,7 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool reinterpret_input_as_3d = false>
+template <typename TensorType, typename AccessorType, typename OperatorType, typename T, bool reinterpret_input_as_3d = false>
class GEMMReshapeLHSMatrixValidationFixture : public framework::Fixture
{
public:
@@ -86,8 +86,8 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- FunctionType gemm_lhs_reshape;
- gemm_lhs_reshape.configure(&src, &dst, lhs_info, reinterpret_input_as_3d);
+ OperatorType gemm_lhs_reshape;
+ gemm_lhs_reshape.configure(src.info(), dst.info(), lhs_info, reinterpret_input_as_3d);
ARM_COMPUTE_ASSERT(src.info()->is_resizable());
@@ -104,7 +104,8 @@ protected:
fill(AccessorType(src));
// Compute GEMM LHS matrix reshape function
- gemm_lhs_reshape.run();
+ ITensorPack tensors = { { ACL_SRC, &src }, { ACL_DST, &dst } };
+ gemm_lhs_reshape.run(tensors);
return dst;
}
diff --git a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
index 1428adb3a7..cdb3ec3944 100644
--- a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
+++ b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
@@ -46,7 +46,7 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename OperatorType, typename T>
class GEMMReshapeRHSMatrixValidationFixture : public framework::Fixture
{
public:
@@ -85,8 +85,8 @@ protected:
// The output tensor will be auto-initialized within the function
// Create and configure function
- FunctionType gemm_rhs_reshape;
- gemm_rhs_reshape.configure(&src, &dst, rhs_info);
+ OperatorType gemm_rhs_reshape;
+ gemm_rhs_reshape.configure(src.info(), dst.info(), rhs_info);
ARM_COMPUTE_ASSERT(src.info()->is_resizable());
@@ -103,7 +103,8 @@ protected:
fill(AccessorType(src));
// Compute GEMM RHS matrix reshape function
- gemm_rhs_reshape.run();
+ ITensorPack tensors = { { ACL_SRC, &src }, { ACL_DST, &dst } };
+ gemm_rhs_reshape.run(tensors);
return dst;
}