author    Gunes Bayir <gunes.bayir@arm.com>  2023-01-03 21:26:09 +0000
committer Gunes Bayir <gunes.bayir@arm.com>  2023-01-06 13:45:22 +0000
commit    3a1e1256946028cb13f31521caec9f08235c7332 (patch)
tree      cc63b85bc30ecdc81d7218d7adf05d27a96e6411
parent    b3077fbaee868579f9a41888fef1f71286d6757c (diff)
download  ComputeLibrary-3a1e1256946028cb13f31521caec9f08235c7332.tar.gz
Handle Intermediate tensors within the sketch
- Intermediate tensor info objects are not created by the user anymore.
  They're returned from create_op and reused. This prevents allocation
  of the intermediate tensors in case of interface misuse.
- Sketch object handles intermediate tensor info pointers inside its
  implementation class via a unique pointer vector
- Conv2d operator is migrated into the new interface

Resolves: COMPMID-5776
Change-Id: I9422e3681eef4f2d2922f6d0a5d7786380837c6d
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8906
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
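To make the interface change concrete, here is the call pattern before and after, condensed from the tests touched by this patch (tensor shapes and attribute setup omitted):

    // Before: the caller declared the intermediate tensor info itself.
    //   auto ans_info = sketch.create_tensor_info();
    //   GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &ans_info, conv2d_attr);

    // After: the sketch owns the intermediate tensor info; create_op returns it.
    TensorInfo input_info  = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
    TensorInfo weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));

    ITensorInfo *conv_out_info = GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, conv2d_attr);

    TensorInfo dst_info = sketch.create_tensor_info();
    GpuOutput::create_op(sketch, conv_out_info, &dst_info);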
-rw-r--r--  arm_compute/core/ITensorInfo.h                                            |  4
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h                 |  4
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h               | 27
-rw-r--r--  src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h                     | 30
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp                     | 98
-rw-r--r--  src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp                     | 14
-rw-r--r--  src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp  |  4
-rw-r--r--  src/dynamic_fusion/utils/Utils.h                                          | 58
-rw-r--r--  tests/validation/dynamic_fusion/gpu/Integration.cpp                       | 50
-rw-r--r--  tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h     | 16
10 files changed, 198 insertions(+), 107 deletions(-)
diff --git a/arm_compute/core/ITensorInfo.h b/arm_compute/core/ITensorInfo.h
index ca2837e450..c48e6ebf79 100644
--- a/arm_compute/core/ITensorInfo.h
+++ b/arm_compute/core/ITensorInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2022 Arm Limited.
+ * Copyright (c) 2016-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ public:
*/
using Id = int32_t;
/** An invalid tensor id within a domain */
- static constexpr Id invalid_tensor_id = -1;
+ static constexpr Id invalid_tensor_id = 0;
/** Get the value representing dynamic dimension state
*
* @return Value representing dynamic dimension state
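Why move invalid_tensor_id from -1 to 0? It frees the negative range, so the id space can be partitioned by sign: positive ids for user-created tensors, negative ids for sketch-owned intermediates, and 0 for invalid. A minimal sketch of the convention (mirroring the predicates added in src/dynamic_fusion/utils/Utils.h later in this patch):

    #include <cstdint>

    using Id = int32_t;
    constexpr Id invalid_tensor_id = 0; // neither user (positive) nor intermediate (negative)

    bool is_user(Id id)         { return id > invalid_tensor_id; }
    bool is_intermediate(Id id) { return id < invalid_tensor_id; }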
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
index afbe2b8d0b..f19ad6dfc5 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -83,7 +83,7 @@ public:
*/
TensorInfo create_tensor_info(const ITensorInfo &tensor_info);
/** Create a default @ref TensorInfo associated with the workload sketch
- * It is usually used by a destination tensor whose @ref ITensorInfo is to be inferred automatically
+ * It is usually used by user input or output tensors
*
* @return TensorInfo Newly created tensor info
*/
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h
index 76decfd6cf..4ba237e3be 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -60,32 +60,35 @@ public:
* @param[in] src Source tensor
* @param[in] wei Weight tensor
* @param[in] bia (Optional) Bias tensor
- * @param[out] dst Destination tensor. If an uninitialized ITensorInfo is passed in, it will be auto-initialized
* @param[in] attributes Operator attributes
+ *
+ * @return pointer for the destination tensor
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *wei,
- ITensorInfo *bia,
- ITensorInfo *dst,
- const Attributes &attributes);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *wei,
+ ITensorInfo *bia,
+ const Attributes &attributes);
/** Check if the operator configuration is supported, irrespective of fusion
- * Similar to @ref GpuConv2d::create_op()
+ *
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] src Source tensor
+ * @param[in] wei Weight tensor
+ * @param[in] bia (Optional) Bias tensor
+ * @param[in] attributes Operator attributes
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
const ITensorInfo *wei,
const ITensorInfo *bia,
- const ITensorInfo *dst,
const Attributes &attributes);
/** Check if the operator configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuConv2d::create_op()
+ * Similar to @ref GpuConv2d::create_op()
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
const ITensorInfo *wei,
const ITensorInfo *bia,
- const ITensorInfo *dst,
const Attributes &attributes);
};
} // namespace dynamic_fusion
diff --git a/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h b/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h
index 3997395c98..08796b607b 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h
+++ b/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,6 +29,9 @@
#include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.h"
#include "src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.h"
+#include <memory>
+#include <vector>
+
namespace arm_compute
{
namespace experimental
@@ -48,7 +51,8 @@ public:
: _context{ context },
_comp_services{},
_component_graph{ &_comp_services },
- _operator_group{}
+ _operator_group{},
+ _interm_tensor_info_list{ std::vector<std::unique_ptr<TensorInfo>>() }
{
}
/** Prevent instances of this class from being copy constructed */
@@ -97,13 +101,25 @@ public:
{
return component_graph().fuse().write_workload_code();
}
+ /** Create an intermediate tensor info and save it
+ *
+ * @return ITensorInfo The created intermediate tensor info object pointer
+ */
+ ITensorInfo *create_intermediate_tensor()
+ {
+ auto uptr = std::make_unique<TensorInfo>();
+ uptr->set_id(-allocate_new_tensor_id()); // intermediate tensors must have negative id
+ _interm_tensor_info_list.emplace_back(std::move(uptr));
+ return _interm_tensor_info_list.back().get();
+ }
private:
- Context *_context;
- GpuComponentServices _comp_services;
- GpuKernelComponentGraph _component_graph;
- GpuOperatorGroup _operator_group;
- ITensorInfo::Id _next_id{ ITensorInfo::invalid_tensor_id };
+ Context *_context;
+ GpuComponentServices _comp_services;
+ GpuKernelComponentGraph _component_graph;
+ GpuOperatorGroup _operator_group;
+ ITensorInfo::Id _next_id{ ITensorInfo::invalid_tensor_id };
+ std::vector<std::unique_ptr<TensorInfo>> _interm_tensor_info_list;
};
} // namespace dynamic_fusion
} // namespace experimental
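A side note on the storage choice above: create_intermediate_tensor() hands out raw pointers into _interm_tensor_info_list, and those stay safe across later insertions because the vector holds std::unique_ptr elements — the pointed-to TensorInfo objects live on the heap and never move when the vector reallocates. A self-contained sketch of that guarantee:

    #include <cassert>
    #include <memory>
    #include <vector>

    int main()
    {
        std::vector<std::unique_ptr<int>> list;
        list.emplace_back(std::make_unique<int>(42));
        int *first = list.back().get();

        // Force the vector's internal buffer to reallocate several times.
        for(int i = 0; i < 1000; ++i)
        {
            list.emplace_back(std::make_unique<int>(i));
        }

        // The pointee never moved; a plain std::vector<int> would not give this guarantee.
        assert(first == list.front().get() && *first == 42);
        return 0;
    }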
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
index 00b4fbccb2..00fbb730b9 100644
--- a/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,33 +111,29 @@ void calculate_and_init_dst_if_empty(ITensorInfo *dst, const ITensorInfo *src, c
}
}
-constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
-} // namespace
-
-Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
- const ITensorInfo *src,
- const ITensorInfo *wei,
- const ITensorInfo *bia,
- const ITensorInfo *dst,
- const Conv2dAttributes &attributes)
+/* A helper method to reduce the duplication in dst tensor initialization
+* when calling validate()
+*/
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const ITensorInfo *dst,
+ const Conv2dAttributes &attributes)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei, dst);
- // Auto initialize dst tensor info
- TensorInfo dst_info_to_validate = *dst;
- const auto data_layout = src->data_layout();
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
- {
- auto shape = misc::shape_calculator::compute_deep_convolution_shape(src->tensor_shape(), data_layout, wei->tensor_shape(),
- PadStrideInfo(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
- attributes.pad().right,
- attributes.pad().top, attributes.pad().bottom, DimensionRoundingType::FLOOR)); // use the default DimensionRoundingType
+ TensorInfo dst_info_to_validate;
+ const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
- // Checks performed when dst is configured
- if(dst->total_size() != 0)
- {
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), shape);
- }
- auto_init_if_empty(dst_info_to_validate, src->clone()->set_tensor_shape(shape));
+ const DataLayout data_layout = src->data_layout();
+ if(dst != nullptr)
+ {
+ dst_info_to_validate_ptr = dst;
+ }
+ else
+ {
+ calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
}
// Check support level
@@ -147,7 +143,7 @@ Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
// Check components
- const auto gpu_target = context.gpu_target();
+ const auto gpu_target = context.gpu_target();
if(context.gpu_language() == GpuLanguage::OpenCL)
{
const auto cl_compile_ctx = context.cl_compile_context();
@@ -162,13 +158,13 @@ Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
settings.fast_relaxed_math(
(gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST)
- && (dst_info_to_validate.data_type() == DataType::F32 || dst_info_to_validate.data_type() == DataType::F16));
+ && (dst_info_to_validate_ptr->data_type() == DataType::F32 || dst_info_to_validate_ptr->data_type() == DataType::F16));
ArgumentPack<ITensorInfo> arguments;
arguments.add_const_tensor(ACL_SRC_0, src);
arguments.add_const_tensor(ACL_SRC_1, wei);
arguments.add_const_tensor(ACL_SRC_2, bia);
- arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+ arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
ARM_COMPUTE_RETURN_ON_ERROR(ClComponentDirectConv2d::validate(properties, arguments, attributes, settings));
}
}
@@ -179,25 +175,40 @@ Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
return Status{};
}
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *wei,
+ const ITensorInfo *bia,
+ const Conv2dAttributes &attributes)
+{
+ return is_supported_op_helper(context, src, wei, bia, nullptr, attributes);
+}
+
Status GpuConv2d::validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
const ITensorInfo *wei,
const ITensorInfo *bia,
- const ITensorInfo *dst,
const Conv2dAttributes &attributes)
{
- ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
// Check if tensors have valid id. I.e. they are created from a sketch
- ARM_COMPUTE_RETURN_ERROR_ON(
- !src->has_valid_id() || !wei->has_valid_id() || !dst->has_valid_id());
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !wei->has_valid_id());
if(bia != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON(!bia->has_valid_id());
}
+ // This tensor info will have invalid id but because all the existing tensors in the
+ // sketch have valid ids and the DependencyGraph implementation has no notion of validness
+ // regarding tensor ids, it'll be just another tensor id and will validate
+ // Additionally, a new dst id is added every time in create_op, thus there's no need to validate it
+ TensorInfo dst_info_to_validate;
+
// Auto initialize dst tensor info
- TensorInfo dst_info_to_validate = *dst;
calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
// Perform fusion test
@@ -212,25 +223,26 @@ Status GpuConv2d::validate_op(const GpuWorkloadSketch &sketch,
"Operator fusion test failed. This operator cannot be fused into the workload");
// Check if configuration is supported
- return is_supported_op(*sketch.gpu_context(), src, wei, bia, &dst_info_to_validate, attributes);
+ return is_supported_op_helper(*sketch.gpu_context(), src, wei, bia, &dst_info_to_validate, attributes);
}
-void GpuConv2d::create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *wei,
- ITensorInfo *bia,
- ITensorInfo *dst,
- const Conv2dAttributes &attributes)
+ITensorInfo *GpuConv2d::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *wei,
+ ITensorInfo *bia,
+ const Conv2dAttributes &attributes)
{
- ARM_COMPUTE_LOG_PARAMS(src, wei, bia, dst, attributes);
+ ARM_COMPUTE_LOG_PARAMS(src, wei, bia, attributes);
PadStrideInfo conv_info(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
attributes.pad().right,
attributes.pad().top, attributes.pad().bottom, DimensionRoundingType::FLOOR);
// Initialize the direct convolution descriptor
const DirectConvComputeKernelInfo desc = config_direct_convolution_nhwc(src, wei, conv_info);
+ ITensorInfo *dst = sketch.implementation().create_intermediate_tensor();
+
// Assert validation
- ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, src, wei, bia, dst, attributes));
+ ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, src, wei, bia, attributes));
ARM_COMPUTE_ERROR_ON_NULLPTR(src, wei, dst);
// Auto initialize dst tensor
@@ -295,6 +307,8 @@ void GpuConv2d::create_op(GpuWorkloadSketch &sketch,
const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
sketch.implementation().operator_group().add_operator(op);
+
+ return dst;
}
} // namespace dynamic_fusion
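The shape of the refactor above is worth calling out: both public entry points now funnel into is_supported_op_helper(), which takes a nullable dst — is_supported_op() passes nullptr and lets the helper infer a destination, while validate_op() passes the dst it auto-initialized. A hypothetical reduction of the pattern (Info, check_helper and the shape comparison are made-up stand-ins for the real types and checks):

    struct Info { int shape = 0; };

    bool check_helper(const Info *src, const Info *dst)
    {
        Info dst_fallback; // local storage for the inferred destination
        const Info *dst_ptr = &dst_fallback;
        if(dst != nullptr)
        {
            dst_ptr = dst;                   // validate_op path: dst supplied by caller
        }
        else
        {
            dst_fallback.shape = src->shape; // is_supported_op path: infer dst from src
        }
        return dst_ptr->shape == src->shape; // stand-in for the real component checks
    }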
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
index 60c2281433..cd5487c10b 100644
--- a/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,12 +24,13 @@
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
-#include "src/core/helpers/AutoConfiguration.h"
#include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
#include "src/dynamic_fusion/sketch/ArgumentPack.h"
#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.h"
+#include "src/dynamic_fusion/utils/Utils.h"
namespace arm_compute
{
@@ -65,7 +66,7 @@ Status GpuOutput::validate_op(const GpuWorkloadSketch &sketch,
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
- ARM_COMPUTE_RETURN_ERROR_ON(!dst->has_valid_id());
+ ARM_COMPUTE_RETURN_ERROR_ON(!is_user_tensor(dst));
// Initialize the destination tensor info.
TensorInfo dst_to_validate = *dst;
@@ -79,12 +80,11 @@ Status GpuOutput::validate_op(const GpuWorkloadSketch &sketch,
tensors.add_const_tensor(ACL_SRC_0, src);
tensors.add_const_tensor(ACL_DST_0, &dst_to_validate);
- const auto group = sketch.implementation().operator_group();
- const auto op = group.new_operator(operator_type, tensors);
+ const auto group = sketch.implementation().operator_group();
+ const auto op = group.new_operator(operator_type, tensors);
const auto success = group.try_add_operator(op, true);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(!success, "This operator cannot be fused into the workload.");
- ARM_COMPUTE_UNUSED(success);
const auto status = is_supported_op(*sketch.gpu_context(), src, dst);
return status;
@@ -101,7 +101,7 @@ void GpuOutput::create_op(GpuWorkloadSketch &sketch,
auto_init_if_empty(*dst, *src);
// Translate into components and add to component graph
- auto &comp_graph = sketch.implementation().component_graph();
+ auto &comp_graph = sketch.implementation().component_graph();
const auto sketch_ctx = sketch.implementation().context();
if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp
index 2eafe62bfa..0972b4e8e2 100644
--- a/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,7 +62,7 @@ void GpuKernelVariableTable::declare_variable(const GpuKernelComponentGroup &com
{
// Declare variable associated with the tensor
std::stringstream ss;
- ss << alias << "_t" << tensor->id();
+ ss << alias << "_t" << abs(tensor->id());
const auto uniq_name = ss.str();
TensorVariable var{ tensor->id(), uniq_name, argument_info };
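Because intermediate tensors now carry negative ids, the abs() above keeps the generated variable name a legal identifier. A quick illustration (the alias and id value are made up):

    #include <cstdlib>
    #include <iostream>
    #include <sstream>

    int main()
    {
        const int id = -3; // e.g. an intermediate tensor id after this patch
        std::stringstream ss;
        ss << "dst" << "_t" << std::abs(id);
        std::cout << ss.str() << '\n'; // prints "dst_t3" rather than "dst_t-3"
        return 0;
    }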
diff --git a/src/dynamic_fusion/utils/Utils.h b/src/dynamic_fusion/utils/Utils.h
new file mode 100644
index 0000000000..063dbdc44e
--- /dev/null
+++ b/src/dynamic_fusion/utils/Utils.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_DYNAMIC_FUSION_UTILS_UTILS
+#define SRC_DYNAMIC_FUSION_UTILS_UTILS
+
+#include "arm_compute/core/ITensorInfo.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+bool is_user_tensor(const ITensorInfo *tensor_info)
+{
+ return tensor_info->id() > ITensorInfo::invalid_tensor_id;
+}
+
+bool is_intermediate_tensor(const ITensorInfo *tensor_info)
+{
+ return tensor_info->id() < ITensorInfo::invalid_tensor_id;
+}
+
+bool is_valid_tensor(const ITensorInfo *tensor_info)
+{
+ return tensor_info->has_valid_id();
+}
+
+bool is_invalid_tensor(const ITensorInfo *tensor_info)
+{
+ return !is_valid_tensor(tensor_info);
+}
+}
+}
+}
+
+#endif /* SRC_DYNAMIC_FUSION_UTILS_UTILS */
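How these predicates line up with the sketch's id assignment, assuming the APIs introduced in this patch (not a standalone program; tensor and attribute setup abbreviated):

    TensorInfo src_info = sketch.create_tensor_info(/* ... */);   // user tensor: positive id
    ITensorInfo *out    = GpuConv2d::create_op(sketch, &src_info, &wei_info, nullptr, attributes);

    assert(is_user_tensor(&src_info));   // id() > invalid_tensor_id (== 0)
    assert(is_intermediate_tensor(out)); // id() < 0, set via -allocate_new_tensor_id()
    assert(is_valid_tensor(out));        // intermediates still count as having valid ids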
diff --git a/tests/validation/dynamic_fusion/gpu/Integration.cpp b/tests/validation/dynamic_fusion/gpu/Integration.cpp
index 0a689fa4b6..effd8bfeee 100644
--- a/tests/validation/dynamic_fusion/gpu/Integration.cpp
+++ b/tests/validation/dynamic_fusion/gpu/Integration.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,9 +28,9 @@
#include "arm_compute/dynamic_fusion/sketch/OperatorAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
-#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "tests/CL/CLAccessor.h"
@@ -38,9 +38,9 @@
#include "tests/validation/Validation.h"
#include "tests/validation/dynamic_fusion/Utils.h"
#include "tests/validation/reference/ConvolutionLayer.h"
-#include "tests/validation/reference/Permute.h"
-#include "tests/validation/reference/ElementwiseOperations.h"
#include "tests/validation/reference/DepthConvertLayer.h"
+#include "tests/validation/reference/ElementwiseOperations.h"
+#include "tests/validation/reference/Permute.h"
using namespace arm_compute::experimental::dynamic_fusion;
using namespace arm_compute::test::validation::utils;
@@ -74,13 +74,13 @@ TEST_CASE(Conv2d, framework::DatasetMode::ALL)
// Fuse conv2d
Conv2dAttributes conv2d_attr{};
- auto input_info = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
- auto weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));
- auto ans_info = sketch.create_tensor_info();
- GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &ans_info, conv2d_attr);
+ TensorInfo input_info = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
+ TensorInfo weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));
- auto dst_info = sketch.create_tensor_info();
- GpuOutput::create_op(sketch, &ans_info, &dst_info);
+ ITensorInfo *conv_out_info = GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, conv2d_attr);
+
+ TensorInfo dst_info = sketch.create_tensor_info();
+ GpuOutput::create_op(sketch, conv_out_info, &dst_info);
// Configure runtime
ClWorkloadRuntime runtime;
@@ -150,8 +150,8 @@ TEST_CASE(Add_Output_Add_Output, framework::DatasetMode::ALL)
*/
CLScheduler::get().default_reinit();
- const auto data_type = DataType::F32;
- const auto t_input_shape = TensorShape(33, 3, 2);
+ const auto data_type = DataType::F32;
+ const auto t_input_shape = TensorShape(33, 3, 2);
// Create a new workload sketch
auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
@@ -249,8 +249,8 @@ TEST_CASE(Add_Output_Add_Cast_Cast_Output, framework::DatasetMode::ALL)
*/
CLScheduler::get().default_reinit();
- const auto data_type = DataType::F32;
- const auto t_input_shape = TensorShape(3, 8, 5);
+ const auto data_type = DataType::F32;
+ const auto t_input_shape = TensorShape(3, 8, 5);
// Create a new workload sketch
auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
@@ -335,8 +335,8 @@ TEST_CASE(Add_Output_Add_Cast_Cast_Output, framework::DatasetMode::ALL)
SimpleTensor<float> ref_t_in_1{ t_input_shape, data_type, 1, QuantizationInfo() };
SimpleTensor<float> ref_t_in_2{ t_input_shape, data_type, 1, QuantizationInfo() };
- SimpleTensor<float> ref_t_out_0{ t_input_shape, data_type, 1, QuantizationInfo() };
- SimpleTensor<float> ref_t_ans_1{ t_input_shape, data_type, 1, QuantizationInfo() };
+ SimpleTensor<float> ref_t_out_0{ t_input_shape, data_type, 1, QuantizationInfo() };
+ SimpleTensor<float> ref_t_ans_1{ t_input_shape, data_type, 1, QuantizationInfo() };
// Fill reference
fill<float>(ref_t_in_0, 0, library.get());
@@ -377,28 +377,30 @@ TEST_CASE(Multiple_Complex_Ops_0, framework::DatasetMode::ALL)
GpuWorkloadSketch sketch{ &gpu_ctx };
// Create tensor infos
- auto input_info = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
- auto weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));
- auto dst_info = sketch.create_tensor_info();
+ TensorInfo input_info = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
+ TensorInfo weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));
+ ITensorInfo *dst_info;
// Fuse conv2d into the workload
{
// Validate operator
- const auto success = GpuConv2d::validate_op(sketch, &input_info, &weight_info, nullptr, &dst_info, conv2d_attr);
+ const Status success = GpuConv2d::validate_op(sketch, &input_info, &weight_info, nullptr, conv2d_attr);
ARM_COMPUTE_EXPECT(bool(success), framework::LogLevel::ERRORS);
- GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &dst_info, conv2d_attr);
+ dst_info = GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, conv2d_attr);
}
// Create tensor infos
- auto weight_info_2 = sketch.create_tensor_info(t_weight_info);
- auto dst_info_2 = sketch.create_tensor_info();
+ TensorInfo weight_info_2 = sketch.create_tensor_info(t_weight_info);
// Fuse conv2d into the workload
{
// Validate operator, should fail
- const auto success = GpuConv2d::validate_op(sketch, &dst_info, &weight_info_2, nullptr, &dst_info_2, conv2d_attr);
+ const Status success = GpuConv2d::validate_op(sketch, dst_info, &weight_info_2, nullptr, conv2d_attr);
+ const auto expected_error_str = "Operator fusion test failed. This operator cannot be fused into the workload";
+
ARM_COMPUTE_EXPECT(!bool(success), framework::LogLevel::ERRORS);
+ ARM_COMPUTE_EXPECT((success.error_description().find(expected_error_str) != std::string::npos), framework::LogLevel::ERRORS);
}
}
TEST_SUITE_END() // Invalid_Fusion_Should_Fail
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
index 1a2676c438..488d449782 100644
--- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -110,15 +110,13 @@ protected:
GpuWorkloadSketch sketch{ &gpu_ctx };
// Create sketch tensors
- auto input_info = sketch.create_tensor_info(TensorInfo(input_shape, 1, _data_type, _data_layout));
- auto weight_info = sketch.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout));
- auto bias_info = sketch.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout));
- auto dst_info = sketch.create_tensor_info();
+ TensorInfo input_info = sketch.create_tensor_info(TensorInfo(input_shape, 1, _data_type, _data_layout));
+ TensorInfo weight_info = sketch.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout));
+ TensorInfo bias_info = sketch.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout));
+ TensorInfo dst_info = sketch.create_tensor_info();
- auto ans_info = sketch.create_tensor_info();
-
- FunctionType::create_op(sketch, &input_info, &weight_info, &bias_info, &ans_info, conv2d_attr);
- GpuOutput::create_op(sketch, &ans_info, &dst_info);
+ ITensorInfo *ans_info = FunctionType::create_op(sketch, &input_info, &weight_info, &bias_info, conv2d_attr);
+ GpuOutput::create_op(sketch, ans_info, &dst_info);
// Configure runtime
ClWorkloadRuntime runtime;