author    Viet-Hoa Do <viet-hoa.do@arm.com>  2022-12-13 13:09:10 +0000
committer Viet-Hoa Do <viet-hoa.do@arm.com>  2022-12-16 15:17:51 +0000
commit    b84e25313e5dc7acbc03623e1e071e845047c111 (patch)
tree      fbee083f1262017555c64c3280da45e2b638992e
parent    a0ae8d2e6c57fd95c0edaf659b9df8b8c540d051 (diff)
Add output operator for dynamic fusion
* The output of the fused operator must be explicitly specified using the
  GpuOutput operator.
* Any temporary tensor used to connect the output of one operator to the
  input of another is marked as no-alloc and is not allocated as a tensor
  in memory.

Resolves: COMPMID-5771
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I5ae8e800f8f737db23a055a92b01c4f1d78c3bb8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8794
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
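For context, a minimal usage sketch of the new pattern, adapted from the Conv2d integration test updated below. The workload context setup is taken from the test fixtures; t_input_shape, t_weight_shape, data_type, data_layout and conv2d_attr are placeholders:

// Minimal sketch (placeholder names), mirroring the updated Conv2d test.
auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
auto gpu_ctx        = GpuWorkloadContext{ &cl_compile_ctx };
GpuWorkloadSketch sketch{ &gpu_ctx };

auto input_info  = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
auto weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));

// The conv result is only an intermediate: it becomes a NoAlloc temporary.
auto ans_info = sketch.create_tensor_info();
GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &ans_info, conv2d_attr);

// The workload output must now be terminated explicitly with GpuOutput.
auto dst_info = sketch.create_tensor_info();
GpuOutput::create_op(sketch, &ans_info, &dst_info);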
-rw-r--r--Android.bp1
-rw-r--r--arm_compute/dynamic_fusion/sketch/MemoryDescriptor.h1
-rw-r--r--arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h87
-rw-r--r--filelist.json1
-rw-r--r--src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp9
-rw-r--r--src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp13
-rw-r--r--src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp27
-rw-r--r--src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h2
-rw-r--r--src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp141
-rw-r--r--src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp5
-rw-r--r--src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp7
-rw-r--r--src/dynamic_fusion/sketch/utils/DependencyGraph.h27
-rw-r--r--tests/validation/dynamic_fusion/gpu/Integration.cpp8
-rw-r--r--tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h7
-rw-r--r--tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h7
-rw-r--r--tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h72
-rw-r--r--tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h6
-rw-r--r--tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h51
18 files changed, 350 insertions, 122 deletions
diff --git a/Android.bp b/Android.bp
index f4d94f9508..963dd84f86 100644
--- a/Android.bp
+++ b/Android.bp
@@ -610,6 +610,7 @@ cc_library_static {
"src/dynamic_fusion/sketch/gpu/operators/GpuClamp.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp",
+ "src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp",
"src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateActivation.cpp",
diff --git a/arm_compute/dynamic_fusion/sketch/MemoryDescriptor.h b/arm_compute/dynamic_fusion/sketch/MemoryDescriptor.h
index deedf62262..25023ff0a1 100644
--- a/arm_compute/dynamic_fusion/sketch/MemoryDescriptor.h
+++ b/arm_compute/dynamic_fusion/sketch/MemoryDescriptor.h
@@ -37,6 +37,7 @@ enum class MemoryType
{
User = 0, /**< Memory coming directly from users, e.g. for argument tensors */
Auxiliary = 1, /**< Additional memory required by the workload tensor, e.g. for temporary tensors */
+ NoAlloc = 2, /**< Temporary tile which is not allocated as a whole tensor in memory */
};
/** Memory information for tensors with @ref MemoryType::Auxiliary.
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h
new file mode 100644
index 0000000000..2511b0efd5
--- /dev/null
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUOUTPUT
+#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUOUTPUT
+
+#include "arm_compute/core/ITensorInfo.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+
+/** Forward declaration */
+class GpuWorkloadContext;
+class GpuWorkloadSketch;
+
+/** Operator interface for explicitly specifying the output of a fused workload. */
+class GpuOutput final
+{
+public:
+ /** Create an operator and fuse it into the workload sketch.
+ * @note If @ref validate_op() fails, the creation also fails and may throw an error.
+ * @note If @ref validate_op() fails, @p sketch remains unchanged and valid.
+ *
+ * Valid data type configurations:
+ * - Any
+ *
+ * Valid data layouts:
+ * - Any
+ *
+ * @param[in, out] sketch Workload sketch into which the operator will be fused.
+ * @param[in, out] src Source tensor info.
+ * @param[in, out] dst Destination tensor info.
+ * If an uninitialized ITensorInfo is passed in, it will be auto-initialized.
+ */
+ static void create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *dst);
+
+ /** Check if the operator configuration is supported, irrespective of fusion.
+ *
+ * @param[in] context Workload context within which the operator is running.
+ * @param[in] src Source tensor info.
+ * @param[in] dst Destination tensor info.
+ */
+ static Status is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst);
+
+ /** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
+ *
+ * Similar to @ref GpuOutput::create_op().
+ */
+ static Status validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst);
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUOUTPUT */
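A hedged example of the contract documented in the header above: validate_op() leaves the sketch unchanged on failure, so it can serve as a non-mutating dry run before create_op(). Tensor names are placeholders; error_description() is the standard arm_compute::Status accessor.

// Sketch only: probe support and fusion legality first, then commit.
const Status status = GpuOutput::validate_op(sketch, &src_info, &dst_info);
if(bool(status))
{
    GpuOutput::create_op(sketch, &src_info, &dst_info); // mutates the sketch
}
else
{
    std::cerr << status.error_description() << std::endl;
}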
diff --git a/filelist.json b/filelist.json
index 89ac6461b5..146731b95e 100644
--- a/filelist.json
+++ b/filelist.json
@@ -2221,6 +2221,7 @@
"src/dynamic_fusion/sketch/gpu/operators/GpuCast.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuClamp.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp",
+ "src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp",
"src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.cpp",
"src/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.cpp",
"src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateActivation.cpp",
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
index 36168d14f1..7e427fef72 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
@@ -132,10 +132,11 @@ Status create_aux_tensors(ClAuxTensors *aux_tensors, const GpuWorkloadSourceCode
ARM_COMPUTE_ERROR_ON(tensor_info.id() != t_id);
const auto aux_memory_info = workload_arg->memory_descriptor()->aux_memory_info;
tensor_object = aux_tensors->add_aux_tensor(tensor_info, aux_memory_info);
- }
- if(tensor_object == nullptr)
- {
- return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Failed to construct an auxiliary tensor");
+
+ if(tensor_object == nullptr)
+ {
+ return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Failed to construct an auxiliary tensor");
+ }
}
}
return Status{};
diff --git a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
index 6e6422c957..1f90aab477 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
+++ b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
@@ -42,15 +42,24 @@ namespace
*/
MemoryDescriptorMap assign_memory_descriptors(const std::map<ITensorInfo::Id, const ITensorInfo *> tensors, const DependencyGraph &graph)
{
+ const auto all_tensors = graph.all_tensors();
+ const auto src_tensors = graph.global_src_tensors();
+ const auto dst_tensors = graph.global_dst_tensors();
+ const auto interm_tensors = graph.intermediate_tensors();
+
MemoryDescriptorMap mem_map{};
- for(auto t_id : graph.all_tensors())
+ for(auto t_id : all_tensors)
{
const auto &tensor = tensors.at(t_id);
// Only global src and dst tensors to the entire component graph are "User" tensors, which are user-specified memories
- if(is_in(t_id, graph.global_src_tensors()) || is_in(t_id, graph.global_dst_tensors()))
+ if(is_in(t_id, src_tensors) || is_in(t_id, dst_tensors))
{
mem_map[t_id] = MemoryDescriptor{ MemoryType::User };
}
+ else if(is_in(t_id, interm_tensors))
+ {
+ mem_map[t_id] = MemoryDescriptor{ MemoryType::NoAlloc };
+ }
else
{
AuxMemoryInfo aux_mem_info{ tensor->total_size() };
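To make the new mapping concrete, here is a toy, self-contained model of the three-way classification above (names assumed; not the library API): graph-global sources and destinations are User memory, tensors that only connect fused operators become NoAlloc, and everything else gets an Auxiliary allocation.

#include <algorithm>
#include <vector>

enum class MemoryType { User, Auxiliary, NoAlloc };

// Toy stand-in for assign_memory_descriptors(): classify one tensor id
// given the graph-global sources/destinations and the intermediates.
MemoryType classify(int t_id,
                    const std::vector<int> &src_tensors,
                    const std::vector<int> &dst_tensors,
                    const std::vector<int> &interm_tensors)
{
    const auto is_in = [](int id, const std::vector<int> &v) {
        return std::find(v.begin(), v.end(), id) != v.end();
    };
    if(is_in(t_id, src_tensors) || is_in(t_id, dst_tensors))
    {
        return MemoryType::User;     // user-provided argument tensor
    }
    if(is_in(t_id, interm_tensors))
    {
        return MemoryType::NoAlloc;  // lives only as a temporary tile
    }
    return MemoryType::Auxiliary;    // workload-managed scratch tensor
}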
diff --git a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp
index c560e9a931..66760b3812 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp
+++ b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp
@@ -40,7 +40,6 @@ namespace dynamic_fusion
GpuLogicalKernel::GpuLogicalKernel(GpuComponentServices *services, const GpuKernelComponentGroup &components)
: _services{ services }, _comp_group{ components }, _store_components{}
{
- add_load_store();
}
GpuKernelSourceCode GpuLogicalKernel::write_kernel_code()
@@ -57,32 +56,6 @@ GpuKernelSourceCode GpuLogicalKernel::write_kernel_code()
return code;
}
-
-void GpuLogicalKernel::add_load_store()
-{
- const auto dst_tensors = _comp_group.get_dst_tensors();
- // Each dst tensor from the component group requires exactly one store component
- for(const auto &dst_tensor : dst_tensors)
- {
- ArgumentPack<ITensorInfo> tensors;
- // Pass same destination tensor to both source and destination of the store component
- // In other words, the addition of a store component does not create a new dst tensor
- // This way we avoid the issue of the dst tensor of the component group differs from that of a logical kernel
- // This may seem to violate the acyclic-ness of the component graph. But it is fine because at the point of
- // the construction of the logical kernel, we do not need a graph representation of components anymore
- // (the graph has been serialized)
- tensors.add_const_tensor(ACL_SRC_0, dst_tensor);
- tensors.add_const_tensor(ACL_DST_0, dst_tensor);
-
- auto store = _services->component_factory().create<ClComponentStore>(
- _comp_group.get_root_component()->properties(), // Store component share the same properties as that of the root component
- tensors);
- _store_components.push_back(std::move(store));
- auto success = _comp_group.add_component(_store_components.back().get());
- ARM_COMPUTE_UNUSED(success);
- ARM_COMPUTE_ERROR_ON(!success); // It's guaranteed that any load store insertion should be successful
- }
-}
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h
index 4ce4443f60..2654224329 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h
+++ b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h
@@ -65,8 +65,6 @@ public:
GpuKernelSourceCode write_kernel_code();
private:
- void add_load_store();
-
GpuComponentServices *_services;
GpuKernelComponentGroup _comp_group{};
std::vector<std::unique_ptr<IGpuKernelComponent>> _store_components{};
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
new file mode 100644
index 0000000000..017536df6c
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/common/utils/Log.h"
+
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+constexpr GpuOperatorType operator_type = GpuOperatorType::Simple;
+}
+
+Status GpuOutput::is_supported_op(const GpuWorkloadContext &context,
+ const ITensorInfo *src,
+ const ITensorInfo *dst)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+
+ // Initialize the destination tensor info.
+ TensorInfo dst_to_validate = *dst;
+ auto_init_if_empty(dst_to_validate, *src);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, &dst_to_validate);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, &dst_to_validate);
+
+ ARM_COMPUTE_UNUSED(context);
+ return Status{};
+}
+
+Status GpuOutput::validate_op(const GpuWorkloadSketch &sketch,
+ const ITensorInfo *src,
+ const ITensorInfo *dst)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+ ARM_COMPUTE_RETURN_ERROR_ON(!dst->has_valid_id());
+
+ // Initialize the destination tensor info.
+ TensorInfo dst_to_validate = *dst;
+ auto_init_if_empty(dst_to_validate, *src);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, &dst_to_validate);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, &dst_to_validate);
+
+ // Perform fusion test.
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, &dst_to_validate);
+
+ const auto group = sketch.implementation().operator_group();
+ const auto op = group.new_operator(operator_type, tensors);
+ const auto success = group.try_add_operator(op);
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(!success, "This operator cannot be fused into the workload.");
+ ARM_COMPUTE_UNUSED(success);
+
+ const auto status = is_supported_op(*sketch.gpu_context(), src, dst);
+ return status;
+}
+
+void GpuOutput::create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *dst)
+{
+ ARM_COMPUTE_LOG_PARAMS(src, dst);
+ ARM_COMPUTE_ERROR_THROW_ON(GpuOutput::validate_op(sketch, src, dst));
+
+ // Auto initialize dst tensor info if empty
+ auto_init_if_empty(*dst, *src);
+
+ // Translate into components and add to component graph
+ auto &comp_graph = sketch.implementation().component_graph();
+ const auto sketch_ctx = sketch.implementation().context();
+
+ if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+ {
+ ARM_COMPUTE_ERROR_ON(sketch_ctx->cl_compile_context() == nullptr);
+
+ // Add store component
+ {
+ IGpuKernelComponent::Properties properties;
+ properties.stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+
+ ArgumentPack<ITensorInfo> arguments;
+ arguments.add_const_tensor(ACL_SRC_0, src);
+ arguments.add_const_tensor(ACL_DST_0, dst);
+ comp_graph.add_new_component<ClComponentStore>(properties, arguments);
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+ }
+
+ // Set up fusion test by adding to the Operator Group
+ // Note this has to be performed after all the components have been successfully added to the component graph
+
+ // Pack tensor infos
+ ArgumentPack<ITensorInfo> tensors;
+ tensors.add_const_tensor(ACL_SRC_0, src);
+ tensors.add_const_tensor(ACL_DST_0, dst);
+
+ const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+ sketch.implementation().operator_group().add_operator(op);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp
index 996bf15d01..6c1e0fb1de 100644
--- a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp
@@ -110,8 +110,8 @@ R"_(
R"_(
LOOP_UNROLLING(int, i, 0, 1, M0,
{
- g_dst_indirect_y[i].v = (uint)min(g_ind_1 + i, (int)({{dst}}_w * {{dst}}_h) - 1);
- g_dst_indirect_y[i].v += g_ind_2 * (int)({{dst}}_w * {{dst}}_h);
+ g_dst_indirect_y[i].v = (uint)min(g_ind_1 + i, (int)({{out}}_w * {{out}}_h) - 1);
+ g_dst_indirect_y[i].v += g_ind_2 * (int)({{out}}_w * {{out}}_h);
})
}
//------------------ END KERNEL {{meta_kernel_id}} ELTWISE_OP ---------------------
@@ -194,6 +194,7 @@ TagLUT ClTemplateElementwiseBinary::get_tag_lut(const GpuKernelVariableTable &vt
lut["lhs"] = vtable.get_variable(_lhs);
lut["rhs"] = vtable.get_variable(_rhs);
lut["dst"] = vtable.get_variable(_dst);
+ lut["out"] = vtable.get_variable(comp_group.get_dst_tensors().front());
}
else
{
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp
index cb643a741d..0afd0e7581 100644
--- a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp
@@ -179,7 +179,11 @@ std::string ClTemplateWriter::write_code()
code += macros;
}
- code += write_kernel_signature(_vtable.get_variable_list(_components.get_argument_tensors()));
+ auto arguments = _components.get_argument_tensors();
+ std::sort(arguments.begin(), arguments.end(), [](const ITensorInfo *l, const ITensorInfo *r) {
+ return l->id() < r->id();
+ });
+ code += write_kernel_signature(_vtable.get_variable_list(arguments));
code += "\n{\n\n";
@@ -190,6 +194,7 @@ std::string ClTemplateWriter::write_code()
for(const auto &component_code : component_codes)
{
code += component_code;
+ code += "\n";
}
code += "}\n";
diff --git a/src/dynamic_fusion/sketch/utils/DependencyGraph.h b/src/dynamic_fusion/sketch/utils/DependencyGraph.h
index 03678defae..633c5e4263 100644
--- a/src/dynamic_fusion/sketch/utils/DependencyGraph.h
+++ b/src/dynamic_fusion/sketch/utils/DependencyGraph.h
@@ -417,6 +417,33 @@ public:
}
return tensors;
}
+ /** Get intermediate tensors of the whole graph.
+ *
+ * @return std::vector<TensorId>
+ */
+ std::vector<TensorId> intermediate_tensors() const
+ {
+ std::vector<TensorId> tensors;
+
+ // If a tensor is used to connect the output of one operator to the input of another,
+ // it is not allocated in memory. The tensor exists only as a temporary variable.
+ for(auto src_tensor : _adj_src_ops)
+ {
+ if(!src_tensor.second.empty())
+ {
+ const auto dst_tensor = _adj_dst_ops.find(src_tensor.first);
+ if(dst_tensor != _adj_dst_ops.end())
+ {
+ if(!dst_tensor->second.empty())
+ {
+ tensors.push_back(src_tensor.first);
+ }
+ }
+ }
+ }
+
+ return tensors;
+ }
/** Get all root ops. Root ops can also be referred to as "src ops" of the whole graph
*
* @return std::vector<OperatorId>
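And a toy model of the detection itself, assuming adjacency maps shaped like _adj_src_ops / _adj_dst_ops above (a tensor is intermediate exactly when it has at least one producer and at least one consumer):

#include <cstdio>
#include <map>
#include <vector>

using TensorId   = int;
using OperatorId = int;

// Toy stand-ins: consumers[t] = operators reading tensor t,
// producers[t] = operators writing tensor t.
static std::map<TensorId, std::vector<OperatorId>> consumers;
static std::map<TensorId, std::vector<OperatorId>> producers;

static bool is_intermediate(TensorId t)
{
    const auto c = consumers.find(t);
    const auto p = producers.find(t);
    return c != consumers.end() && !c->second.empty()
        && p != producers.end() && !p->second.empty();
}

int main()
{
    // Conv2d (op 0) writes tensor 2; the GpuOutput store (op 1) reads it.
    producers[2] = { 0 };
    consumers[2] = { 1 };
    consumers[0] = { 0 }; // global input: read only
    producers[3] = { 1 }; // global output: written only

    std::printf("tensor 2: %d\n", is_intermediate(2)); // 1 -> NoAlloc
    std::printf("tensor 0: %d\n", is_intermediate(0)); // 0 -> User
    return 0;
}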
diff --git a/tests/validation/dynamic_fusion/gpu/Integration.cpp b/tests/validation/dynamic_fusion/gpu/Integration.cpp
index 0b81dac1f0..58d2215e64 100644
--- a/tests/validation/dynamic_fusion/gpu/Integration.cpp
+++ b/tests/validation/dynamic_fusion/gpu/Integration.cpp
@@ -28,6 +28,7 @@
#include "arm_compute/dynamic_fusion/sketch/OperatorAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "tests/CL/CLAccessor.h"
#include "tests/framework/Macros.h"
@@ -70,8 +71,11 @@ TEST_CASE(Conv2d, framework::DatasetMode::ALL)
Conv2dAttributes conv2d_attr{};
auto input_info = sketch.create_tensor_info(t_input_shape, 1, data_type, data_layout);
auto weight_info = sketch.create_tensor_info(TensorInfo(t_weight_shape, 1, data_type, data_layout));
- auto dst_info = sketch.create_tensor_info();
- GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &dst_info, conv2d_attr);
+ auto ans_info = sketch.create_tensor_info();
+ GpuConv2d::create_op(sketch, &input_info, &weight_info, nullptr, &ans_info, conv2d_attr);
+
+ auto dst_info = sketch.create_tensor_info();
+ GpuOutput::create_op(sketch, &ans_info, &dst_info);
// Configure runtime
ClWorkloadRuntime runtime;
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h
index c7600e082e..630b664b78 100644
--- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DepthwiseConv2dFixture.h
@@ -33,6 +33,7 @@
#include "arm_compute/dynamic_fusion/sketch/attributes/DepthwiseConv2dAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "tests/CL/CLAccessor.h"
@@ -133,7 +134,11 @@ protected:
auto weight_info = sketch.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout));
auto bias_info = sketch.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout));
auto dst_info = sketch.create_tensor_info();
- FunctionType::create_op(sketch, &input_info, &weight_info, &bias_info, &dst_info, dwc_conv2d_attr);
+
+ auto ans_info = sketch.create_tensor_info();
+
+ FunctionType::create_op(sketch, &input_info, &weight_info, &bias_info, &ans_info, dwc_conv2d_attr);
+ GpuOutput::create_op(sketch, &ans_info, &dst_info);
// Configure runtime
ClWorkloadRuntime runtime;
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
index e437c440d0..1a2676c438 100644
--- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/DirectConv2dFixture.h
@@ -32,6 +32,7 @@
#include "arm_compute/dynamic_fusion/sketch/OperatorAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "tests/CL/CLAccessor.h"
#include "tests/framework/Fixture.h"
@@ -113,7 +114,11 @@ protected:
auto weight_info = sketch.create_tensor_info(TensorInfo(weights_shape, 1, _data_type, _data_layout));
auto bias_info = sketch.create_tensor_info(TensorInfo(bias_shape, 1, _data_type, _data_layout));
auto dst_info = sketch.create_tensor_info();
- FunctionType::create_op(sketch, &input_info, &weight_info, &bias_info, &dst_info, conv2d_attr);
+
+ auto ans_info = sketch.create_tensor_info();
+
+ FunctionType::create_op(sketch, &input_info, &weight_info, &bias_info, &ans_info, conv2d_attr);
+ GpuOutput::create_op(sketch, &ans_info, &dst_info);
// Configure runtime
ClWorkloadRuntime runtime;
diff --git a/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h
index d11237748f..f97b541ce3 100644
--- a/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/gpu/cl/ElementwiseBinaryFixture.h
@@ -29,6 +29,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "tests/CL/CLAccessor.h"
#include "tests/framework/Fixture.h"
@@ -102,32 +103,30 @@ protected:
auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
auto gpu_ctx = GpuWorkloadContext{ &cl_compile_ctx };
GpuWorkloadSketch sketch{ &gpu_ctx };
- TensorInfo dst_info{};
- TensorInfo dst_info_fuse{};
// Fuse first element wise binary Op
auto lhs_info = sketch.create_tensor_info(shape0, 1, _data_type);
auto rhs_info = sketch.create_tensor_info(TensorInfo(shape1, 1, _data_type));
+
+ auto ans_info = sketch.create_tensor_info();
+ auto dst_info = sketch.create_tensor_info();
+
TensorInfo rhs_info_fuse;
+ TensorInfo ans2_info;
+
+ FunctionType::create_op(sketch, &lhs_info, &rhs_info, &ans_info);
- // Testing root case while in-place
- if(!_is_inplace)
+ if(_fuse)
{
- dst_info = sketch.create_tensor_info(TensorInfo(1, _data_type));
+ rhs_info_fuse = sketch.create_tensor_info(shape2, 1, _data_type);
+ ans2_info = sketch.create_tensor_info();
- FunctionType::create_op(sketch, &lhs_info, &rhs_info, &dst_info);
+ FunctionType::create_op(sketch, &ans_info, &rhs_info_fuse, &ans2_info);
+ GpuOutput::create_op(sketch, &ans2_info, &dst_info);
}
else
{
- FunctionType::create_op(sketch, &lhs_info, &rhs_info, &lhs_info);
- }
-
- if(_fuse)
- {
- // Fuse first element wise binary Op
- rhs_info_fuse = sketch.create_tensor_info(TensorInfo(shape2, 1, _data_type));
- dst_info_fuse = sketch.create_tensor_info();
- FunctionType::create_op(sketch, &dst_info, &rhs_info_fuse, &dst_info_fuse);
+ GpuOutput::create_op(sketch, &ans_info, &dst_info);
}
// Configure runtime
@@ -148,33 +147,24 @@ protected:
TensorType t_rhs{};
TensorType t_rhs_fuse{};
TensorType t_dst{};
- TensorType t_dst_fuse{};
// Initialize user tensors
t_lhs.allocator()->init(lhs_info);
t_rhs.allocator()->init(rhs_info);
- if(!_is_inplace)
+ t_dst.allocator()->init(dst_info);
+ if(_fuse)
{
- t_dst.allocator()->init(dst_info);
- if(_fuse)
- {
- t_rhs_fuse.allocator()->init(rhs_info_fuse);
- t_dst_fuse.allocator()->init(dst_info_fuse);
- }
+ t_rhs_fuse.allocator()->init(rhs_info_fuse);
}
// Allocate and fill user tensors
// Instead of using ACL allocator, the user can choose to import memory into the tensors
t_lhs.allocator()->allocate();
t_rhs.allocator()->allocate();
- if(!_is_inplace)
+ t_dst.allocator()->allocate();
+ if(_fuse)
{
- t_dst.allocator()->allocate();
- if(_fuse)
- {
- t_rhs_fuse.allocator()->allocate();
- t_dst_fuse.allocator()->allocate();
- }
+ t_rhs_fuse.allocator()->allocate();
}
fill(AccessorType(t_lhs), 0);
@@ -183,31 +173,17 @@ protected:
{
fill(AccessorType(t_rhs_fuse), 2);
}
+
// Run runtime
- if(_is_inplace)
+ if(_fuse)
{
- runtime.run({ &t_lhs, &t_rhs, &t_lhs });
+ runtime.run({ &t_lhs, &t_rhs, &t_rhs_fuse, &t_dst });
}
else
{
- if(_fuse)
- {
- runtime.run({ &t_lhs, &t_rhs, &t_rhs_fuse, &t_dst_fuse });
- }
- else
- {
- runtime.run({ &t_lhs, &t_rhs, &t_dst });
- }
+ runtime.run({ &t_lhs, &t_rhs, &t_dst });
}
- if(_is_inplace)
- {
- return t_lhs;
- }
- else if(_fuse)
- {
- return t_dst_fuse;
- }
return t_dst;
}
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
index 8553472fb9..418cf4fe04 100644
--- a/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/operators/CastFixture.h
@@ -30,6 +30,7 @@
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/DepthConvertLayer.h"
@@ -121,7 +122,10 @@ protected:
CastAttributes attributes;
attributes.convert_policy(policy).data_type(dt_out);
- FunctionType::create_op(sketch, &src_info, &dst_info, attributes);
+ auto ans_info = sketch.create_tensor_info();
+
+ FunctionType::create_op(sketch, &src_info, &ans_info, attributes);
+ GpuOutput::create_op(sketch, &ans_info, &dst_info);
// Configure runtime
ClWorkloadRuntime runtime;
diff --git a/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h b/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h
index dbac29fd22..fe87d9a022 100644
--- a/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h
+++ b/tests/validation/fixtures/dynamic_fusion/operators/ClampFixture.h
@@ -29,6 +29,7 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ActivationLayer.h"
@@ -108,14 +109,23 @@ protected:
// Create sketch tensors
TensorInfo src_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type));
- TensorInfo dst_0_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type));
- TensorInfo dst_1_info;
+ TensorInfo dst_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type));
+
+ auto ans_0_info = sketch.create_tensor_info();
+ TensorInfo ans_1_info;
+
+ FunctionType::create_op(sketch, &src_info, &ans_0_info, attributes);
- FunctionType::create_op(sketch, &src_info, &dst_0_info, attributes);
if(_fuse)
{
- dst_1_info = sketch.create_tensor_info(shape, 1, _data_type);
- FunctionType::create_op(sketch, &dst_0_info, &dst_1_info, attributes);
+ ans_1_info = sketch.create_tensor_info();
+
+ FunctionType::create_op(sketch, &ans_0_info, &ans_1_info, attributes);
+ GpuOutput::create_op(sketch, &ans_1_info, &dst_info);
+ }
+ else
+ {
+ GpuOutput::create_op(sketch, &ans_0_info, &dst_info);
}
// Configure runtime
@@ -124,43 +134,22 @@ protected:
// Construct user tensors
TensorType t_src{};
- TensorType t_dst_0{};
- TensorType t_dst_1{};
+ TensorType t_dst{};
// Initialize user tensors
t_src.allocator()->init(src_info);
- t_dst_0.allocator()->init(dst_0_info);
- if(_fuse)
- {
- t_dst_1.allocator()->init(dst_1_info);
- }
+ t_dst.allocator()->init(dst_info);
// Allocate and fill user tensors
t_src.allocator()->allocate();
- t_dst_0.allocator()->allocate();
- if(_fuse)
- {
- t_dst_1.allocator()->allocate();
- }
+ t_dst.allocator()->allocate();
fill(AccessorType(t_src));
// Run runtime
- if(_fuse)
- {
- runtime.run({ &t_src, &t_dst_1 });
- }
- else
- {
- runtime.run({ &t_src, &t_dst_0 });
- }
-
- if(_fuse)
- {
- return t_dst_1;
- }
+ runtime.run({ &t_src, &t_dst });
- return t_dst_0;
+ return t_dst;
}
SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info)