Diffstat (limited to 'src/backends/gpuFsa/workloads/GpuFsaPreCompiledWorkload.cpp')
-rw-r--r--  src/backends/gpuFsa/workloads/GpuFsaPreCompiledWorkload.cpp  40
1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/src/backends/gpuFsa/workloads/GpuFsaPreCompiledWorkload.cpp b/src/backends/gpuFsa/workloads/GpuFsaPreCompiledWorkload.cpp
index 20386b5d86..1663044a17 100644
--- a/src/backends/gpuFsa/workloads/GpuFsaPreCompiledWorkload.cpp
+++ b/src/backends/gpuFsa/workloads/GpuFsaPreCompiledWorkload.cpp
@@ -10,6 +10,7 @@
#include <gpuFsa/GpuFsaTensorHandle.hpp>
#include <gpuFsa/GpuFsaBackend.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <fmt/format.h>
#include <arm_compute/runtime/CL/CLTensor.h>
@@ -21,7 +22,6 @@
#include <arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h>
-#include <src/dynamic_fusion/sketch/gpu/GpuWorkloadContextImpl.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h>
#include <arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h>
@@ -48,26 +48,38 @@ void GpuFsaPreCompiledWorkload::Execute() const
* First we need all of the data stored within the PreCompiled blob which was used to setup the workload, namely:
* The GpuWorkloadContext, this is a context which contains the TensorInfos and is unique to the graph being run
* The Sketch, this can contain one or many ops and acts as a subgraph within the context
- * The TensorInfoIds, These are the ids of the TensorInfos used during the creation of the Sketch and stored within
- * the context.
+ * The inputTensorInfos / outputTensorInfos, these are vectors containing the TensorInfos used when creating the sketch
+ *
* It is very important that the Tensors passed into the Runtime being used to execute this sketch are created with
* the same TensorInfos as used when creating the sketch. We do this by creating new tensors, getting the original
- * TensorInfos from the GpuWorkloadContext via their ids, and then importing the buffers from our own TensorHandles
- * directly into these newly created Tensors. This allows us to link the externally visible Tensors from ArmNN to the
- * Tensors which are needed to execute with the Sketch.
+ * TensorInfos from the vectors of tensorInfos stored in the blob, and then importing the buffers from our own
+ * TensorHandles directly into these newly created Tensors. This allows us to link the externally visible Tensors
+ * from ArmNN to the Tensors which are needed to execute with the Sketch.
*
*/
using namespace arm_compute::experimental::dynamic_fusion;
// Get the runtime and configure it with the precompiled sketch
ClWorkloadRuntime runtime;
GpuFsaPreCompiledBlob *preCompiledBlob = static_cast<GpuFsaPreCompiledBlob*>(m_Data.m_PreCompiledObject);
- auto workloadContext =
- &(preCompiledBlob->workloadContext->implementation());
auto sketch = preCompiledBlob->sketch.release();
- std::vector<int32_t> inputIds = *(preCompiledBlob->inputIds.get());
- std::vector<int32_t> outputIds = *(preCompiledBlob->outputIds.get());
auto status = runtime.configure(*sketch);
+ // Get the TensorInfos stored within the PreCompiledBlob and check they're the right size
+ auto inputTensorInfos = preCompiledBlob->inputTensorInfos.get();
+ auto outputTensorInfos = preCompiledBlob->outputTensorInfos.get();
+ if (inputTensorInfos->size() != m_Data.m_Inputs.size())
+ {
+ throw InvalidArgumentException(fmt::format("GpuFsaPreCompiledWorkload::Execute: The number of inputTensorInfos"
+ " {} does not match the number of inputs {}.",
+ inputTensorInfos->size(), m_Data.m_Inputs.size()));
+ }
+ if (outputTensorInfos->size() != m_Data.m_Outputs.size())
+ {
+ throw InvalidArgumentException(fmt::format("GpuFsaPreCompiledWorkload::Execute: The number of outputTensorInfos"
+ " {} does not match the number of outputs {}.",
+ outputTensorInfos->size(), m_Data.m_Outputs.size()));
+ }
+
// (Important) Allocate auxiliary tensor memory if there are any
for(auto &data : runtime.get_auxiliary_tensors())
{
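The accesses in this hunk (sketch.release(), inputTensorInfos.get(), and the double dereference in the loops below) imply a blob layout along the following lines. This is only a sketch: the actual definition lives in GpuFsaBackend.hpp, and the exact pointee type (TensorInfo* rather than ITensorInfo*) is an assumption made here so that the init() calls below type-check.

// Sketch of the blob layout implied by this diff (assumed, not quoted from
// GpuFsaBackend.hpp): smart pointers to vectors of raw TensorInfo pointers,
// hence the "ptr to a vector of ptrs" double dereference noted below.
struct GpuFsaPreCompiledBlob
{
    std::unique_ptr<arm_compute::experimental::dynamic_fusion::GpuWorkloadSketch> sketch = nullptr;
    std::shared_ptr<arm_compute::experimental::dynamic_fusion::GpuWorkloadContext> workloadContext = nullptr;
    std::unique_ptr<std::vector<arm_compute::TensorInfo*>> inputTensorInfos = nullptr;
    std::unique_ptr<std::vector<arm_compute::TensorInfo*>> outputTensorInfos = nullptr;
};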
@@ -85,8 +97,8 @@ void GpuFsaPreCompiledWorkload::Execute() const
for (uint32_t inputSlotIdx = 0; inputSlotIdx < m_Data.m_Inputs.size(); ++inputSlotIdx)
{
arm_compute::CLTensor* input = new arm_compute::CLTensor{};
- input->allocator()->init(*(dynamic_cast<arm_compute::TensorInfo*>(
- workloadContext->get_tensor_info(inputIds[inputSlotIdx]))));
+ // inputTensorInfos is a ptr to a vector of ptrs, so we need to do a double dereference
+ input->allocator()->init(*((*inputTensorInfos)[inputSlotIdx]));
auto* inputHandle = PolymorphicDowncast<GpuFsaTensorHandle*>(m_Data.m_Inputs[inputSlotIdx]);
input->allocator()->import_memory(inputHandle->GetTensor().cl_buffer());
inputsWeightsOutputs.emplace_back(std::move(input));
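The raw new arm_compute::CLTensor{} above is not leaked: emplace_back(std::move(input)) with a raw pointer only compiles into an owning container, so inputsWeightsOutputs is presumably declared earlier in Execute(), outside the hunks shown here, roughly as follows.

// Assumed declaration (not visible in this diff): emplace_back adopts each
// raw CLTensor* into a unique_ptr, so the tensors are destroyed, and their
// imported buffers released, when the vector goes out of scope.
std::vector<std::unique_ptr<arm_compute::CLTensor>> inputsWeightsOutputs;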
@@ -95,8 +107,8 @@ void GpuFsaPreCompiledWorkload::Execute() const
for (uint32_t outputSlotIdx = 0; outputSlotIdx < m_Data.m_Outputs.size(); ++outputSlotIdx)
{
arm_compute::CLTensor* output = new arm_compute::CLTensor{};
- output->allocator()->init(*(dynamic_cast<arm_compute::TensorInfo*>(
- workloadContext->get_tensor_info(outputIds[outputSlotIdx]))));
+ // outputTensorInfos is a ptr to a vector of ptrs, so we need to do a double dereference
+ output->allocator()->init(*((*outputTensorInfos)[outputSlotIdx]));
auto* outputHandle = PolymorphicDowncast<GpuFsaTensorHandle*>(m_Data.m_Outputs[outputSlotIdx]);
output->allocator()->import_memory(outputHandle->GetTensor().cl_buffer());
inputsWeightsOutputs.emplace_back(std::move(output));
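Taken together, the pattern the comment block describes, initialise a fresh CLTensor with the TensorInfo recorded at sketch-creation time and then alias the externally owned OpenCL buffer instead of allocating, reduces to a small helper. MakeAliasTensor is a hypothetical name used purely for illustration; only init() and import_memory() below are real Compute Library calls.

#include <arm_compute/runtime/CL/CLTensor.h>

// Hypothetical helper showing the zero-copy linkage used in the loops above:
// the new tensor is initialised with the same TensorInfo that was used when
// the sketch was created, then the caller's cl::Buffer is imported rather
// than allocated, so ArmNN's externally visible tensor and the tensor the
// sketch executes against share one piece of device memory.
arm_compute::CLTensor* MakeAliasTensor(const arm_compute::TensorInfo& storedInfo,
                                       const cl::Buffer& externalBuffer)
{
    auto* tensor = new arm_compute::CLTensor{};
    tensor->allocator()->init(storedInfo);               // same info as at sketch creation
    tensor->allocator()->import_memory(externalBuffer);  // alias, no copy
    return tensor;
}

Because the buffers are imported rather than copied, Execute() never moves tensor data; the CLTensors handed to ClWorkloadRuntime::run are lightweight views over memory ArmNN already owns.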