23.02
RefReshapeWorkload.cpp
Go to the documentation of this file.
1
//
2
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3
// SPDX-License-Identifier: MIT
4
//
5
6
#include "
RefReshapeWorkload.hpp
"
7
#include "
RefWorkloadUtils.hpp
"
8
#include "
Profiling.hpp
"
9
10
#include <cstring>
11
12
namespace
armnn
13
{
14
15
void
RefReshapeWorkload::Execute
()
const
16
{
17
Execute
(
m_Data
.
m_Inputs
,
m_Data
.
m_Outputs
);
18
}
19
20
/// Asynchronous entry point for the reference reshape workload.
/// The ExecutionData payload (m_Data) carries a type-erased WorkingMemDescriptor
/// on the reference backend; recover it and run the shared Execute() overload
/// over its input/output tensor handles.
void RefReshapeWorkload::ExecuteAsync(ExecutionData& executionData)
{
    auto* memDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(memDescriptor->m_Inputs, memDescriptor->m_Outputs);
}
25
26
/// Core implementation shared by the sync and async entry points.
/// A reshape never reorders element storage, so on the reference backend it is
/// a straight byte-for-byte copy of the (single) input tensor into the output.
void RefReshapeWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefReshapeWorkload_Execute");

    // Size comes from the input tensor's metadata; input and output cover the
    // same number of bytes by construction of the reshape.
    const unsigned int byteCount = GetTensorInfo(inputs[0]).GetNumBytes();

    const void* src = inputs[0]->Map();
    void* dst = outputs[0]->Map();
    memcpy(dst, src, byteCount);
}
35
36
}
//namespace armnn
armnn::GetTensorInfo
const TensorInfo & GetTensorInfo(const ITensorHandle *tensorHandle)
float32 helpers
Definition:
RefWorkloadUtils.hpp:27
armnn::experimental::WorkingMemDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition:
WorkingMemDescriptor.hpp:20
armnn::experimental::ExecutionData
Definition:
ExecutionData.hpp:14
armnn::experimental::ExecutionData::m_Data
void * m_Data
Definition:
ExecutionData.hpp:16
armnn::experimental::WorkingMemDescriptor
Definition:
WorkingMemDescriptor.hpp:18
armnn::BaseWorkload< ReshapeQueueDescriptor >::m_Data
ReshapeQueueDescriptor m_Data
Definition:
Workload.hpp:83
armnn
Copyright (c) 2021 ARM Limited and Contributors.
Definition:
01_00_quick_start.dox:6
RefReshapeWorkload.hpp
RefWorkloadUtils.hpp
ARMNN_SCOPED_PROFILING_EVENT
#define ARMNN_SCOPED_PROFILING_EVENT(backendId, name)
Definition:
Profiling.hpp:220
armnn::RefReshapeWorkload::ExecuteAsync
void ExecuteAsync(ExecutionData &executionData) override
Definition:
RefReshapeWorkload.cpp:20
armnn::TensorInfo::GetNumBytes
unsigned int GetNumBytes() const
Definition:
Tensor.cpp:427
armnn::experimental::WorkingMemDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition:
WorkingMemDescriptor.hpp:21
armnn::RefReshapeWorkload::Execute
void Execute() const override
Definition:
RefReshapeWorkload.cpp:15
armnn::QueueDescriptor::m_Outputs
std::vector< ITensorHandle * > m_Outputs
Definition:
WorkloadData.hpp:27
armnn::Compute::CpuRef
@ CpuRef
CPU Execution: Reference C++ kernels.
armnn::QueueDescriptor::m_Inputs
std::vector< ITensorHandle * > m_Inputs
Definition:
WorkloadData.hpp:26
Profiling.hpp
src
backends
reference
workloads
RefReshapeWorkload.cpp
Generated on Wed Mar 22 2023 15:53:02 for ArmNN by
1.8.17