ArmNN 22.02
WorkloadTestUtils.hpp
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Tensor.hpp>

#include <armnn/backends/IBackendInternal.hpp>
#include <armnn/backends/IMemoryManager.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadInfo.hpp>

namespace armnn
{
class ITensorHandle;
} // namespace armnn

namespace
{

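// Appends a tensor handle and its TensorInfo to the descriptor's input list
// and to the WorkloadInfo, keeping the two containers index-aligned.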
template <typename QueueDescriptor>
void AddInputToWorkload(QueueDescriptor& descriptor,
                        armnn::WorkloadInfo& info,
                        const armnn::TensorInfo& tensorInfo,
                        armnn::ITensorHandle* tensorHandle)
{
    descriptor.m_Inputs.push_back(tensorHandle);
    info.m_InputTensorInfos.push_back(tensorInfo);
}

template <typename QueueDescriptor>
void AddOutputToWorkload(QueueDescriptor& descriptor,
                         armnn::WorkloadInfo& info,
                         const armnn::TensorInfo& tensorInfo,
                         armnn::ITensorHandle* tensorHandle)
{
    descriptor.m_Outputs.push_back(tensorHandle);
    info.m_OutputTensorInfos.push_back(tensorInfo);
}

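// The Set* variants overwrite an existing slot rather than appending; the
// index must already be populated (e.g. via the Add* helpers above).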
template <typename QueueDescriptor>
void SetWorkloadInput(QueueDescriptor& descriptor,
                      armnn::WorkloadInfo& info,
                      unsigned int index,
                      const armnn::TensorInfo& tensorInfo,
                      armnn::ITensorHandle* tensorHandle)
{
    descriptor.m_Inputs[index] = tensorHandle;
    info.m_InputTensorInfos[index] = tensorInfo;
}

template <typename QueueDescriptor>
void SetWorkloadOutput(QueueDescriptor& descriptor,
                       armnn::WorkloadInfo& info,
                       unsigned int index,
                       const armnn::TensorInfo& tensorInfo,
                       armnn::ITensorHandle* tensorHandle)
{
    descriptor.m_Outputs[index] = tensorHandle;
    info.m_OutputTensorInfos[index] = tensorInfo;
}

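// Runs a workload: optionally acquires working memory from the given memory
// manager, performs post-allocation configuration, executes, then releases
// the memory again.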
inline void ExecuteWorkload(armnn::IWorkload& workload,
                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                            bool memoryManagementRequested = true)
{
    const bool manageMemory = memoryManager && memoryManagementRequested;

    // Acquire working memory (if needed)
    if (manageMemory)
    {
        memoryManager->Acquire();
    }

    // Perform PostAllocationConfiguration
    workload.PostAllocationConfigure();

    // Execute the workload
    workload.Execute();

    // Release working memory (if needed)
    if (manageMemory)
    {
        memoryManager->Release();
    }
}

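// Maps a weights data type to the corresponding bias data type: float
// weights keep their own type, quantized weights use Signed32 biases.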
inline armnn::Optional<armnn::DataType> GetBiasTypeFromWeightsType(armnn::Optional<armnn::DataType> weightsType)
{
    if (!weightsType)
    {
        return weightsType;
    }

    switch(weightsType.value())
    {
        case armnn::DataType::BFloat16:
        case armnn::DataType::Float16:
        case armnn::DataType::Float32:
            return weightsType;
        case armnn::DataType::QAsymmS8:
        case armnn::DataType::QAsymmU8:
        case armnn::DataType::QSymmS8:
        case armnn::DataType::QSymmS16:
            return armnn::DataType::Signed32;
        default:
            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
    }
    return armnn::EmptyOptional();
}

} // anonymous namespace
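
For context, here is a minimal sketch of how these helpers are typically wired together in a backend unit test. The names workloadFactory, inputHandle, outputHandle and memoryManager are assumptions standing in for objects a real test fixture would provide, and the activation layer is just an illustrative choice:

// Sketch only: workloadFactory, inputHandle, outputHandle and memoryManager
// are hypothetical and would come from the surrounding test fixture.
armnn::TensorInfo tensorInfo({ 1, 2, 2, 2 }, armnn::DataType::Float32);

armnn::ActivationQueueDescriptor descriptor;
descriptor.m_Parameters.m_Function = armnn::ActivationFunction::ReLu;
armnn::WorkloadInfo info;

AddInputToWorkload(descriptor, info, tensorInfo, inputHandle.get());
AddOutputToWorkload(descriptor, info, tensorInfo, outputHandle.get());

auto workload = workloadFactory.CreateActivation(descriptor, info);
inputHandle->Allocate();
outputHandle->Allocate();
ExecuteWorkload(*workload, memoryManager);

// GetBiasTypeFromWeightsType resolves quantized weight types to Signed32:
auto biasType = GetBiasTypeFromWeightsType(
    armnn::Optional<armnn::DataType>(armnn::DataType::QAsymmU8));
// biasType.value() == armnn::DataType::Signed32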