ArmNN
 21.05
armnn::experimental Namespace Reference

Classes

class  AsyncExecutionCallback
 
class  IAsyncExecutionCallback
 
class  IWorkingMemHandle
 
struct  WorkingMemDescriptor
 
class  WorkingMemHandle
 

Typedefs

using IAsyncExecutionCallbackPtr = std::shared_ptr< IAsyncExecutionCallback >
 

Functions

template<DataType ArmnnIType, DataType ArmnnOType, typename TInput = ResolveType <ArmnnIType>, typename TOutput = ResolveType <ArmnnOType>>
void AsyncThreadedEndToEndTestImpl (INetworkPtr network, const std::vector< std::map< int, std::vector< TInput >>> &inputTensorData, const std::vector< std::map< int, std::vector< TOutput >>> &expectedOutputData, std::vector< BackendId > backends, const size_t numberOfInferences, float tolerance=0.000001f)
 
template<DataType ArmnnIType, DataType ArmnnOType, typename TInput = ResolveType<ArmnnIType>, typename TOutput = ResolveType<ArmnnOType>>
void AsyncEndToEndTestImpl (INetworkPtr network, const std::map< int, std::vector< TInput >> &inputTensorData, const std::map< int, std::vector< TOutput >> &expectedOutputData, std::vector< BackendId > backends, float tolerance=0.000001f, size_t numThreads=0)
 
template<typename armnn::DataType DataType>
INetworkPtr CreateStridedSliceNetwork (const TensorShape &inputShape, const TensorShape &outputShape, const std::vector< int > &beginData, const std::vector< int > &endData, const std::vector< int > &stridesData, int beginMask=0, int endMask=0, int shrinkAxisMask=0, int ellipsisMask=0, int newAxisMask=0, const float qScale=1.0f, const int32_t qOffset=0)
 
template<armnn::DataType ArmnnType>
void StridedSlicedEndToEndTest (const std::vector< BackendId > &backends)
 
template<armnn::DataType ArmnnType>
void AsyncScheduledStridedSlicedEndToEndTest (const std::vector< BackendId > &backends)
 
template<armnn::DataType ArmnnType>
void AsyncScheduledStridedSlicedMultiThreadedEndToEndTest (const std::vector< BackendId > &backends)
 
template<armnn::DataType ArmnnType>
void StridedSlicedMultiThreadedEndToEndTest (const std::vector< BackendId > &backends)
 

Typedef Documentation

◆ IAsyncExecutionCallbackPtr

Definition at line 17 of file IAsyncExecutionCallback.hpp.

Function Documentation

◆ AsyncEndToEndTestImpl()

void armnn::experimental::AsyncEndToEndTestImpl ( INetworkPtr  network,
const std::map< int, std::vector< TInput >> &  inputTensorData,
const std::map< int, std::vector< TOutput >> &  expectedOutputData,
std::vector< BackendId >  backends,
float  tolerance = 0.000001f,
size_t  numThreads = 0 
)

Definition at line 121 of file StridedSliceAsyncEndToEndTest.hpp.

References IRuntime::Create(), armnn::Optimize(), armnn::Success, and armnn::Undefined.

127 {
128  // Create Runtime in which test will run
130  IRuntimePtr runtime(IRuntime::Create(options));
131 
132  // Optimize the Network
133  IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
134 
135  // Creates AsyncNetwork
136  NetworkId networkId = 0;
137 
138  std::string errorMessage;
139 
140  const INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined, numThreads);
141 
142  runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
143 
144  InputTensors inputTensors;
145  inputTensors.reserve(inputTensorData.size());
146  for (auto&& it : inputTensorData)
147  {
148  inputTensors.push_back({it.first,
149  ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
150  }
151 
152  OutputTensors outputTensors;
153  outputTensors.reserve(expectedOutputData.size());
154  std::map<int, std::vector<TOutput>> outputStorage;
155  for (auto&& it : expectedOutputData)
156  {
157  std::vector<TOutput> out(it.second.size());
158  outputStorage.emplace(it.first, out);
159  outputTensors.push_back({it.first,
160  Tensor(runtime->GetOutputTensorInfo(networkId, it.first),
161  outputStorage.at(it.first).data())});
162  }
163 
164  if (numThreads == 0)
165  {
166  // Create WorkingMemHandle for this async network
167  std::unique_ptr<IWorkingMemHandle> workingMemHandle = runtime->CreateWorkingMemHandle(networkId);
168  IWorkingMemHandle& workingMemHandleRef = *workingMemHandle.get();
169 
170  // Run the async network
171  runtime->Execute(workingMemHandleRef, inputTensors, outputTensors);
172  }
173  else
174  {
175  std::vector<IAsyncExecutionCallbackPtr> callbacks;
176 
177  // Create 1000 callbacks that will be checked post scheduling
178  for (size_t i = 0; i < 1000; ++i)
179  {
180  callbacks.emplace_back(std::make_shared<AsyncExecutionCallback>());
181  }
182 
183  // For the asynchronous execution, we are adding a pool of working memory handles (1 per thread) in the
184  // LoadedNetwork with each scheduled inference having a specific priority
185  for (IAsyncExecutionCallbackPtr cb : callbacks)
186  {
187  runtime->Schedule(networkId,
188  inputTensors,
189  outputTensors,
190  static_cast<QosExecPriority>(rand()%3),
191  cb);
192  }
193 
194  // Wait until the execution signals a notify
195  for (IAsyncExecutionCallbackPtr cb : callbacks)
196  {
197  cb->Wait();
198 
199  // Checks the results.
200  BOOST_CHECK(cb->GetStatus() == Status::Success);
201  }
202  }
203 
204  for (auto&& it : expectedOutputData)
205  {
206  std::vector<TOutput> out = outputStorage.at(it.first);
207 
208  for (unsigned int i = 0; i < out.size(); ++i)
209  {
210  BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true);
211  }
212  }
213 }
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:28
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:340
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:306
std::shared_ptr< IAsyncExecutionCallback > IAsyncExecutionCallbackPtr
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1568
int NetworkId
Definition: IRuntime.hpp:22
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:341
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174

◆ AsyncScheduledStridedSlicedEndToEndTest()

void armnn::experimental::AsyncScheduledStridedSlicedEndToEndTest ( const std::vector< BackendId > &  backends)

Definition at line 307 of file StridedSliceAsyncEndToEndTest.hpp.

308 {
309  using namespace armnn;
310  using T = ResolveType<ArmnnType>;
311 
312  const TensorShape& inputShape = {3, 2, 3, 1};
313  const TensorShape& outputShape = {1, 2, 3, 1};
314  const std::vector<int>& beginData = {1, 0, 0, 0};
315  const std::vector<int>& endData = {2, 2, 3, 1};
316  const std::vector<int>& stridesData = {1, 1, 1, 1};
317  int beginMask = 0;
318  int endMask = 0;
319  int shrinkAxisMask = 0;
320  int ellipsisMask = 0;
321  int newAxisMask = 0;
322 
323  // Builds up the structure of the network
324  INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
325  outputShape,
326  beginData,
327  endData,
328  stridesData,
329  beginMask,
330  endMask,
331  shrinkAxisMask,
332  ellipsisMask,
333  newAxisMask);
334 
335  // Creates structures for input & output.
336  std::vector<T> inputData{
337  1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
338 
339  3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
340 
341  5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
342  };
343 
344  std::vector<T> outputExpected{
345  3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f
346  };
347 
348  std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
349  std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
350 
351  AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends, 0.000001f, 1);
352 }
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173

◆ AsyncScheduledStridedSlicedMultiThreadedEndToEndTest()

void armnn::experimental::AsyncScheduledStridedSlicedMultiThreadedEndToEndTest ( const std::vector< BackendId > &  backends)

Definition at line 355 of file StridedSliceAsyncEndToEndTest.hpp.

356 {
357  using namespace armnn;
358  using T = ResolveType<ArmnnType>;
359 
360  const TensorShape& inputShape = {3, 2, 3, 1};
361  const TensorShape& outputShape = {1, 2, 3, 1};
362  const std::vector<int>& beginData = {1, 0, 0, 0};
363  const std::vector<int>& endData = {2, 2, 3, 1};
364  const std::vector<int>& stridesData = {1, 1, 1, 1};
365  int beginMask = 0;
366  int endMask = 0;
367  int shrinkAxisMask = 0;
368  int ellipsisMask = 0;
369  int newAxisMask = 0;
370 
371  // Builds up the structure of the network
372  INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
373  outputShape,
374  beginData,
375  endData,
376  stridesData,
377  beginMask,
378  endMask,
379  shrinkAxisMask,
380  ellipsisMask,
381  newAxisMask);
382 
383  // Creates structures for input & output.
384  std::vector<T> inputData{
385  1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
386 
387  3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
388 
389  5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
390  };
391 
392  std::vector<T> outputExpected{
393  3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f
394  };
395 
396  std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
397  std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
398 
399  AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends, 0.000001f, 3);
400 }
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173

◆ AsyncThreadedEndToEndTestImpl()

void armnn::experimental::AsyncThreadedEndToEndTestImpl ( INetworkPtr  network,
const std::vector< std::map< int, std::vector< TInput >>> &  inputTensorData,
const std::vector< std::map< int, std::vector< TOutput >>> &  expectedOutputData,
std::vector< BackendId >  backends,
const size_t  numberOfInferences,
float  tolerance = 0.000001f 
)

Definition at line 29 of file StridedSliceAsyncEndToEndTest.hpp.

References IRuntime::Create(), armnn::Optimize(), and armnn::Undefined.

35 {
36  // Create Runtime in which test will run
38  IRuntimePtr runtime(IRuntime::Create(options));
39 
40  // Optimize the Network
41  IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
42 
43 
44  // Creates AsyncNetwork
45  NetworkId networkId = 0;
46  std::string errorMessage;
47  const INetworkProperties networkProperties(true, MemorySource::Undefined, MemorySource::Undefined);
48  runtime->LoadNetwork(networkId, std::move(optNet), errorMessage, networkProperties);
49 
50  std::vector<InputTensors> inputTensorsVec;
51  std::vector<OutputTensors> outputTensorsVec;
52  std::vector<std::map<int, std::vector<TOutput>>> outputStorageVec;
53  std::vector<std::unique_ptr<IWorkingMemHandle>> workingMemHandles;
54 
55  for (unsigned int i = 0; i < numberOfInferences; ++i)
56  {
57  InputTensors inputTensors;
58  OutputTensors outputTensors;
59  outputStorageVec.emplace_back(std::map<int, std::vector<TOutput>>());
60 
61  inputTensors.reserve(inputTensorData.size());
62  for (auto&& it : inputTensorData[i])
63  {
64  inputTensors.push_back({it.first,
65  ConstTensor(runtime->GetInputTensorInfo(networkId, it.first), it.second.data())});
66  }
67 
68  outputTensors.reserve(expectedOutputData.size());
69  for (auto&& it : expectedOutputData[i])
70  {
71  std::vector<TOutput> out(it.second.size());
72  outputStorageVec[i].emplace(it.first, out);
73  outputTensors.push_back({it.first,
74  Tensor(runtime->GetOutputTensorInfo(networkId, it.first),
75  outputStorageVec[i].at(it.first).data())});
76  }
77 
78  inputTensorsVec.push_back(inputTensors);
79  outputTensorsVec.push_back(outputTensors);
80 
81  workingMemHandles.push_back(runtime->CreateWorkingMemHandle(networkId));
82  }
83 
84  std::vector<std::thread> threads;
85  for (unsigned int i = 0; i < numberOfInferences; ++i)
86  {
87  // Access the vectors before we do anything multi-threaded
88  InputTensors& inputTensors = inputTensorsVec[i];
89  OutputTensors& outputTensors = outputTensorsVec[i];
90  IWorkingMemHandle& workingMemHandle = *workingMemHandles[i].get();
91 
92  threads.emplace_back([&]()
93  {
94  // Run the async network
95  runtime->Execute(workingMemHandle, inputTensors, outputTensors);
96  });
97  }
98 
99  for (unsigned int i = 0; i < numberOfInferences; ++i)
100  {
101  threads[i].join();
102  }
103 
104  // Checks the results.
105  for (unsigned int i = 0; i < numberOfInferences; ++i)
106  {
107  for (auto &&it : expectedOutputData[i])
108  {
109  std::vector<TOutput> out = outputStorageVec[i].at(it.first);
110  for (unsigned int j = 0; j < out.size(); ++j)
111  {
112  BOOST_CHECK(Compare<ArmnnOType>(it.second[j], out[j], tolerance) == true);
113  }
114  }
115  }
116 
117 }
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:28
std::vector< std::pair< LayerBindingId, class ConstTensor > > InputTensors
Definition: Tensor.hpp:340
A tensor defined by a TensorInfo (shape and data type) and a mutable backing store.
Definition: Tensor.hpp:306
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1568
int NetworkId
Definition: IRuntime.hpp:22
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:314
std::vector< std::pair< LayerBindingId, class Tensor > > OutputTensors
Definition: Tensor.hpp:341
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:174

◆ CreateStridedSliceNetwork()

INetworkPtr armnn::experimental::CreateStridedSliceNetwork ( const TensorShape &  inputShape,
const TensorShape &  outputShape,
const std::vector< int > &  beginData,
const std::vector< int > &  endData,
const std::vector< int > &  stridesData,
int  beginMask = 0,
int  endMask = 0,
int  shrinkAxisMask = 0,
int  ellipsisMask = 0,
int  newAxisMask = 0,
const float  qScale = 1.0f,
const int32_t  qOffset = 0 
)

Definition at line 216 of file StridedSliceAsyncEndToEndTest.hpp.

References Connect(), INetwork::Create(), StridedSliceDescriptor::m_Begin, StridedSliceDescriptor::m_BeginMask, StridedSliceDescriptor::m_EllipsisMask, StridedSliceDescriptor::m_End, StridedSliceDescriptor::m_EndMask, StridedSliceDescriptor::m_NewAxisMask, StridedSliceDescriptor::m_ShrinkAxisMask, and StridedSliceDescriptor::m_Stride.

228 {
229  using namespace armnn;
230  // Builds up the structure of the network.
232 
233  TensorInfo inputTensorInfo(inputShape, DataType, qScale, qOffset);
234  TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
235 
236  armnn::StridedSliceDescriptor stridedSliceDescriptor;
237  stridedSliceDescriptor.m_Begin = beginData;
238  stridedSliceDescriptor.m_End = endData;
239  stridedSliceDescriptor.m_Stride = stridesData;
240  stridedSliceDescriptor.m_BeginMask = beginMask;
241  stridedSliceDescriptor.m_EndMask = endMask;
242  stridedSliceDescriptor.m_ShrinkAxisMask = shrinkAxisMask;
243  stridedSliceDescriptor.m_EllipsisMask = ellipsisMask;
244  stridedSliceDescriptor.m_NewAxisMask = newAxisMask;
245 
246  IConnectableLayer* input = net->AddInputLayer(0, "Input_Layer");
247  IConnectableLayer* stridedSlice = net->AddStridedSliceLayer(stridedSliceDescriptor, "splitter");
248  IConnectableLayer* output = net->AddOutputLayer(0);
249 
250  Connect(input, stridedSlice, inputTensorInfo, 0, 0);
251  Connect(stridedSlice, output, outputTensorInfo, 0, 0);
252 
253  return net;
254 }
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:62
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1...
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Copyright (c) 2021 ARM Limited and Contributors.
int32_t m_BeginMask
Begin mask value.
int32_t m_EndMask
End mask value.
DataType
Definition: Types.hpp:36
int32_t m_NewAxisMask
New axis mask value.
int32_t m_EllipsisMask
Ellipsis mask value.
std::vector< int > m_Stride
Stride values for the input that will be sliced.
std::vector< int > m_End
End values for the input that will be sliced.
A StridedSliceDescriptor for the StridedSliceLayer.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:529

◆ StridedSlicedEndToEndTest()

void armnn::experimental::StridedSlicedEndToEndTest ( const std::vector< BackendId > &  backends)

Definition at line 257 of file StridedSliceAsyncEndToEndTest.hpp.

258 {
259  using namespace armnn;
260  using T = ResolveType<ArmnnType>;
261 
262  const TensorShape& inputShape = {3, 2, 3, 1};
263  const TensorShape& outputShape = {1, 2, 3, 1};
264  const std::vector<int>& beginData = {1, 0, 0, 0};
265  const std::vector<int>& endData = {2, 2, 3, 1};
266  const std::vector<int>& stridesData = {1, 1, 1, 1};
267  int beginMask = 0;
268  int endMask = 0;
269  int shrinkAxisMask = 0;
270  int ellipsisMask = 0;
271  int newAxisMask = 0;
272 
273  // Builds up the structure of the network
274  INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
275  outputShape,
276  beginData,
277  endData,
278  stridesData,
279  beginMask,
280  endMask,
281  shrinkAxisMask,
282  ellipsisMask,
283  newAxisMask);
284 
285  BOOST_TEST_CHECKPOINT("create a network");
286 
287  // Creates structures for input & output.
288  std::vector<T> inputData{
289  1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
290 
291  3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
292 
293  5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
294  };
295 
296  std::vector<T> outputExpected{
297  3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f
298  };
299 
300  std::map<int, std::vector<T>> inputTensorData = {{0, inputData}};
301  std::map<int, std::vector<T>> expectedOutputData = {{0, outputExpected}};
302 
303  AsyncEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensorData, expectedOutputData, backends, 0.000001f);
304 }
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173

◆ StridedSlicedMultiThreadedEndToEndTest()

void armnn::experimental::StridedSlicedMultiThreadedEndToEndTest ( const std::vector< BackendId > &  backends)

Definition at line 403 of file StridedSliceAsyncEndToEndTest.hpp.

404 {
405  using namespace armnn;
406  using T = ResolveType<ArmnnType>;
407 
408  const TensorShape& inputShape = {3, 2, 3, 1};
409  const TensorShape& outputShape = {1, 2, 3, 1};
410  const std::vector<int>& beginData = {1, 0, 0, 0};
411  const std::vector<int>& endData = {2, 2, 3, 1};
412  const std::vector<int>& stridesData = {1, 1, 1, 1};
413  int beginMask = 0;
414  int endMask = 0;
415  int shrinkAxisMask = 0;
416  int ellipsisMask = 0;
417  int newAxisMask = 0;
418 
419  // Builds up the structure of the network
420  INetworkPtr net = CreateStridedSliceNetwork<ArmnnType>(inputShape,
421  outputShape,
422  beginData,
423  endData,
424  stridesData,
425  beginMask,
426  endMask,
427  shrinkAxisMask,
428  ellipsisMask,
429  newAxisMask);
430 
431  BOOST_TEST_CHECKPOINT("create a network");
432 
433  // Creates structures for input & output.
434  std::vector<T> inputData1{
435  1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
436 
437  3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
438 
439  5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
440  };
441 
442  std::vector<T> outputExpected1{ 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f };
443 
444  // Creates structures for input & output.
445  std::vector<T> inputData2{
446  1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
447 
448  8.0f, 8.0f, 8.0f, 7.0f, 7.0f, 7.0f,
449 
450  5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f
451  };
452 
453  std::vector<T> outputExpected2{ 8.0f, 8.0f, 8.0f, 7.0f, 7.0f, 7.0f };
454 
455  std::vector<std::map<int, std::vector<T>>> inputTensors;
456  std::vector<std::map<int, std::vector<T>>> outputTensors;
457 
458  inputTensors.push_back(std::map<int, std::vector<T>> {{0, inputData1}});
459  inputTensors.push_back(std::map<int, std::vector<T>> {{0, inputData2}});
460  outputTensors.push_back(std::map<int, std::vector<T>> {{0, outputExpected1}});
461  outputTensors.push_back(std::map<int, std::vector<T>> {{0, outputExpected2}});
462 
463  AsyncThreadedEndToEndTestImpl<ArmnnType, ArmnnType>(move(net), inputTensors, outputTensors, backends, 2);
464 }
typename ResolveTypeImpl< DT >::Type ResolveType
Definition: ResolveType.hpp:79
Copyright (c) 2021 ARM Limited and Contributors.
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:173