ArmNN 21.02
CreateWorkloadClNeon.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "CreateWorkload.hpp"

#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/MemCopyWorkload.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/RefTensorHandle.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <cl/ClTensorHandle.hpp>
#endif

#if defined(ARMCOMPUTENEON_ENABLED)
#include <neon/NeonTensorHandle.hpp>
#endif

using namespace armnn;

namespace
{

using namespace std;

template<typename IComputeTensorHandle>
boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
                                                             std::initializer_list<unsigned int> expectedDimensions)
{
    arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info();

    auto infoNumDims = info->num_dimensions();
    auto numExpectedDims = expectedDimensions.size();
    if (infoNumDims != numExpectedDims)
    {
        boost::test_tools::predicate_result res(false);
        res.message() << "Different number of dimensions [" << info->num_dimensions()
                      << "!=" << expectedDimensions.size() << "]";
        return res;
    }

    // arm_compute stores dimensions innermost-first (the reverse of armnn), so walk the
    // ACL tensor info backwards while iterating the expected armnn-ordered dimensions forwards.
    size_t i = info->num_dimensions() - 1;

    for (unsigned int expectedDimension : expectedDimensions)
    {
        if (info->dimension(i) != expectedDimension)
        {
            boost::test_tools::predicate_result res(false);
            res.message() << "For dimension " << i <<
                             " expected size " << expectedDimension <<
                             " got " << info->dimension(i);
            return res;
        }

        i--;
    }

    return true;
}

template<typename IComputeTensorHandle>
void CreateMemCopyWorkloads(IWorkloadFactory& factory)
{
    TensorHandleFactoryRegistry registry;
    Graph graph;
    RefWorkloadFactory refFactory;

    // Creates the layers we're testing.
    Layer* const layer1 = graph.AddLayer<MemCopyLayer>("layer1");
    Layer* const layer2 = graph.AddLayer<MemCopyLayer>("layer2");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    TensorInfo tensorInfo({2, 3}, DataType::Float32);
    Connect(input, layer1, tensorInfo);
    Connect(layer1, layer2, tensorInfo);
    Connect(layer2, output, tensorInfo);

    input->CreateTensorHandles(registry, refFactory);
    layer1->CreateTensorHandles(registry, factory);
    layer2->CreateTensorHandles(registry, refFactory);
    output->CreateTensorHandles(registry, refFactory);

    // make the workloads and check them
    auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, factory);
    auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);

    MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData();
    BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
    auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
    auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
    BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));

    MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
    BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
    auto inputHandle2 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
    auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
}

} //namespace
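
CreateMemCopyWorkloads is templated on the backend tensor-handle type so the same MemCopy check can be shared by the CL and Neon backends. Below is a minimal sketch of how a backend test might invoke it; the helper and handle-type names (ClWorkloadFactoryHelper, IClTensorHandle) are assumptions based on the backend unit tests and may differ between releases.

// Sketch only: ClWorkloadFactoryHelper, IClTensorHandle and the Boost.Test macros are
// assumed to be provided by the CL backend's unit-test support code.
BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
{
    ClWorkloadFactory factory =
        ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
    CreateMemCopyWorkloads<IClTensorHandle>(factory);
}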
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
Definition: Graph.hpp:402
InputLayer: A layer that user-provided data can be bound to (e.g. inputs, outputs).
Definition: InputLayer.hpp:13
OutputLayer: A layer that user-provided data can be bound to (e.g. inputs, outputs).
Definition: OutputLayer.hpp:13
MemCopyLayer: This layer represents a memory copy operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:12
std::vector< ITensorHandle * > m_Inputs
std::vector< ITensorHandle * > m_Outputs
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
Definition: Layer.cpp:250
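
The backwards loop in CompareTensorHandleShape above exists because arm_compute stores tensor dimensions innermost-first, the reverse of armnn's ordering. The following is an illustrative sketch of that convention, using only the public armnn and Arm Compute Library headers; it is not part of the file above.

// Sketch only: demonstrates the dimension-order convention the helper relies on.
#include <cassert>
#include <armnn/Tensor.hpp>
#include <arm_compute/core/TensorShape.h>

void ShapeOrderExample()
{
    armnn::TensorShape armnnShape({2, 3});      // armnn order: outermost dimension first
    arm_compute::TensorShape aclShape(3u, 2u);  // the same tensor in ACL order: innermost first

    // Index 0 refers to opposite ends of the shape in the two libraries, which is why
    // CompareTensorHandleShape walks the ACL info from num_dimensions() - 1 down to 0.
    assert(armnnShape[0] == 2 && aclShape[0] == 3);
}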