ArmNN 20.02
CreateWorkloadClNeon.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <test/CreateWorkload.hpp>

#include <backendsCommon/MemCopyWorkload.hpp>
#include <reference/RefWorkloadFactory.hpp>
#include <reference/RefTensorHandle.hpp>

#if defined(ARMCOMPUTECL_ENABLED)
#include <cl/ClTensorHandle.hpp>
#endif

#if defined(ARMCOMPUTENEON_ENABLED)
#include <neon/NeonTensorHandle.hpp>
#endif

using namespace armnn;

namespace
{

using namespace std;

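// Compares the shape reported by an arm_compute tensor handle against the expected
// armnn dimensions. ACL indexes dimensions in the reverse order to armnn, so the
// loop below walks the ACL shape from its highest index down while consuming the
// expected dimensions in the order they were written.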
template<typename IComputeTensorHandle>
boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
                                                             std::initializer_list<unsigned int> expectedDimensions)
{
    arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info();

    auto infoNumDims = info->num_dimensions();
    auto numExpectedDims = expectedDimensions.size();
    if (infoNumDims != numExpectedDims)
    {
        boost::test_tools::predicate_result res(false);
        res.message() << "Different number of dimensions [" << info->num_dimensions()
                      << "!=" << expectedDimensions.size() << "]";
        return res;
    }

    size_t i = info->num_dimensions() - 1;

    for (unsigned int expectedDimension : expectedDimensions)
    {
        if (info->dimension(i) != expectedDimension)
        {
            boost::test_tools::predicate_result res(false);
            res.message() << "For dimension " << i <<
                             " expected size " << expectedDimension <<
                             " got " << info->dimension(i);
            return res;
        }

        i--;
    }

    return true;
}

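// Builds a small graph (input -> MemCopy -> MemCopy -> output) in which the first
// MemCopy layer uses the backend workload factory under test and everything else
// uses the reference factory, then checks that each MemCopy workload ends up with
// the expected mix of reference and backend-specific tensor handles.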
template<typename IComputeTensorHandle>
void CreateMemCopyWorkloads(IWorkloadFactory& factory)
{
    TensorHandleFactoryRegistry registry;
    Graph graph;
    RefWorkloadFactory refFactory;
    // Creates the layers we're testing.
    Layer* const layer1 = graph.AddLayer<MemCopyLayer>("layer1");
    Layer* const layer2 = graph.AddLayer<MemCopyLayer>("layer2");

    // Creates extra layers.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // Connects up.
    TensorInfo tensorInfo({2, 3}, DataType::Float32);
    Connect(input, layer1, tensorInfo);
    Connect(layer1, layer2, tensorInfo);
    Connect(layer2, output, tensorInfo);

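    // layer1 gets backend-specific handles from the factory under test; the
    // surrounding layers use reference handles.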
    input->CreateTensorHandles(registry, refFactory);
    layer1->CreateTensorHandles(registry, factory);
    layer2->CreateTensorHandles(registry, refFactory);
    output->CreateTensorHandles(registry, refFactory);

    // make the workloads and check them
    auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, factory);
    auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);

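    // The first MemCopy copies from the reference input handle into the
    // backend-specific handle created for layer1.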
    MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData();
    BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
    auto inputHandle1  = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
    auto outputHandle1 = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
    BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));

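    // The second MemCopy copies in the opposite direction: from the
    // backend-specific handle produced by layer1 back into a reference handle.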
    MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
    BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1);
    BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
    auto inputHandle2  = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
    auto outputHandle2 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
}

} //namespace
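
A backend's unit tests would typically instantiate CreateMemCopyWorkloads with their own tensor handle and workload factory types. Below is a minimal sketch for the CL backend, assuming helper names along the lines of ClWorkloadFactoryHelper and ClContextControlFixture from the CL test suite; those names and include paths are assumptions, not part of this file.

#include <aclCommon/test/CreateWorkloadClNeon.hpp>
#include <cl/ClWorkloadFactory.hpp>
#include <cl/test/ClWorkloadFactoryHelper.hpp>

BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)

BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
{
    // Builds a CL workload factory (assumed helper) and runs the shared MemCopy
    // checks with IClTensorHandle as the backend-specific handle type.
    ClWorkloadFactory factory =
        ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
    CreateMemCopyWorkloads<IClTensorHandle>(factory);
}

BOOST_AUTO_TEST_SUITE_END()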