13 #if defined(ARMCOMPUTECL_ENABLED) 17 #if defined(ARMCOMPUTENEON_ENABLED) 21 using namespace armnn;
28 template<
typename IComputeTensorHandle>
29 boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
30 std::initializer_list<unsigned int> expectedDimensions)
32 arm_compute::ITensorInfo*
info = tensorHandle->GetTensor().info();
34 auto infoNumDims = info->num_dimensions();
35 auto numExpectedDims = expectedDimensions.size();
36 if (infoNumDims != numExpectedDims)
38 boost::test_tools::predicate_result res(
false);
39 res.message() <<
"Different number of dimensions [" << info->num_dimensions()
40 <<
"!=" << expectedDimensions.size() <<
"]";
44 size_t i = info->num_dimensions() - 1;
46 for (
unsigned int expectedDimension : expectedDimensions)
48 if (info->dimension(i) != expectedDimension)
50 boost::test_tools::predicate_result res(
false);
51 res.message() <<
"For dimension " << i <<
52 " expected size " << expectedDimension <<
53 " got " << info->dimension(i);
63 template<
typename IComputeTensorHandle>
80 Connect(input, layer1, tensorInfo);
81 Connect(layer1, layer2, tensorInfo);
82 Connect(layer2, output, tensorInfo);
84 input->CreateTensorHandles(registry, refFactory);
86 layer2->CreateTensorHandles(registry, refFactory);
87 output->CreateTensorHandles(registry, refFactory);
90 auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, factory);
91 auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);
94 BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
95 BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
96 auto inputHandle1 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
97 auto outputHandle1 = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
99 BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));
103 BOOST_TEST(queueDescriptor2.
m_Inputs.size() == 1);
104 BOOST_TEST(queueDescriptor2.
m_Outputs.size() == 1);
105 auto inputHandle2 = boost::polymorphic_downcast<IComputeTensorHandle*>(queueDescriptor2.
m_Inputs[0]);
106 auto outputHandle2 = boost::polymorphic_downcast<RefTensorHandle*>(queueDescriptor2.
m_Outputs[0]);
107 BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry &registry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)
This layer represents a memory copy operation.
LayerT * AddLayer(Args &&... args)
Adds a new layer, of type LayerType, to the graph constructed with the arguments passed.
A layer user-provided data can be bound to (e.g. inputs, outputs).
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
std::vector< ITensorHandle * > m_Outputs
std::vector< ITensorHandle * > m_Inputs