14 #if defined(ARMCOMPUTECL_ENABLED) 18 #if defined(ARMCOMPUTENEON_ENABLED) 22 #include <doctest/doctest.h> 24 using namespace armnn;
31 template<
typename IComputeTensorHandle>
32 PredicateResult CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
33 std::initializer_list<unsigned int> expectedDimensions)
35 arm_compute::ITensorInfo*
info = tensorHandle->GetTensor().info();
37 auto infoNumDims = info->num_dimensions();
38 auto numExpectedDims = expectedDimensions.size();
39 if (infoNumDims != numExpectedDims)
42 res.Message() <<
"Different number of dimensions [" << info->num_dimensions()
43 <<
"!=" << expectedDimensions.size() <<
"]";
47 size_t i = info->num_dimensions() - 1;
49 for (
unsigned int expectedDimension : expectedDimensions)
51 if (info->dimension(i) != expectedDimension)
54 res.Message() <<
"For dimension " << i <<
55 " expected size " << expectedDimension <<
56 " got " << info->dimension(i);
66 template<
typename IComputeTensorHandle>
83 Connect(input, layer1, tensorInfo);
84 Connect(layer1, layer2, tensorInfo);
85 Connect(layer2, output, tensorInfo);
87 input->CreateTensorHandles(registry, refFactory);
89 layer2->CreateTensorHandles(registry, refFactory);
90 output->CreateTensorHandles(registry, refFactory);
93 auto workload1 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer1, factory);
94 auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);
97 CHECK(queueDescriptor1.m_Inputs.size() == 1);
98 CHECK(queueDescriptor1.m_Outputs.size() == 1);
99 auto inputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
100 auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
102 auto result = CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3});
103 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
107 CHECK(queueDescriptor2.m_Inputs.size() == 1);
108 CHECK(queueDescriptor2.m_Outputs.size() == 1);
109 auto inputHandle2 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
110 auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
111 result = CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3});
112 CHECK_MESSAGE(result.m_Result, result.m_Message.str());
LayerT * AddLayer(Args &&... args)
Adds a new layer of type LayerType, constructed with the arguments passed, to the graph.
Copyright (c) 2021 ARM Limited and Contributors.
A layer user-provided data can be bound to (e.g. inputs, outputs).
This layer represents a memory copy operation.
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
virtual void CreateTensorHandles(const TensorHandleFactoryRegistry ®istry, const IWorkloadFactory &factory, const bool IsMemoryManaged=true)