// Splitter test: splits a (channels=3, height=6, width=5) input tensor along the
// channel axis into a 1-channel view (output 1) and a 2-channel view (output 2),
// then runs a second Splitter that divides output 2 into two 1-channel views
// (outputs 3 and 4). Returns one LayerTestResult per output so the caller can
// validate each view independently.
// qScale/qOffset parameterise quantization for quantized ArmnnType instantiations.
// NOTE(review): this extract is gappy — the function signature tail, workload
// creation, CopyData calls, the ternary fallback branches and the final return
// are not visible here; comments below describe only what is shown.
20 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
21 std::vector<LayerTestResult<T,3>> SplitterTestCommon(
// Input tensor dimensions, (C,H,W) = (3,6,5) -> 90 elements.
29 unsigned int inputWidth = 5;
30 unsigned int inputHeight = 6;
31 unsigned int inputChannels = 3;
// Output 1: a single channel, same H and W as the input.
41 unsigned int outputWidth1 = inputWidth;
42 unsigned int outputHeight1 = inputHeight;
43 unsigned int outputChannels1 = 1;
// Output 2: the remaining two channels.
46 unsigned int outputWidth2 = inputWidth;
47 unsigned int outputHeight2 = inputHeight;
48 unsigned int outputChannels2 = 2;
// Rank-3 tensor infos in (C,H,W) order, all sharing the same type/scale/offset.
51 armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);
54 armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
55 armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);
// Outputs 3 and 4 are single-channel tensors produced by splitting output 2,
// so they reuse the output-1 (1 x H x W) dimensions.
58 armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
59 armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
// For quantized types, set the same scale/offset on every tensor explicitly so
// the split is a pure data copy with no requantization between views.
63 if(armnn::IsQuantizedType<T>())
66 inputTensorInfo.SetQuantizationOffset(qOffset);
67 outputTensorInfo1.SetQuantizationScale(qScale);
68 outputTensorInfo1.SetQuantizationOffset(qOffset);
69 outputTensorInfo2.SetQuantizationScale(qScale);
70 outputTensorInfo2.SetQuantizationOffset(qOffset);
71 outputTensorInfo3.SetQuantizationScale(qScale);
72 outputTensorInfo3.SetQuantizationOffset(qOffset);
73 outputTensorInfo4.SetQuantizationScale(qScale);
74 outputTensorInfo4.SetQuantizationOffset(qOffset);
// Input data: the values 1..90 laid out channel-major — each channel is a
// consecutive 6x5 plane (1..30, 31..60, 61..90).
77 auto input = armnnUtils::QuantizedVector<T>(
79 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
80 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
81 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
82 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
83 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
84 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
86 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
87 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
88 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
89 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
90 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
91 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
93 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
94 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
95 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
96 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
97 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
98 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// Expected output 1: channel 0 of the input (values 1..30).
103 auto expectedData1 = armnnUtils::QuantizedVector<T>(
105 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
106 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
107 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
108 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
109 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
110 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
// Expected output 2: channels 1 and 2 of the input (values 31..90).
115 auto expectedData2 = armnnUtils::QuantizedVector<T>(
117 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
118 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
119 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
120 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
121 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
122 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
124 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
125 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
126 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
127 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
128 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
129 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// Expected output 3: first channel of output 2 (values 31..60).
134 auto expectedData3 = armnnUtils::QuantizedVector<T>(
136 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
137 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
138 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
139 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
140 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
141 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
// Expected output 4: second channel of output 2 (values 61..90).
146 auto expectedData4 = armnnUtils::QuantizedVector<T>(
148 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
149 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
150 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
151 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
152 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
153 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// Destination buffers for the results copied back from each output handle,
// sized from the corresponding TensorInfo.
157 std::vector<T> actualData1(outputTensorInfo1.GetNumElements());
158 std::vector<T> actualData2(outputTensorInfo2.GetNumElements());
159 std::vector<T> actualData3(outputTensorInfo3.GetNumElements());
160 std::vector<T> actualData4(outputTensorInfo4.GetNumElements());
// View origins in (C,H,W) coordinates within the parent tensor:
// view 1 starts at channel 0 of the input, view 2 at channel 1 of the input;
// views 3 and 4 start at channels 0 and 1 of output 2 respectively.
165 std::vector<unsigned int> wOrigin1 = {0, 0, 0};
168 std::vector<unsigned int> wOrigin2 = {1, 0, 0};
171 std::vector<unsigned int> wOrigin3 = {0, 0, 0};
174 std::vector<unsigned int> wOrigin4 = {1, 0, 0};
// Input handle; the outputs below become sub-tensor views of it when the
// backend supports sub-tensors.
178 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(inputTensorInfo);
// Each output handle: a sub-tensor view at its wOrigin when supported,
// otherwise a standalone handle (fallback branch of the ?: not visible here).
180 std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
181 subTensorsSupported ?
182 tensorHandleFactory.
CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
185 std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
186 subTensorsSupported ?
187 tensorHandleFactory.
CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
// Outputs 3 and 4 are views of *outputHandle2* (not of the input), matching
// the second-level split.
190 std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
191 subTensorsSupported ?
192 tensorHandleFactory.
CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
195 std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
196 subTensorsSupported ?
197 tensorHandleFactory.
CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
// First Splitter workload: input -> outputs 1 and 2.
203 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
204 AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
205 AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
214 inputHandle->Allocate();
215 outputHandle1->Allocate();
216 outputHandle2->Allocate();
// Second Splitter workload: output 2 -> outputs 3 and 4.
228 AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
229 AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
230 AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
// Register the view origins for the second split on its queue descriptor.
232 data2.m_ViewOrigins.push_back(window3);
233 data2.m_ViewOrigins.push_back(window4);
239 outputHandle3->Allocate();
240 outputHandle4->Allocate();
242 ExecuteWorkload(*workload2, memoryManager);
// Package actual vs expected data (plus actual vs expected shapes) for each
// of the four outputs.
247 LayerTestResult<T,3> ret1(actualData1, expectedData1, outputHandle1->GetShape(), outputTensorInfo1.GetShape());
248 LayerTestResult<T,3> ret2(actualData2, expectedData2, outputHandle2->GetShape(), outputTensorInfo2.GetShape());
249 LayerTestResult<T,3> ret3(actualData3, expectedData3, outputHandle3->GetShape(), outputTensorInfo3.GetShape());
250 LayerTestResult<T,3> ret4(actualData4, expectedData4, outputHandle4->GetShape(), outputTensorInfo4.GetShape());
252 std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};
// Copy-via-Splitter test: runs a Splitter with a single output view whose
// shape equals the input's and whose origin is {0,0,0}, so the "split" must
// reproduce the input tensor unchanged (i.e. it behaves as a copy).
// qScale/qOffset parameterise quantization for quantized ArmnnType instantiations.
// NOTE(review): this extract is gappy — the signature head, tensorInfo setup,
// workload creation, CopyData calls and the return are not visible here.
257 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
262 float qScale, int32_t qOffset)
// Input data: values 1..90 — presumably the same (3,6,5) channel-major layout
// as SplitterTestCommon; the tensorInfo construction is not visible here.
267 auto input = armnnUtils::QuantizedVector<T>(
269 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
270 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
271 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
272 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
273 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
274 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
276 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
277 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
278 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
279 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
280 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
281 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
283 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
284 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
285 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
286 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
287 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
288 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// Destination buffer for the result copied back from the output handle.
292 std::vector<T> actualOutput(tensorInfo.GetNumElements());
// Single view rooted at the origin of the input -> full-tensor copy.
294 std::vector<unsigned int> origin = { 0, 0, 0 };
298 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.
CreateTensorHandle(tensorInfo);
// Output is a sub-tensor view of the input when supported (fallback branch of
// the ?: not visible here).
300 std::unique_ptr<armnn::ITensorHandle> outputHandle =
301 subTensorsSupported ?
// Splitter workload wiring: one input and one output, both using the same
// tensorInfo (identical shape is what makes this a copy).
307 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
308 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
316 inputHandle->Allocate();
317 outputHandle->Allocate();
// Result construction tail: actual vs expected shape for the LayerTestResult.
327 outputHandle->GetShape(),
328 tensorInfo.GetShape());
// Type-specific entry points (function signatures fall in extraction gaps):
// instantiate the common test implementations for each supported data type.
// Quantized variants pass scale 1.0 and offset 0, so the quantized values
// equal the float test pattern exactly.
338 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory);
346 return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory);
354 return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
362 return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
// Copy-via-Splitter entry points (argument lists continue beyond this extract).
370 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory,
382 return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory,
394 return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory,
406 return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory,
std::vector< LayerTestResult< float, 3 > > SplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< float, 3 > CopyViaSplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::vector< LayerTestResult< uint8_t, 3 > > SplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
void IgnoreUnused(Ts &&...)
LayerTestResult< uint8_t, 3 > CopyViaSplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void CopyDataFromITensorHandle(void *mem, const armnn::ITensorHandle *tensorHandle)
void SetQuantizationScale(float scale)
virtual bool SupportsSubTensors() const =0
std::vector< ViewOrigin > m_ViewOrigins
std::vector< LayerTestResult< armnn::Half, 3 > > SplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< int16_t, 3 > CopyViaSplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
std::vector< LayerTestResult< int16_t, 3 > > SplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about TensorInfos of a layer.
virtual std::unique_ptr< IWorkload > CreateWorkload(LayerType type, const QueueDescriptor &descriptor, const WorkloadInfo &info) const
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0
LayerTestResult< armnn::Half, 3 > CopyViaSplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)