template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
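    // The test splits the (channels, height, width) = (3, 6, 5) input along the
    // channel axis into a one-channel view and a two-channel view, then splits the
    // two-channel result again into two one-channel views, giving four outputs.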
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;
    // First split output: channel 0 of the input.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // Second split output: channels 1 and 2 of the input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;
    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);

    // Outputs of the first split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);

    // Outputs of the second split (applied to the two-channel result above).
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }
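
    // Result holders for the four split outputs, assuming the LayerTestResult
    // helper (with output/outputExpected members) used throughout these layer tests.
    LayerTestResult<T, 3> ret1(outputTensorInfo1);
    LayerTestResult<T, 3> ret2(outputTensorInfo2);
    LayerTestResult<T, 3> ret3(outputTensorInfo3);
    LayerTestResult<T, 3> ret4(outputTensorInfo4);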
    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            // Channel 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            // Channel 1
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            // Channel 2
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset)));
    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        },
        qScale, qOffset)));
    // Channels 1 and 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset)));
    // Channel 0 of the second split (channel 1 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        },
        qScale, qOffset)));
    // Channel 1 of the second split (channel 2 of the original input).
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        armnnUtils::QuantizedVector<T>({
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset)));
    // View origins are (channel, height, width) offsets into the parent tensor.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0};
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);
    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);
    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();
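    // A minimal sketch of the elided execution step, assuming the
    // CopyDataToITensorHandle/CopyDataFromITensorHandle test helpers and the
    // boost::multi_array layout of LayerTestResult: copy the input in, run the
    // first splitter workload, then read back both outputs.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());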
    // Do the second split: the two-channel output of the first split becomes the new input.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);
    outputHandle3->Allocate();
    outputHandle4->Allocate();

    ExecuteWorkload(*workload2, memoryManager);
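    // Assumed read-back of the second split's outputs, mirroring the first split above.
    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());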
    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4};

    return ret;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale, int32_t qOffset)
{
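    // Assumed tensor descriptor: shape (channels, height, width) = (3, 6, 5), matching
    // the 90-element data block below and the input used in SplitterTestCommon.
    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);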
    auto input = MakeTensor<T, 3>(
        tensorInfo,
        armnnUtils::QuantizedVector<T>({
            // Channel 0
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            // Channel 1
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            // Channel 2
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        },
        qScale, qOffset));
    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);

    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
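    // A minimal sketch of the elided copy-through step, assuming the same test helpers
    // as above: copy the input in, execute the single-view splitter, and read the
    // result back into a LayerTestResult holder.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    LayerTestResult<T, 3> ret(tensorInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());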
    ret.outputExpected = input;

    return ret;
}

std::vector<LayerTestResult<float, 3>> SplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

std::vector<LayerTestResult<armnn::Half, 3>> SplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager);
}

std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}

std::vector<LayerTestResult<int16_t, 3>> SplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}

LayerTestResult<float, 3> CopyViaSplitterFloat32Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<armnn::Half, 3> CopyViaSplitterFloat16Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
}

LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
}

LayerTestResult<int16_t, 3> CopyViaSplitterInt16Test(
    armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
}