// NOTE(review): this looks like a Doxygen-style source listing of an ArmNN
// splitter-layer test. The leading number on each line is the ORIGINAL file's
// line number, and several original lines are absent from this extraction
// (e.g. the SplitterQueueDescriptor/WorkloadInfo declarations, the window
// origin wrappers, the CopyData* calls and the trailing return) - the comments
// below describe only what is visible here.
//
// SplitterTestCommon: builds a [3,6,5] (channels, height, width) input tensor
// and runs two chained Splitter workloads:
//   1) input  [3,6,5] -> output1 [1,6,5] + output2 [2,6,5]
//   2) output2 [2,6,5] -> output3 [1,6,5] + output4 [1,6,5]
// and returns the four per-output LayerTestResult objects.
20 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
21 std::vector<LayerTestResult<T,3>> SplitterTestCommon(
// 3D tensors throughout (no batch dimension): 3 channels of 6 rows x 5 cols.
28 unsigned int inputWidth = 5;
29 unsigned int inputHeight = 6;
30 unsigned int inputChannels = 3;
// First split: output1 takes a single channel, output2 the remaining two.
40 unsigned int outputWidth1 = inputWidth;
41 unsigned int outputHeight1 = inputHeight;
42 unsigned int outputChannels1 = 1;
45 unsigned int outputWidth2 = inputWidth;
46 unsigned int outputHeight2 = inputHeight;
47 unsigned int outputChannels2 = 2;
51 armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType, qScale, qOffset);
54 armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
55 armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType, qScale, qOffset);
// Second split: outputs 3 and 4 each take one channel of output2, so they
// reuse the single-channel dimensions from the first split.
58 armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
59 armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType, qScale, qOffset);
// For quantized element types, (re)apply the same scale/offset to every
// tensor. The TensorInfo constructors above already received qScale/qOffset,
// so presumably this block is defensive/redundant - TODO confirm. (The line
// setting the input's quantization scale, original line ~65, is missing from
// this extraction.)
63 if(armnn::IsQuantizedType<T>())
66 inputTensorInfo.SetQuantizationOffset(qOffset);
67 outputTensorInfo1.SetQuantizationScale(qScale);
68 outputTensorInfo1.SetQuantizationOffset(qOffset);
69 outputTensorInfo2.SetQuantizationScale(qScale);
70 outputTensorInfo2.SetQuantizationOffset(qOffset);
71 outputTensorInfo3.SetQuantizationScale(qScale);
72 outputTensorInfo3.SetQuantizationOffset(qOffset);
73 outputTensorInfo4.SetQuantizationScale(qScale);
74 outputTensorInfo4.SetQuantizationOffset(qOffset);
// Input data: channel c holds the values 30*c+1 .. 30*c+30, row-major 6x5.
82 auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
83 armnnUtils::QuantizedVector<T>({
84 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
85 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
86 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
87 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
88 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
89 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
91 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
92 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
93 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
94 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
95 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
96 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
98 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
99 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
100 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
101 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
102 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
103 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// Expected output1: channel 0 of the input (values 1..30).
109 ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
110 armnnUtils::QuantizedVector<T>({
111 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
112 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
113 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
114 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
115 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
116 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
// Expected output2: channels 1 and 2 of the input (values 31..90).
122 ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
123 armnnUtils::QuantizedVector<T>({
124 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
125 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
126 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
127 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
128 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
129 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
131 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
132 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
133 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
134 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
135 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
136 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// Expected output3: first channel of output2 (values 31..60).
142 ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
143 armnnUtils::QuantizedVector<T>({
144 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
145 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
146 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
147 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
148 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
149 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
// Expected output4: second channel of output2 (values 61..90).
155 ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
156 armnnUtils::QuantizedVector<T>({
157 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
158 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
159 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
160 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
161 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
162 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// View origins ({C,H,W}) for the splitter windows: in each split the first
// view starts at channel 0 and the second at channel 1 of its input tensor.
170 std::vector<unsigned int> wOrigin1 = {0, 0, 0};
173 std::vector<unsigned int> wOrigin2 = {1, 0, 0};
176 std::vector<unsigned int> wOrigin3 = {0, 0, 0};
179 std::vector<unsigned int> wOrigin4 = {1, 0, 0};
185 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
// When the backend supports sub-tensors, the outputs are created as sub-tensor
// views (outputs 3/4 as views into outputHandle2); the fallback ':' branches
// are missing from this extraction but presumably create standalone tensor
// handles - TODO confirm against the original file.
187 std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
188 subTensorsSupported ?
192 std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
193 subTensorsSupported ?
197 std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
198 subTensorsSupported ?
199 workloadFactory.
CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
202 std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
203 subTensorsSupported ?
204 workloadFactory.
CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
// First splitter workload: input -> output1 + output2.
211 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
212 AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
213 AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
215 data.m_ViewOrigins.push_back(window1);
216 data.m_ViewOrigins.push_back(window2);
218 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateSplitter(data, info);
220 inputHandle->Allocate();
221 outputHandle1->Allocate();
222 outputHandle2->Allocate();
// Second splitter workload: output2 (now an input) -> output3 + output4.
234 AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
235 AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
236 AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
238 data2.m_ViewOrigins.push_back(window3);
239 data2.m_ViewOrigins.push_back(window4);
241 std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.
CreateSplitter(data2, info2);
243 outputHandle3->Allocate();
244 outputHandle4->Allocate();
246 ExecuteWorkload(*workload2, memoryManager);
// Results collected in split order: {out1, out2, out3, out4}.
251 std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};
// CopyViaSplitterTestImpl: degenerate single-view "split" that copies a whole
// [3,6,5] tensor through one Splitter workload (origin {0,0,0}, output shape
// identical to the input). The expected result is the input itself (see
// `ret.outputExpected = input;` at original line 322). Several original lines
// are missing from this extraction (tensorInfo declaration, descriptor setup,
// CopyData* calls, return) - comments describe only what is visible.
256 template<armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
260 float qScale, int32_t qOffset)
// Same 1..90 ramp data as SplitterTestCommon: channel c holds 30*c+1..30*c+30.
264 auto input = MakeTensor<T, 3>(
266 armnnUtils::QuantizedVector<T>({
267 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
268 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
269 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
270 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
271 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
272 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
274 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
275 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
276 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
277 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
278 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
279 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
281 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
282 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
283 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
284 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
285 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
286 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
// Single view covering the whole tensor, anchored at the origin.
290 std::vector<unsigned int> origin = { 0, 0, 0 };
296 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(tensorInfo);
// Output handle is a sub-tensor view when supported; the fallback ':' branch
// is missing from this extraction - presumably a standalone handle.
298 std::unique_ptr<armnn::ITensorHandle> outputHandle =
299 subTensorsSupported ?
// One input, one output, both described by the same tensorInfo (pure copy).
306 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
307 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
309 data.m_ViewOrigins.push_back(window);
311 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateSplitter(data, info);
313 inputHandle->Allocate();
314 outputHandle->Allocate();
// A copy through a splitter must be the identity: expect the input back.
322 ret.outputExpected = input;
// Per-data-type entry points (the enclosing function signatures are missing
// from this extraction). Float variants of SplitterTestCommon are called
// without qScale/qOffset - presumably those parameters have defaults; the
// quantized variants pass scale 1.0f / offset 0 so the QuantizedVector ramps
// map 1:1 onto the raw integer values. TODO confirm against the original file.
333 return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
340 return SplitterTestCommon<armnn::DataType::Float16>(workloadFactory, memoryManager);
347 return SplitterTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
354 return SplitterTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
// Copy-via-splitter variants: float types use 0.0f/0 (no quantization),
// quantized types 1.0f/0 as above.
361 return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
368 return CopyViaSplitterTestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
375 return CopyViaSplitterTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, 1.0f, 0);
382 return CopyViaSplitterTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, 1.0f, 0);
virtual std::unique_ptr< IWorkload > CreateSplitter(const SplitterQueueDescriptor &descriptor, const WorkloadInfo &info) const
std::vector< LayerTestResult< float, 3 > > SplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
LayerTestResult< uint8_t, 3 > CopyViaSplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateSubTensorHandle(ITensorHandle &parent, TensorShape const &subTensorShape, unsigned int const *subTensorOrigin) const =0
LayerTestResult< armnn::Half, 3 > CopyViaSplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::vector< LayerTestResult< armnn::Half, 3 > > SplitterFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
std::vector< LayerTestResult< uint8_t, 3 > > SplitterUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
std::vector< LayerTestResult< int16_t, 3 > > SplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< float, 3 > CopyViaSplitterFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
LayerTestResult< int16_t, 3 > CopyViaSplitterInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
virtual bool SupportsSubTensors() const =0