// Shared driver for all Debug-layer workload tests below.
// NOTE(review): this is an extracted fragment — the enclosing function
// signature and several interior lines (tensor-info setup, descriptor
// creation, data copy into the input handle) are not visible here.
20 template<
typename T, std::
size_t Dim>
// Raw (unquantized) reference values; the Debug layer is a pass-through,
// so outputExpectedData mirrors inputData in the callers below.
26 std::vector<float>& inputData,
27 std::vector<float>& outputExpectedData,
// Exact text the Debug workload is expected to print to stdout.
29 const std::string expectedStringOutput,
// Quantization parameters, applied only when T is a quantized type.
30 const float qScale = 1.0f,
31 const int32_t qOffset = 0)
34 if(armnn::IsQuantizedType<T>())
// Quantize the float reference data into tensors of the test type T.
43 boost::multi_array<T, Dim> input =
44 MakeTensor<T, Dim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
48 MakeTensor<T, Dim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
// Backend-specific tensor handles for the workload's single input/output.
51 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.
CreateTensorHandle(inputTensorInfo);
52 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.
CreateTensorHandle(outputTensorInfo);
56 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
57 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
// Build the Debug workload under test via the backend's factory.
59 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateDebug(descriptor, info);
61 inputHandle->Allocate();
62 outputHandle->Allocate();
// Redirect std::cout into a local stream so the Debug workload's printed
// output can be captured; the original buffer is saved for restoration.
66 std::ostringstream oss;
67 std::streambuf* coutStreambuf = std::cout.rdbuf();
68 std::cout.rdbuf(oss.rdbuf());
70 ExecuteWorkload(*workload, memoryManager);
// Restore std::cout before asserting so test-framework output is visible.
// NOTE(review): if ExecuteWorkload throws, cout stays redirected — a
// scope-guard (RAII) restorer would be safer; confirm against full file.
72 std::cout.rdbuf(coutStreambuf);
// The captured text must match the expected dump byte-for-byte.
74 BOOST_TEST(oss.str() == expectedStringOutput);
// Debug-layer test over a 4D tensor of shape [1, 2, 2, 3].
// NOTE(review): extracted fragment — the function signature and the
// element lists of `input`/`outputExpected` are not visible here.
81 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Input and output shapes are identical: Debug passes data through.
89 unsigned int inputShape[] = {1, 2, 2, 3};
90 unsigned int outputShape[] = {1, 2, 2, 3};
100 std::vector<float> input = std::vector<float>(
108 std::vector<float> outputExpected = std::vector<float>(
// Exact dump the Debug workload should print: guid/name/slot, the
// 4D shape, min/max of the data, and the nested data values 1..12.
116 const std::string expectedStringOutput =
117 "{ \"layerGuid\": 1," 118 " \"layerName\": \"TestOutput\"," 119 " \"outputSlot\": 0," 120 " \"shape\": [1, 2, 2, 3]," 121 " \"min\": 1, \"max\": 12," 122 " \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
// Delegate to the shared driver with Dim = 4.
124 return DebugTestImpl<T, 4>(workloadFactory,
131 expectedStringOutput);
// Debug-layer test over a 3D tensor of shape [3, 3, 1].
// NOTE(review): extracted fragment — the function signature and the
// element lists of `input`/`outputExpected` are not visible here.
134 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Input and output shapes are identical: Debug passes data through.
142 unsigned int inputShape[] = {3, 3, 1};
143 unsigned int outputShape[] = {3, 3, 1};
153 std::vector<float> input = std::vector<float>(
160 std::vector<float> outputExpected = std::vector<float>(
// Exact dump expected from the Debug workload for values 1..9.
167 const std::string expectedStringOutput =
168 "{ \"layerGuid\": 1," 169 " \"layerName\": \"TestOutput\"," 170 " \"outputSlot\": 0," 171 " \"shape\": [3, 3, 1]," 172 " \"min\": 1, \"max\": 9," 173 " \"data\": [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]] }\n";
// Delegate to the shared driver with Dim = 3.
175 return DebugTestImpl<T, 3>(workloadFactory,
182 expectedStringOutput);
// Debug-layer test over a 2D tensor of shape [2, 2].
// NOTE(review): extracted fragment — the function signature and the
// element lists of `input`/`outputExpected` are not visible here.
185 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Input and output shapes are identical: Debug passes data through.
193 unsigned int inputShape[] = {2, 2};
194 unsigned int outputShape[] = {2, 2};
204 std::vector<float> input = std::vector<float>(
210 std::vector<float> outputExpected = std::vector<float>(
// Exact dump expected from the Debug workload for values 1..4.
216 const std::string expectedStringOutput =
217 "{ \"layerGuid\": 1," 218 " \"layerName\": \"TestOutput\"," 219 " \"outputSlot\": 0," 220 " \"shape\": [2, 2]," 221 " \"min\": 1, \"max\": 4," 222 " \"data\": [[1, 2], [3, 4]] }\n";
// Delegate to the shared driver with Dim = 2.
224 return DebugTestImpl<T, 2>(workloadFactory,
231 expectedStringOutput);
// Debug-layer test over a 1D tensor of shape [4].
// NOTE(review): extracted fragment — the function signature and parts of
// the body are not visible here.
234 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Input and output shapes are identical: Debug passes data through.
242 unsigned int inputShape[] = {4};
243 unsigned int outputShape[] = {4};
253 std::vector<float> input = std::vector<float>(
255 1.0f, 2.0f, 3.0f, 4.0f,
258 std::vector<float> outputExpected = std::vector<float>(
260 1.0f, 2.0f, 3.0f, 4.0f,
// Exact dump expected from the Debug workload for values 1..4.
// FIX: restored the "shape" entry, which every sibling test (4D/3D/2D)
// emits between "outputSlot" and "min" and which was missing here.
263 const std::string expectedStringOutput =
264 "{ \"layerGuid\": 1," 265 " \"layerName\": \"TestOutput\"," 266 " \"outputSlot\": 0," 267 " \"shape\": [4]," 268 " \"min\": 1, \"max\": 4," 269 " \"data\": [1, 2, 3, 4] }\n";
// Delegate to the shared driver with Dim = 1.
271 return DebugTestImpl<T, 1>(workloadFactory,
278 expectedStringOutput);
// Per-data-type entry points exposed to the test suites. Each of the
// return statements below is the body of one thin wrapper function whose
// signature is outside this extracted view; every wrapper forwards to the
// matching Debug{1..4}dTest template instantiation.
//
// Float32 variants (4D down to 1D).
287 return Debug4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
294 return Debug3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
301 return Debug2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
308 return Debug1dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
// BFloat16 variants (4D down to 1D).
315 return Debug4dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
322 return Debug3dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
329 return Debug2dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
336 return Debug1dTest<armnn::DataType::BFloat16>(workloadFactory, memoryManager);
// Quantized asymmetric uint8 variants (4D down to 1D).
343 return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
350 return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
357 return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
364 return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
// Quantized symmetric int16 variants (4D down to 1D).
371 return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
378 return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
385 return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
392 return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
LayerTestResult< int16_t, 3 > Debug3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 1 > Debug1dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 1 > Debug1dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 1 > Debug1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void IgnoreUnused(Ts &&...)
#define ARMNN_NO_DEPRECATE_WARN_END
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< int16_t, 1 > Debug1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 2 > Debug2dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
LayerTestResult< armnn::BFloat16, 3 > Debug3dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
LayerTestResult< armnn::BFloat16, 2 > Debug2dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 4 > Debug4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 3 > Debug3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< armnn::BFloat16, 4 > Debug4dBFloat16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 4 > Debug4dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 2 > Debug2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
Contains information about inputs and outputs to a layer.
void SetQuantizationOffset(int32_t offset)
LayerTestResult< uint8_t, 4 > Debug4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 2 > Debug2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 3 > Debug3dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)