// NOTE(review): garbled extraction of a templated Debug-layer test helper
// (DebugTestImpl<T, Dim> — the name is visible in the call sites further down
// this file). The leading integers ("20", "26", ...) are original source line
// numbers fused into the text by the extraction; the jumps between them mark
// dropped lines. Only comments are added here — no code token is altered.
20 template<
typename T, std::
size_t Dim>
// (Original lines 21-25 — return type and the leading workloadFactory /
//  memoryManager / tensor-info parameters — are missing from this extraction;
//  presumably they match the tooltip prototypes at the bottom of the file.)
26 std::vector<float>& inputData,
27 std::vector<float>& outputExpectedData,
29 const std::string expectedStringOutput,
// Optional quantization parameters; the defaults leave float data unchanged.
30 const float qScale = 1.0f,
31 const int32_t qOffset = 0)
33 boost::ignore_unused(memoryManager);
// For quantized element types the tensor infos are presumably given
// qScale/qOffset here — the body of this if (original lines 35-42) is missing.
34 if(armnn::IsQuantizedType<T>())
// Build the (possibly quantized) input tensor from the raw float data.
43 boost::multi_array<T, Dim> input =
44 MakeTensor<T, Dim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
// Expected-output tensor, quantized the same way. Its assignment target
// (original lines 45-47) is missing from this extraction.
48 MakeTensor<T, Dim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
// Tensor handles for the workload; the factory-call right-hand sides
// (original lines 51-52 and 54-56) are missing.
50 std::unique_ptr<armnn::ITensorHandle> inputHandle =
53 std::unique_ptr<armnn::ITensorHandle> outputHandle =
// Wire input/output tensors into the workload descriptor/info pair.
57 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
58 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
60 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateDebug(descriptor, info);
62 inputHandle->Allocate();
63 outputHandle->Allocate();
// The test captures whatever the Debug workload writes to std::cout:
// temporarily swap cout's buffer for an ostringstream...
67 std::ostringstream oss;
68 std::streambuf* coutStreambuf = std::cout.rdbuf();
69 std::cout.rdbuf(oss.rdbuf());
71 ExecuteWorkload(*workload, memoryManager);
// ...restore cout's original buffer...
73 std::cout.rdbuf(coutStreambuf);
// ...and compare the captured text against the caller-supplied expectation.
// NOTE(review): if ExecuteWorkload throws, cout's buffer is never restored —
// an RAII guard would be safer, but the full context is not visible here.
75 BOOST_TEST(oss.str() == expectedStringOutput);
// NOTE(review): fragment of the 4-D Debug test case (Debug4dTest<ArmnnType>,
// per the tooltip prototypes at the bottom of the file). Interior lines —
// tensor-info construction, descriptor setup, the data initializer lists —
// are missing from this extraction. Comments only; no code token altered.
82 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Input and output share the same 1x2x2x3 shape — consistent with Debug
// being a pass-through layer (outputExpected mirrors input below).
90 unsigned int inputShape[] = {1, 2, 2, 3};
91 unsigned int outputShape[] = {1, 2, 2, 3};
// Initializer lists for both vectors (original lines 102-116) are missing;
// the expected dump below implies values 1..12.
101 std::vector<float> input = std::vector<float>(
109 std::vector<float> outputExpected = std::vector<float>(
// Expected JSON-style debug dump: guid/name/slot header, the 1x2x2x3 shape,
// min/max 1..12, and the data nested to match the shape.
117 const std::string expectedStringOutput =
118 "{ \"layerGuid\": 1," 119 " \"layerName\": \"TestOutput\"," 120 " \"outputSlot\": 0," 121 " \"shape\": [1, 2, 2, 3]," 122 " \"min\": 1, \"max\": 12," 123 " \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
// Delegate to the shared helper with Dim = 4; the intervening argument
// lines (original 126-131) are missing from this extraction.
125 return DebugTestImpl<T, 4>(workloadFactory,
132 expectedStringOutput);
// NOTE(review): fragment of the 3-D Debug test case (Debug3dTest<ArmnnType>).
// Same structure as the 4-D case above; interior lines are missing from this
// extraction. Comments only; no code token altered.
135 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Pass-through layer: input and output share the 3x3x1 shape.
143 unsigned int inputShape[] = {3, 3, 1};
144 unsigned int outputShape[] = {3, 3, 1};
// Initializer lists missing; the expected dump below implies values 1..9.
154 std::vector<float> input = std::vector<float>(
161 std::vector<float> outputExpected = std::vector<float>(
// Expected dump: 3x3x1 shape, min/max 1..9, data nested to match the shape.
168 const std::string expectedStringOutput =
169 "{ \"layerGuid\": 1," 170 " \"layerName\": \"TestOutput\"," 171 " \"outputSlot\": 0," 172 " \"shape\": [3, 3, 1]," 173 " \"min\": 1, \"max\": 9," 174 " \"data\": [[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]] }\n";
// Delegate to the shared helper with Dim = 3 (argument lines 177-182 missing).
176 return DebugTestImpl<T, 3>(workloadFactory,
183 expectedStringOutput);
// NOTE(review): fragment of the 2-D Debug test case (Debug2dTest<ArmnnType>).
// Same structure as the cases above; interior lines are missing from this
// extraction. Comments only; no code token altered.
186 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Pass-through layer: input and output share the 2x2 shape.
194 unsigned int inputShape[] = {2, 2};
195 unsigned int outputShape[] = {2, 2};
// Initializer lists missing; the expected dump below implies values 1..4.
205 std::vector<float> input = std::vector<float>(
211 std::vector<float> outputExpected = std::vector<float>(
// Expected dump: 2x2 shape, min/max 1..4, data nested to match the shape.
217 const std::string expectedStringOutput =
218 "{ \"layerGuid\": 1," 219 " \"layerName\": \"TestOutput\"," 220 " \"outputSlot\": 0," 221 " \"shape\": [2, 2]," 222 " \"min\": 1, \"max\": 4," 223 " \"data\": [[1, 2], [3, 4]] }\n";
// Delegate to the shared helper with Dim = 2 (argument lines 226-231 missing).
225 return DebugTestImpl<T, 2>(workloadFactory,
232 expectedStringOutput);
// NOTE(review): fragment of the 1-D Debug test case (Debug1dTest<ArmnnType>).
// Same structure as the cases above; interior lines are missing from this
// extraction. Comments only; no code token altered.
235 template <armnn::DataType ArmnnType,
typename T = armnn::ResolveType<ArmnnType>>
// Pass-through layer: input and output share the one-dimensional shape {4}.
243 unsigned int inputShape[] = {4};
244 unsigned int outputShape[] = {4};
// Here (unlike the other cases) the initializer values survived extraction:
// identical input and expected output, 1..4.
254 std::vector<float> input = std::vector<float>(
256 1.0f, 2.0f, 3.0f, 4.0f,
259 std::vector<float> outputExpected = std::vector<float>(
261 1.0f, 2.0f, 3.0f, 4.0f,
// Expected dump with min/max 1..4 and flat data [1, 2, 3, 4].
// NOTE(review): original line 268 — presumably the " \"shape\": [4]," field,
// by analogy with the other cases — was dropped by the extraction.
264 const std::string expectedStringOutput =
265 "{ \"layerGuid\": 1," 266 " \"layerName\": \"TestOutput\"," 267 " \"outputSlot\": 0," 269 " \"min\": 1, \"max\": 4," 270 " \"data\": [1, 2, 3, 4] }\n";
// Delegate to the shared helper with Dim = 1 (argument lines 273-278 missing).
272 return DebugTestImpl<T, 1>(workloadFactory,
279 expectedStringOutput);
// NOTE(review): each line below is the single-statement body of a thin
// non-template wrapper whose signature line was dropped by the extraction.
// The matching prototypes (Debug4dFloat32Test, Debug4dUint8Test, ...,
// each returning a LayerTestResult) appear as tooltip text at the bottom of
// this file. They instantiate the templated DebugNdTest helpers for the
// three element types: Float32, QAsymmU8 (uint8) and QSymmS16 (int16).
// Float32 instantiations (4d / 3d / 2d / 1d):
288 return Debug4dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
295 return Debug3dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
302 return Debug2dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
309 return Debug1dTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
// QAsymmU8 (asymmetric uint8) instantiations:
316 return Debug4dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
323 return Debug3dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
330 return Debug2dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
337 return Debug1dTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
// QSymmS16 (symmetric int16) instantiations:
344 return Debug4dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
351 return Debug3dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
358 return Debug2dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
365 return Debug1dTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
// NOTE(review): the lines from here to the end of the file are not part of
// the source proper — they are hover-tooltip/prototype text (function
// signatures and typedefs) carried along by the tool that extracted this
// listing. Kept intact below.
LayerTestResult< int16_t, 4 > Debug4dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 1 > Debug1dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void CopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
LayerTestResult< float, 3 > Debug3dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo, const bool IsMemoryManaged=true) const =0
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
LayerTestResult< uint8_t, 2 > Debug2dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< float, 2 > Debug2dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationScale(float scale)
LayerTestResult< float, 1 > Debug1dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 2 > Debug2dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 3 > Debug3dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< uint8_t, 1 > Debug1dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
void SetQuantizationOffset(int32_t offset)
LayerTestResult< uint8_t, 4 > Debug4dUint8Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
virtual std::unique_ptr< IWorkload > CreateDebug(const DebugQueueDescriptor &descriptor, const WorkloadInfo &info) const
LayerTestResult< float, 4 > Debug4dFloat32Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)
LayerTestResult< int16_t, 3 > Debug3dInt16Test(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager)