template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorInfo& inputTensorInfo,
    const armnn::TensorInfo& outputTensorInfo,
    const std::vector<float>& inputData,
    const std::vector<T>& expectedOutputData,
    armnn::QuantizeQueueDescriptor descriptor)
{
    boost::ignore_unused(memoryManager);

    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);

    // The expected result is supplied by the caller; the actual result is
    // copied back from the output tensor handle after the workload runs.
    LayerTestResult<T, Dim> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());

    return ret;
}
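// Reference for the checks below: ArmNN quantizes a Float32 value to the
// target type T with the affine mapping
//
//     quantized = clamp(round(value / scale) + offset, lowest<T>, max<T>)
//
// where scale and offset come from the output TensorInfo. QuantizeSimpleTest
// exercises values that stay inside the representable range of T, while
// QuantizeClampTest drives values past both ends so the result must saturate.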
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 2, 2, 3}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 2, 2, 3}, ArmnnOutputType, 0.5f, 1);

    std::vector<float> inputData = std::vector<float>(
        { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f });

    std::vector<T> expectedOutputData = std::vector<T>(
        { 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25 });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}
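// Sanity check on the data above: with scale 0.5 and offset 1,
// 1.0f maps to round(1.0 / 0.5) + 1 = 3 and 12.0f maps to round(12.0 / 0.5) + 1 = 25,
// matching the first and last expected output values.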
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    armnn::QuantizeQueueDescriptor desc;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

    std::vector<float> inputData = std::vector<float>({ -100.0f, 100.0f });

    std::vector<T> expectedOutputData = std::vector<T>({ min, max });

    return QuantizeTestImpl<T, 4>(workloadFactory,
                                  memoryManager,
                                  inputTensorInfo,
                                  outputTensorInfo,
                                  inputData,
                                  expectedOutputData,
                                  desc);
}
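// With scale 0.0001 and offset 0 the representable range is tiny (for an int8
// output roughly [-0.0128f, 0.0127f]), so both -100.0f and 100.0f lie far
// outside it and must saturate to std::numeric_limits<T>::lowest() and
// std::numeric_limits<T>::max() respectively.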
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}
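// A minimal sketch of how a backend's layer-test suite might register these
// entry points, assuming the ARMNN_AUTO_TEST_CASE macro used by the common
// layer tests (the exact wiring depends on the backend under test):
//
//     ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampUint8,  QuantizeClampUint8Test)
//     ARMNN_AUTO_TEST_CASE(QuantizeClampInt16,  QuantizeClampInt16Test)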