template<typename T, std::size_t Dim>
LayerTestResult<T, Dim> QuantizeTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                         const armnn::ITensorHandleFactory& tensorHandleFactory,
                                         const armnn::TensorInfo& inputTensorInfo,
                                         const armnn::TensorInfo& outputTensorInfo,
                                         const std::vector<float>& inputData,
                                         const std::vector<T>& expectedOutputData,
                                         armnn::QuantizeQueueDescriptor descriptor)
{
    IgnoreUnused(memoryManager);

    boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);

    LayerTestResult<T, Dim> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);

    // Create input/output handles and wire them into the workload descriptor.
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::WorkloadInfo info;
    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantize(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), input.data());
    ExecuteWorkload(*workload, memoryManager);
    CopyDataFromITensorHandle(ret.output.data(), outputHandle.get());

    return ret;
}
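// For reference: the expected outputs in the tests below follow the affine
// quantization rule -- divide by the scale, round to nearest, add the zero-point
// offset, then clamp to the target type's range. A minimal sketch of that rule;
// QuantizeRef is a hypothetical name (Arm NN ships its own armnn::Quantize<T>),
// and the exact rounding mode is an assumption. Requires <algorithm>, <cmath>,
// <cstdint> and <limits>.
template <typename QuantizedType>
QuantizedType QuantizeRef(float value, float scale, int32_t offset)
{
    const float max = static_cast<float>(std::numeric_limits<QuantizedType>::max());
    const float min = static_cast<float>(std::numeric_limits<QuantizedType>::lowest());

    // Scale and shift into the quantized domain, then saturate.
    const float quantized = std::round(value / scale) + static_cast<float>(offset);
    return static_cast<QuantizedType>(std::min(std::max(quantized, min), max));
}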
template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                         const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor descriptor;

    // Tensor infos and the concrete test values are elided in the original listing.
    std::vector<float> inputData = std::vector<float>({ /* ... */ });
    std::vector<T> expectedOutputData = std::vector<T>({ /* ... */ });

    return QuantizeTestImpl<T, 4>(workloadFactory, memoryManager, tensorHandleFactory,
                                  inputTensorInfo, outputTensorInfo,
                                  inputData, expectedOutputData, descriptor);
}

template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
LayerTestResult<T, 4> QuantizeClampTest(armnn::IWorkloadFactory& workloadFactory,
                                        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                        const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::QuantizeQueueDescriptor descriptor;

    const armnn::TensorInfo inputTensorInfo({1, 1, 2, 1}, armnn::DataType::Float32);
    const armnn::TensorInfo outputTensorInfo({1, 1, 2, 1}, ArmnnOutputType, 0.0001f, 0);

    const T max = std::numeric_limits<T>::max();
    const T min = std::numeric_limits<T>::lowest();

    // Inputs far outside the representable range must saturate to the type's limits.
    std::vector<float> inputData = std::vector<float>({ -100.0f, 100.0f });
    std::vector<T> expectedOutputData = std::vector<T>({ min, max });

    return QuantizeTestImpl<T, 4>(workloadFactory, memoryManager, tensorHandleFactory,
                                  inputTensorInfo, outputTensorInfo,
                                  inputData, expectedOutputData, descriptor);
}
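// Worked example for the clamp path: with scale 0.0001f and offset 0, an input
// of +/-100.0f lands at round(+/-100 / 0.0001) = +/-1,000,000 in the quantized
// domain -- outside every target type's range -- so the workload must saturate.
// Illustrated with the hypothetical QuantizeRef sketch above (IgnoreUnused is
// the variadic helper this file already uses):
inline void QuantizeClampExample()
{
    const int8_t  lo   = QuantizeRef<int8_t>(-100.0f, 0.0001f, 0);  // -1,000,000 -> clamps to -128
    const int8_t  hi   = QuantizeRef<int8_t>( 100.0f, 0.0001f, 0);  //  1,000,000 -> clamps to  127
    const int16_t hi16 = QuantizeRef<int16_t>(100.0f, 0.0001f, 0);  //  1,000,000 -> clamps to 32767
    IgnoreUnused(lo, hi, hi16);
}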
LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                   const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(armnn::IWorkloadFactory& workloadFactory,
                                                      const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                      const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int8_t, 4> QuantizeClampInt8Test(armnn::IWorkloadFactory& workloadFactory,
                                                 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                 const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> QuantizeClampInt16Test(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                   const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory);
}
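// How these entry points are typically exercised: a backend's test suite
// registers each one as a unit test case. A sketch based on Arm NN's reference
// backend conventions; the exact macro name varies across versions (older trees
// use ARMNN_AUTO_TEST_CASE, newer ones take an ITensorHandleFactory via
// ARMNN_AUTO_TEST_CASE_WITH_THF), so treat this as an assumption:
//
//     ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
//     ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeClampUint8,  QuantizeClampUint8Test)
//     ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeClampInt16,  QuantizeClampInt16Test)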