// NOTE(review): this chunk is a partial extraction of an Arm NN Constant-layer
// unit test. The leading integer on each line is the original file's line
// number fused in by the extractor, and the jumps in that numbering show that
// several original lines are elided. Code tokens are left byte-identical below;
// only comments are added.
//
// ConstantTestImpl<ArmnnType, T>: builds a fixed set of float values,
// quantizes them for the requested data type, creates a Constant workload via
// the backend's workload factory, and (per the elided tail) presumably
// executes it and packages actual vs. expected results into a LayerTestResult
// -- confirm against the full file.
24 template<armnn::DataType ArmnnType,
// T defaults to the C++ type corresponding to the ArmnnType enum value.
typename T = armnn::ResolveType<ArmnnType>>
// Fixed test dimensions. The output dimensions mirror the input exactly.
33 constexpr
unsigned int inputWidth = 3;
34 constexpr
unsigned int inputHeight = 4;
35 constexpr
unsigned int inputChannels = 3;
36 constexpr
unsigned int inputBatchSize = 2;
38 constexpr
unsigned int outputWidth = inputWidth;
39 constexpr
unsigned int outputHeight = inputHeight;
40 constexpr
unsigned int outputChannels = inputChannels;
41 constexpr
unsigned int outputBatchSize = inputBatchSize;
// Both TensorInfos use a { batch, channels, height, width } shape and carry
// the caller-supplied quantization parameters qScale/qOffset.
43 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
44 ArmnnType, qScale, qOffset);
46 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
47 ArmnnType, qScale, qOffset);
// Quantization parameters are (re)applied only for quantized data types.
// NOTE(review): an inputTensorInfo.SetQuantizationScale(qScale) call appears
// to fall in the elided gap (original lines 51-52) -- confirm against the
// full file.
50 if(armnn::IsQuantizedType<T>())
53 inputTensorInfo.SetQuantizationOffset(qOffset);
54 outputTensorInfo.SetQuantizationScale(qScale);
55 outputTensorInfo.SetQuantizationOffset(qOffset);
// Reference payload: float literals quantized to T using (qScale, qOffset).
// Each row of three values spans inputWidth; the numbering gaps show that
// some rows and the batch/channel separator comments are elided here.
58 auto input = armnnUtils::QuantizedVector<T>(
61 235.0f, 46.0f, 178.0f,
62 100.0f, 123.0f, 19.0f,
63 172.0f, 74.0f, 250.0f,
67 113.0f, 95.0f, 202.0f,
69 122.0f, 246.0f, 166.0f,
73 56.0f, 170.0f, 162.0f,
74 194.0f, 89.0f, 254.0f,
75 12.0f, 209.0f, 200.0f,
81 25.0f, 117.0f, 103.0f,
82 247.0f, 59.0f, 189.0f,
85 239.0f, 104.0f, 199.0f,
86 17.0f, 124.0f, 153.0f,
87 222.0f, 217.0f, 75.0f,
91 97.0f, 145.0f, 215.0f,
92 115.0f, 116.0f, 238.0f,
93 226.0f, 16.0f, 132.0f,
// Destination buffer sized from the output TensorInfo's element count.
98 std::vector<T> actualOutput(outputTensorInfo.GetNumElements());
100 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.
CreateTensorHandle(outputTensorInfo);
// Wire the output handle into the workload descriptor/info and build the
// Constant workload from the backend's factory.
// NOTE(review): the descriptor/info declarations (and the m_LayerOutput
// assignment suggested by the captured ConstTensorHandle* m_LayerOutput
// signature) sit in the elided gap above this line.
109 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
111 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.
CreateConstant(descriptor, info);
113 outputHandle->Allocate();
115 workload->PostAllocationConfigure();
// Trailing arguments of the (elided) return expression: actual shape from the
// output handle vs. expected shape from the output TensorInfo.
122 outputHandle->GetShape(),
123 outputTensorInfo.GetShape());
// ConstantTest: Float32 variant; passes neutral quantization parameters
// (scale 0.0f, offset 0). Signature elided by the extraction but captured in
// the hover text: LayerTestResult<float, 4> ConstantTest(...).
133 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
// QSymmS16 variant with simple quantization (scale 1.0f, offset 0) -- matches
// the declared ConstantInt16SimpleQuantizationScaleNoOffsetTest signature
// captured in this extraction; confirm against the full file.
141 return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
// QAsymmU8 variant with simple quantization (scale 1.0f, offset 0) -- matches
// the declared ConstantUint8SimpleQuantizationScaleNoOffsetTest signature
// captured in this extraction; confirm against the full file.
149 return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
// QAsymmU8 variant with a custom, non-trivial quantization (scale 2e-6f,
// offset 1) -- matches the declared
// ConstantUint8CustomQuantizationScaleAndOffsetTest signature captured in this
// extraction; confirm against the full file.
157 return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
// QSymmS16 variant with a custom, non-trivial quantization (scale 2e-6f,
// offset 1) -- matches the declared
// ConstantInt16CustomQuantizationScaleAndOffsetTest signature captured in this
// extraction; confirm against the full file.
165 return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
LayerTestResult< float, 4 > ConstantTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
const ConstTensorHandle * m_LayerOutput
LayerTestResult< int16_t, 4 > ConstantInt16CustomQuantizationScaleAndOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > ConstantUint8CustomQuantizationScaleAndOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
virtual std::unique_ptr< IWorkload > CreateConstant(const ConstantQueueDescriptor &descriptor, const WorkloadInfo &info) const
void IgnoreUnused(Ts &&...)
std::shared_ptr< IMemoryManager > IMemoryManagerSharedPtr
void SetQuantizationScale(float scale)
void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle *tensorHandle, const void *memory)
void CopyDataFromITensorHandle(void *memory, const armnn::ITensorHandle *tensorHandle)
LayerTestResult< int16_t, 4 > ConstantInt16SimpleQuantizationScaleNoOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
LayerTestResult< uint8_t, 4 > ConstantUint8SimpleQuantizationScaleNoOffsetTest(armnn::IWorkloadFactory &workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr &memoryManager, const armnn::ITensorHandleFactory &tensorHandleFactory)
Contains information about the TensorInfos of a layer.
virtual std::unique_ptr< ITensorHandle > CreateTensorHandle(const TensorInfo &tensorInfo) const =0