//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConstantTestImpl.hpp"

#include <armnnUtils/QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <armnnUtils/Permute.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

namespace
{

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager);

    constexpr unsigned int inputWidth     = 3;
    constexpr unsigned int inputHeight    = 4;
    constexpr unsigned int inputChannels  = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth     = inputWidth;
    constexpr unsigned int outputHeight    = inputHeight;
    constexpr unsigned int outputChannels  = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = armnnUtils::QuantizedVector<T>(
        {
            // Batch 0, Channel 0
            235.0f,  46.0f, 178.0f,
            100.0f, 123.0f,  19.0f,
            172.0f,  74.0f, 250.0f,
              6.0f, 195.0f,  80.0f,

            // Batch 0, Channel 1
            113.0f,  95.0f, 202.0f,
             77.0f, 114.0f,  71.0f,
            122.0f, 246.0f, 166.0f,
             82.0f,  28.0f,  37.0f,

            // Batch 0, Channel 2
             56.0f, 170.0f, 162.0f,
            194.0f,  89.0f, 254.0f,
             12.0f, 209.0f, 200.0f,
              1.0f,  64.0f,  54.0f,

            // Batch 1, Channel 0
             67.0f,  90.0f,  49.0f,
              7.0f, 163.0f,  18.0f,
             25.0f, 117.0f, 103.0f,
            247.0f,  59.0f, 189.0f,

            // Batch 1, Channel 1
            239.0f, 104.0f, 199.0f,
             17.0f, 124.0f, 153.0f,
            222.0f, 217.0f,  75.0f,
             32.0f, 126.0f,  21.0f,

            // Batch 1, Channel 2
             97.0f, 145.0f, 215.0f,
            115.0f, 116.0f, 238.0f,
            226.0f,  16.0f, 132.0f,
             92.0f, 125.0f,  88.0f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // The constant data is handed to the workload through the queue descriptor,
    // wrapped in a ScopedTensorHandle.
    armnn::ScopedTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, input.data());

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload =
        workloadFactory.CreateWorkload(armnn::LayerType::Constant, descriptor, info);

    outputHandle->Allocate();

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 4>(actualOutput,
                                 input,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

} // anonymous namespace

LayerTestResult<float, 4> ConstantTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
}

LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
}

LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
}

LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
}