//
// Copyright © 2020 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertBf16ToFp32TestImpl.hpp"

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

LayerTestResult<float, 4> ConvertBf16ToFp32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    IgnoreUnused(memoryManager);

    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::BFloat16);
    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);

    // Input values, quantized to BFloat16 with scale 1.0 and offset 0,
    // i.e. a straight conversion of each float to BFloat16.
    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
             1.0f,   0.4f,   0.5f,   1.3f,  1.5f,  2.0f,  8.76f, 15.2f, 37.5f
        },
        1.0f, 0);

    auto input = MakeTensor<armnn::BFloat16, 4>(inputTensorInfo,
                                                std::vector<armnn::BFloat16>(inputValues));

    // Expected output: the same values, widened to Float32.
    LayerTestResult<float, 4> ret(outputTensorInfo);
    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
             1.0f,   0.4f,   0.5f,   1.3f,  1.5f,  2.0f,  8.76f, 15.2f, 37.5f
        });

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: one BFloat16 input, one Float32 output.
    armnn::ConvertBf16ToFp32QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
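
// Usage sketch (illustrative, not part of this file): ArmNN's backend
// unit-test suites typically register a layer test like this one with the
// ARMNN_AUTO_TEST_CASE macro, e.g. in RefLayerTests.cpp for the reference
// backend:
//
//     ARMNN_AUTO_TEST_CASE(ConvertBf16ToFp32, ConvertBf16ToFp32Test)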