LayerTestResult<float, 4> ConvertBf16ToFp32Test(armnn::IWorkloadFactory& workloadFactory,
                                                const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
                                                const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    // inputTensorInfo (DataType::BFloat16) and outputTensorInfo (DataType::Float32)
    // are declared earlier in the test with identical shapes.

    std::vector<armnn::BFloat16> inputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
        {
            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
        },
        1.0f, 0); // identity quantisation scale/offset

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
    std::vector<float> expectedOutput =
    {
        -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
        1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
    };

    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ConvertBf16ToFp32QueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertBf16ToFp32(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputValues.data());
    workload->Execute();
    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<float, 4>(actualOutput, expectedOutput,
                                     outputHandle->GetShape(), outputTensorInfo.GetShape());
}
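The operation under test is a pure widening: a BFloat16 value carries the sign bit, the full 8-bit exponent, and the top 7 mantissa bits of an IEEE-754 float32, so converting BFloat16 to FP32 introduces no further rounding. That is why expectedOutput simply mirrors the input list. A minimal standalone sketch of the per-element conversion (an illustration, not Arm NN's backend implementation):

#include <cstdint>
#include <cstring>

// BFloat16 -> FP32 widening: place the 16 BF16 bits in the upper half of a
// 32-bit pattern and zero-fill the low mantissa bits; the value is unchanged.
float Bf16BitsToFloat32(uint16_t bf16Bits)
{
    uint32_t fp32Bits = static_cast<uint32_t>(bf16Bits) << 16;
    float result;
    std::memcpy(&result, &fp32Bits, sizeof(result)); // bit-exact reinterpretation
    return result;
}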
Referenced declarations:

- void IgnoreUnused(Ts&&...)
- virtual std::unique_ptr<IWorkload> CreateConvertBf16ToFp32(const ConvertBf16ToFp32QueueDescriptor& descriptor, const WorkloadInfo& info) const
- std::shared_ptr<IMemoryManager> IMemoryManagerSharedPtr
- void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
- LayerTestResult<float, 4> ConvertBf16ToFp32Test(armnn::IWorkloadFactory& workloadFactory, const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, const armnn::ITensorHandleFactory& tensorHandleFactory)
- WorkloadInfo: contains information about the TensorInfos of a layer.
- virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0
- void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
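For context, backend test suites register functions like ConvertBf16ToFp32Test through shared test-case macros rather than calling them directly. A minimal sketch of such a registration, assuming the ARMNN_AUTO_TEST_CASE_WITH_THF helper used for tests that take an ITensorHandleFactory (treat the macro and wiring as assumptions, not something quoted from this page):

// Illustrative registration in a backend's layer-test file (assumed macro):
ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test)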