diff options
author | David Beck <david.beck@arm.com> | 2018-09-19 12:03:20 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:56 +0100 |
commit | 10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab (patch) | |
tree | 1ac5b4f415531e2ef759439ab8e113f177bea7c5 /src/armnn/backends/NeonWorkloads/NeonConvertFp32ToFp16Workload.cpp | |
parent | a3f165624b2cdfbced674af5a6e11856b1e746d9 (diff) | |
download | armnn-10b4dfd8e9ccd7a03df7bb053ee1c644cb37f8ab.tar.gz |
IVGCVSW-1897 : build infrastructure for the src/backends folder
Change-Id: I7ebafb675ccc77ad54d1deb01412a8379a5356bb
Diffstat (limited to 'src/armnn/backends/NeonWorkloads/NeonConvertFp32ToFp16Workload.cpp')
-rw-r--r-- | src/armnn/backends/NeonWorkloads/NeonConvertFp32ToFp16Workload.cpp | 43 |
1 file changed, 0 insertions, 43 deletions
diff --git a/src/armnn/backends/NeonWorkloads/NeonConvertFp32ToFp16Workload.cpp b/src/armnn/backends/NeonWorkloads/NeonConvertFp32ToFp16Workload.cpp deleted file mode 100644 index 0f4fbe4e93..0000000000 --- a/src/armnn/backends/NeonWorkloads/NeonConvertFp32ToFp16Workload.cpp +++ /dev/null @@ -1,43 +0,0 @@ -// -// Copyright © 2017 Arm Ltd. All rights reserved. -// SPDX-License-Identifier: MIT -// - -#include "NeonConvertFp32ToFp16Workload.hpp" - -#include "Half.hpp" -#include "FloatingPointConverter.hpp" - -#include "Profiling.hpp" -#include "backends/WorkloadUtils.hpp" - -namespace armnn -{ - -NeonConvertFp32ToFp16Workload::NeonConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor& descriptor, - const WorkloadInfo& info) - : Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>(descriptor, info) -{ - this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToFp16Workload", 1, 1); - GatherTensorHandlePairs(descriptor, m_TensorHandlePairs); -} - -void NeonConvertFp32ToFp16Workload::Execute() const -{ - ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonConvertFp32ToFp16Workload_Execute"); - - auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size) - { - auto input = reinterpret_cast<const float*>(src); - auto output = reinterpret_cast<Half*>(dst); - size_t numElements = size/2; // 2 bytes per fp16 - armnnUtils::FloatingPointConverter::ConvertFloat32To16(input, numElements, output); - }; - - for (const auto& pair : m_TensorHandlePairs) - { - CopyTensorContentsGeneric(pair.first, pair.second, convertFunc); - } -} - -} //namespace armnn |