diff options
author | David Beck <david.beck@arm.com> | 2018-09-24 15:59:27 +0100 |
---|---|---|
committer | Matthew Bentham <matthew.bentham@arm.com> | 2018-10-10 16:16:57 +0100 |
commit | 0dbe0ee25312b728d77383d11c465156e64ae757 (patch) | |
tree | af37a9802e3ad551e1bf63f7636508cde7a41643 /src/backends/neon/workloads/NeonWorkloadUtils.cpp | |
parent | b4540bef0b0327683fe8e63f727c1212800dc2a9 (diff) | |
download | armnn-0dbe0ee25312b728d77383d11c465156e64ae757.tar.gz |
IVGCVSW-1899 : Neon backend folder structure
armnn:149855
Change-Id: I26e8cf83422a65049386a5ebdb6d0001627aefaa
Diffstat (limited to 'src/backends/neon/workloads/NeonWorkloadUtils.cpp')
-rw-r--r-- | src/backends/neon/workloads/NeonWorkloadUtils.cpp | 60 |
1 file changed, 60 insertions(+), 0 deletions(-)
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.cpp b/src/backends/neon/workloads/NeonWorkloadUtils.cpp new file mode 100644 index 0000000000..195f090171 --- /dev/null +++ b/src/backends/neon/workloads/NeonWorkloadUtils.cpp @@ -0,0 +1,60 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// +#include "NeonWorkloadUtils.hpp" +#include <backends/aclCommon/ArmComputeTensorUtils.hpp> +#include <backends/aclCommon/ArmComputeUtils.hpp> +#include <backends/neon/NeonTensorHandle.hpp> +#include <backends/neon/NeonLayerSupport.hpp> +#include <backends/CpuTensorHandle.hpp> + +#include <armnn/Utils.hpp> +#include <armnn/Exceptions.hpp> + +#include <cstring> +#include <boost/assert.hpp> +#include <boost/cast.hpp> +#include <boost/format.hpp> + +#include "Profiling.hpp" + +#include <armnn/Types.hpp> +#include <Half.hpp> + +using namespace armnn::armcomputetensorutils; + +namespace armnn +{ + +// Allocates a tensor and copy the contents in data to the tensor contents. 
+template<typename T> +void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data) +{ + InitialiseArmComputeTensorEmpty(tensor); + CopyArmComputeITensorData(data, tensor); +} + +template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const Half* data); +template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const float* data); +template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const uint8_t* data); +template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const int32_t* data); + +void InitializeArmComputeTensorDataForFloatTypes(arm_compute::Tensor& tensor, + const ConstCpuTensorHandle* handle) +{ + BOOST_ASSERT(handle); + switch(handle->GetTensorInfo().GetDataType()) + { + case DataType::Float16: + InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<Half>()); + break; + case DataType::Float32: + InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<float>()); + break; + default: + BOOST_ASSERT_MSG(false, "Unexpected floating point type."); + } +}; + +} //namespace armnn |