diff options
author | David Beck <david.beck@arm.com> | 2018-10-30 11:38:41 +0000 |
---|---|---|
committer | Matteo Martincigh <matteo.martincigh@arm.com> | 2018-11-07 13:24:50 +0000 |
commit | 09e2f27a9da7a65eb409f3dbdfc029eb3afbb003 (patch) | |
tree | a2af70b701dca0f4688610dffbe68a74778289d3 /src/backends/backendsCommon/test/TensorCopyUtils.cpp | |
parent | 9efb57d62197aeb7d868c289bb34166c132f0287 (diff) | |
download | armnn-09e2f27a9da7a65eb409f3dbdfc029eb3afbb003.tar.gz |
IVGCVSW-1949 : Refactor ITensorHandle and move backend specifics to their place
Change-Id: I48242425c6a6856e13ebcee1b140cbd2af94a3aa
Diffstat (limited to 'src/backends/backendsCommon/test/TensorCopyUtils.cpp')
-rw-r--r-- | src/backends/backendsCommon/test/TensorCopyUtils.cpp | 150 |
1 files changed, 6 insertions, 144 deletions
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.cpp b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
index acc28c9c03..ba7208cc40 100644
--- a/src/backends/backendsCommon/test/TensorCopyUtils.cpp
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
@@ -4,158 +4,20 @@
 //
 #include "TensorCopyUtils.hpp"
 
-
 #include <Half.hpp>
 
-#ifdef ARMCOMPUTECL_ENABLED
-#include <cl/ClTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTENEON_ENABLED
-#include <neon/NeonTensorHandle.hpp>
-#endif
-
-#if ARMCOMPUTECLENABLED || ARMCOMPUTENEON_ENABLED
-#include <aclCommon/ArmComputeTensorUtils.hpp>
-#endif
-
-#include <backendsCommon/CpuTensorHandle.hpp>
-
-#include <boost/cast.hpp>
-
-#include <algorithm>
-#include <cstring>
-
-void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
-            handle->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(static_cast<const armnn::Half*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            handle->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyInFrom(memory);
 }
 
-void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
+void CopyDataFromITensorHandle(void* memory, const armnn::ITensorHandle* tensorHandle)
 {
-    switch (tensorHandle->GetType())
-    {
-        case armnn::ITensorHandle::Cpu:
-        {
-            auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
-            memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
-            break;
-        }
-#ifdef ARMCOMPUTECL_ENABLED
-        case armnn::ITensorHandle::CL:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
-            const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
-            switch(handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                case arm_compute::DataType::F16:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<armnn::Half*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            const_cast<armnn::IClTensorHandle*>(handle)->Unmap();
-            break;
-        }
-#endif
-#if ARMCOMPUTENEON_ENABLED
-        case armnn::ITensorHandle::Neon:
-        {
-            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
-            auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
-            switch (handle->GetDataType())
-            {
-                case arm_compute::DataType::F32:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
-                    break;
-                case arm_compute::DataType::QASYMM8:
-                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
-                    break;
-                default:
-                {
-                    throw armnn::UnimplementedException();
-                }
-            }
-            break;
-        }
-#endif
-        default:
-        {
-            throw armnn::UnimplementedException();
-        }
-    }
+    tensorHandle->CopyOutTo(memory);
 }
 
-void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* memory)
 {
     tensorHandle->Allocate();
-    CopyDataToITensorHandle(tensorHandle, mem);
+    CopyDataToITensorHandle(tensorHandle, memory);
 }