aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>    2018-10-16 13:21:27 +0100
committer Matthew Bentham <matthew.bentham@arm.com>    2018-10-22 16:57:54 +0100
commit177d8d26925a58a579943e010d28d1ceaa033d64 (patch)
tree73bc6bb530621f33a6b7cd27ae303a3f92fb556c
parent97a06fd57e7864a882ef5e37a1bf7286f5be5185 (diff)
downloadarmnn-177d8d26925a58a579943e010d28d1ceaa033d64.tar.gz
IVGCVSW-1951 Update NeonWorkloadUtils
Change-Id: I147dbf6811f84ec4588264d636a36efc8ec56f72
-rw-r--r--src/backends/neon/backend.mk3
-rw-r--r--src/backends/neon/workloads/CMakeLists.txt1
-rw-r--r--src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp10
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp25
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp3
-rw-r--r--src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp2
-rw-r--r--src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp4
-rw-r--r--src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp4
-rw-r--r--src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp9
-rw-r--r--src/backends/neon/workloads/NeonLstmFloatWorkload.cpp68
-rw-r--r--src/backends/neon/workloads/NeonWorkloadUtils.cpp60
-rw-r--r--src/backends/neon/workloads/NeonWorkloadUtils.hpp58
12 files changed, 91 insertions(+), 156 deletions(-)
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index e63baa0541..6dfd951957 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -37,5 +37,4 @@ BACKEND_SOURCES := \
workloads/NeonSoftmaxBaseWorkload.cpp \
workloads/NeonSoftmaxFloatWorkload.cpp \
workloads/NeonSoftmaxUint8Workload.cpp \
- workloads/NeonSubtractionFloatWorkload.cpp \
- workloads/NeonWorkloadUtils.cpp
+ workloads/NeonSubtractionFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index fddbcb5d97..d847df70fd 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -57,7 +57,6 @@ list(APPEND armnnNeonBackendWorkloads_sources
NeonSubtractionFloatWorkload.cpp
NeonSubtractionFloatWorkload.hpp
NeonWorkloads.hpp
- NeonWorkloadUtils.cpp
NeonWorkloadUtils.hpp
)
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
index 2383e78df3..f7056a515b 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationFloatWorkload.cpp
@@ -66,10 +66,10 @@ NeonBatchNormalizationFloatWorkload::NeonBatchNormalizationFloatWorkload(
m_Gamma.get(),
m_Data.m_Parameters.m_Eps);
- InitializeArmComputeTensorDataForFloatTypes(*m_Mean, m_Data.m_Mean);
- InitializeArmComputeTensorDataForFloatTypes(*m_Variance, m_Data.m_Variance);
- InitializeArmComputeTensorDataForFloatTypes(*m_Gamma, m_Data.m_Gamma);
- InitializeArmComputeTensorDataForFloatTypes(*m_Beta, m_Data.m_Beta);
+ InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean);
+ InitializeArmComputeTensorData(*m_Variance, m_Data.m_Variance);
+ InitializeArmComputeTensorData(*m_Gamma, m_Data.m_Gamma);
+ InitializeArmComputeTensorData(*m_Beta, m_Data.m_Beta);
// Force Compute Library to perform the necessary copying and reshaping, after which
// delete all the input tensors that will no longer be needed
@@ -92,5 +92,3 @@ void NeonBatchNormalizationFloatWorkload::FreeUnusedTensors()
}
} //namespace armnn
-
-
diff --git a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
index 8da3e47249..b11d10fd2f 100644
--- a/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dBaseWorkload.cpp
@@ -109,30 +109,8 @@ NeonConvolution2dBaseWorkload<dataTypes...>::NeonConvolution2dBaseWorkload(
}
BOOST_ASSERT(m_ConvolutionLayer);
- armnn::DataType dataType = m_Data.m_Weight->GetTensorInfo().GetDataType();
+ InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
- switch (dataType)
- {
- case DataType::Float16:
- {
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<Half>());
- break;
- }
- case DataType::Float32:
- {
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<float>());
- break;
- }
- case DataType::QuantisedAsymm8:
- {
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<uint8_t>());
- break;
- }
- default:
- {
- BOOST_ASSERT_MSG(false, "Unknown DataType.");
- }
- }
}
template<armnn::DataType... dataTypes>
@@ -147,4 +125,3 @@ template class NeonConvolution2dBaseWorkload<armnn::DataType::Float16, armnn::Da
template class NeonConvolution2dBaseWorkload<armnn::DataType::QuantisedAsymm8>;
} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp
index cd26f8d536..9969154421 100644
--- a/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dFloatWorkload.cpp
@@ -18,7 +18,7 @@ NeonConvolution2dFloatWorkload::NeonConvolution2dFloatWorkload(const Convolution
{
if (m_Data.m_Parameters.m_BiasEnabled)
{
- InitializeArmComputeTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_ConvolutionLayer->prepare();
@@ -37,4 +37,3 @@ void NeonConvolution2dFloatWorkload::ValidateData() const
}
} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp b/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp
index 5affe682b4..8572cbfb08 100644
--- a/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dUint8Workload.cpp
@@ -14,7 +14,7 @@ NeonConvolution2dUint8Workload::NeonConvolution2dUint8Workload(const Convolution
{
if (m_Data.m_Parameters.m_BiasEnabled)
{
- InitialiseArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias->template GetConstTensor<int32_t>());
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_ConvolutionLayer->prepare();
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
index 4b266f3288..9790998ebe 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionFloatWorkload.cpp
@@ -69,11 +69,11 @@ NeonDepthwiseConvolutionFloatWorkload::NeonDepthwiseConvolutionFloatWorkload(
BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
- InitializeArmComputeTensorDataForFloatTypes(*m_KernelTensor, m_Data.m_Weight);
+ InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (m_BiasTensor)
{
- InitializeArmComputeTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_pDepthwiseConvolutionLayer->prepare();
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
index 6c6c2dfb6c..25d00f92ca 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionUint8Workload.cpp
@@ -69,11 +69,11 @@ NeonDepthwiseConvolutionUint8Workload::NeonDepthwiseConvolutionUint8Workload(
BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
+ InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (m_BiasTensor)
{
- InitialiseArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_pDepthwiseConvolutionLayer->prepare();
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 8cebb4f48f..51fd7af362 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -69,22 +69,22 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
// Allocate
if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
{
- InitialiseArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
+ InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
}
else
{
- InitializeArmComputeTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);
+ InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
}
if (m_BiasesTensor)
{
if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
{
- InitialiseArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
+ InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
}
else
{
- InitializeArmComputeTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
+ InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
}
}
@@ -107,4 +107,3 @@ void NeonFullyConnectedWorkload::FreeUnusedTensors()
}
} //namespace armnn
-
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index 5899f13780..7745cec89b 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -169,57 +169,57 @@ NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descript
armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
- InitialiseArmComputeTensorData(*m_InputToForgetWeightsTensor,
- m_Data.m_InputToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_InputToCellWeightsTensor,
- m_Data.m_InputToCellWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_InputToOutputWeightsTensor,
- m_Data.m_InputToOutputWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
- m_Data.m_RecurrentToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
- m_Data.m_RecurrentToCellWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
- m_Data.m_RecurrentToOutputWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_ForgetGateBiasTensor,
- m_Data.m_ForgetGateBias->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_CellBiasTensor,
- m_Data.m_CellBias->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_OutputGateBiasTensor,
- m_Data.m_OutputGateBias->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor,
+ m_Data.m_InputToForgetWeights);
+ InitializeArmComputeTensorData(*m_InputToCellWeightsTensor,
+ m_Data.m_InputToCellWeights);
+ InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor,
+ m_Data.m_InputToOutputWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
+ m_Data.m_RecurrentToForgetWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
+ m_Data.m_RecurrentToCellWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
+ m_Data.m_RecurrentToOutputWeights);
+ InitializeArmComputeTensorData(*m_ForgetGateBiasTensor,
+ m_Data.m_ForgetGateBias);
+ InitializeArmComputeTensorData(*m_CellBiasTensor,
+ m_Data.m_CellBias);
+ InitializeArmComputeTensorData(*m_OutputGateBiasTensor,
+ m_Data.m_OutputGateBias);
if (!m_Data.m_Parameters.m_CifgEnabled)
{
- InitialiseArmComputeTensorData(*m_InputToInputWeightsTensor,
- m_Data.m_InputToInputWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
- m_Data.m_RecurrentToInputWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_InputToInputWeightsTensor,
+ m_Data.m_InputToInputWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
+ m_Data.m_RecurrentToInputWeights);
if (m_Data.m_CellToInputWeights != nullptr)
{
- InitialiseArmComputeTensorData(*m_CellToInputWeightsTensor,
- m_Data.m_CellToInputWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_CellToInputWeightsTensor,
+ m_Data.m_CellToInputWeights);
}
- InitialiseArmComputeTensorData(*m_InputGateBiasTensor,
- m_Data.m_InputGateBias->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_InputGateBiasTensor,
+ m_Data.m_InputGateBias);
}
if (m_Data.m_Parameters.m_ProjectionEnabled)
{
- InitialiseArmComputeTensorData(*m_ProjectionWeightsTensor,
- m_Data.m_ProjectionWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_ProjectionWeightsTensor,
+ m_Data.m_ProjectionWeights);
if (m_Data.m_ProjectionBias != nullptr)
{
- InitialiseArmComputeTensorData(*m_ProjectionBiasTensor,
- m_Data.m_ProjectionBias->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_ProjectionBiasTensor,
+ m_Data.m_ProjectionBias);
}
}
if (m_Data.m_Parameters.m_PeepholeEnabled)
{
- InitialiseArmComputeTensorData(*m_CellToForgetWeightsTensor,
- m_Data.m_CellToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_CellToOutputWeightsTensor,
- m_Data.m_CellToOutputWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor,
+ m_Data.m_CellToForgetWeights);
+ InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor,
+ m_Data.m_CellToOutputWeights);
}
// Force Compute Library to perform the necessary copying and reshaping, after which
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.cpp b/src/backends/neon/workloads/NeonWorkloadUtils.cpp
deleted file mode 100644
index 195f090171..0000000000
--- a/src/backends/neon/workloads/NeonWorkloadUtils.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "NeonWorkloadUtils.hpp"
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-#include <backends/aclCommon/ArmComputeUtils.hpp>
-#include <backends/neon/NeonTensorHandle.hpp>
-#include <backends/neon/NeonLayerSupport.hpp>
-#include <backends/CpuTensorHandle.hpp>
-
-#include <armnn/Utils.hpp>
-#include <armnn/Exceptions.hpp>
-
-#include <cstring>
-#include <boost/assert.hpp>
-#include <boost/cast.hpp>
-#include <boost/format.hpp>
-
-#include "Profiling.hpp"
-
-#include <armnn/Types.hpp>
-#include <Half.hpp>
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-// Allocates a tensor and copy the contents in data to the tensor contents.
-template<typename T>
-void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data)
-{
- InitialiseArmComputeTensorEmpty(tensor);
- CopyArmComputeITensorData(data, tensor);
-}
-
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const Half* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const float* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const uint8_t* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const int32_t* data);
-
-void InitializeArmComputeTensorDataForFloatTypes(arm_compute::Tensor& tensor,
- const ConstCpuTensorHandle* handle)
-{
- BOOST_ASSERT(handle);
- switch(handle->GetTensorInfo().GetDataType())
- {
- case DataType::Float16:
- InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<Half>());
- break;
- case DataType::Float32:
- InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<float>());
- break;
- default:
- BOOST_ASSERT_MSG(false, "Unexpected floating point type.");
- }
-};
-
-} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index c4accd6c89..48ec753546 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -5,30 +5,54 @@
#pragma once
#include <backends/Workload.hpp>
-
+#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#include <backends/neon/NeonTensorHandle.hpp>
#include <backends/neon/NeonTimer.hpp>
-
-#include <arm_compute/core/Types.h>
-#include <arm_compute/core/Helpers.h>
+#include <backends/CpuTensorHandle.hpp>
#include <arm_compute/runtime/NEON/NEFunctions.h>
-#include <arm_compute/runtime/SubTensor.h>
-#include <boost/cast.hpp>
+#include <Half.hpp>
+
+#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
+ ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
+ name, \
+ armnn::NeonTimer(), \
+ armnn::WallClockTimer())
+
+using namespace armnn::armcomputetensorutils;
namespace armnn
{
-class Layer;
-
-template<typename T>
-void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data);
-void InitializeArmComputeTensorDataForFloatTypes(arm_compute::Tensor& tensor, const ConstCpuTensorHandle* handle);
-} //namespace armnn
+template <typename T>
+void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
+{
+ InitialiseArmComputeTensorEmpty(dstTensor);
+ CopyArmComputeITensorData(srcData, dstTensor);
+}
+inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
+ const ConstCpuTensorHandle* handle)
+{
+ BOOST_ASSERT(handle);
+
+ switch(handle->GetTensorInfo().GetDataType())
+ {
+ case DataType::Float16:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::Half>());
+ break;
+ case DataType::Float32:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
+ break;
+ case DataType::QuantisedAsymm8:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
+ break;
+ case DataType::Signed32:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
+ break;
+ default:
+ BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ }
+};
-#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
- ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
- name, \
- armnn::NeonTimer(), \
- armnn::WallClockTimer())
+} //namespace armnn