Diffstat (limited to 'src/backends/neon/workloads')
-rw-r--r--  src/backends/neon/workloads/NeonActivationWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonAdditionWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConcatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonDequantizeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonFloorFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonGreaterWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonLstmFloatWorkload.cpp | 12
-rw-r--r--  src/backends/neon/workloads/NeonMaximumWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonMeanWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonMinimumWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonMultiplicationWorkload.cpp | 6
-rw-r--r--  src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPadWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPermuteWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonPooling2dWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonQuantizeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonReshapeWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSplitterWorkload.cpp | 4
-rw-r--r--  src/backends/neon/workloads/NeonSubtractionWorkload.cpp | 6
27 files changed, 64 insertions(+), 64 deletions(-)
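
Every hunk below applies the same mechanical change: the casts that fetch the backing arm_compute::ITensor from a workload's input and output handles now target IAclTensorHandle instead of the Neon-specific INeonTensorHandle, presumably so the workloads depend only on the ACL-common handle interface. A minimal, self-contained sketch of that downcast pattern follows; the toy handle hierarchy and the int "tensor" are illustrative assumptions, not the real ArmNN types.

#include <boost/cast.hpp>   // boost::polymorphic_downcast
#include <iostream>

// Toy stand-ins for the ArmNN tensor-handle interfaces (assumed shapes, not the real headers).
struct ITensorHandle                     { virtual ~ITensorHandle() = default; };
struct IAclTensorHandle : ITensorHandle  { virtual int& GetTensor() = 0; };
struct NeonTensorHandle : IAclTensorHandle
{
    int m_Tensor = 42;
    int& GetTensor() override { return m_Tensor; }
};

int main()
{
    NeonTensorHandle concrete;
    ITensorHandle* handle = &concrete;   // workloads receive the generic interface

    // The diff retargets casts like this one from INeonTensorHandle* to IAclTensorHandle*,
    // the least-derived interface (in this toy hierarchy) that still exposes GetTensor().
    int& tensor = boost::polymorphic_downcast<IAclTensorHandle*>(handle)->GetTensor();
    std::cout << tensor << '\n';         // prints 42
}

boost::polymorphic_downcast checks the cast with a dynamic_cast assertion in debug builds and compiles to a plain static_cast in release builds, so either cast target works as long as the handle actually derives from it.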
diff --git a/src/backends/neon/workloads/NeonActivationWorkload.cpp b/src/backends/neon/workloads/NeonActivationWorkload.cpp
index 7715e5fb33..916d67449c 100644
--- a/src/backends/neon/workloads/NeonActivationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonActivationWorkload.cpp
@@ -36,8 +36,8 @@ NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor&
const arm_compute::ActivationLayerInfo activationLayerInfo =
ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEActivationLayer>();
layer->configure(&input, &output, activationLayerInfo);
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
index fa537819a9..a025c0b8f5 100644
--- a/src/backends/neon/workloads/NeonAdditionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -35,9 +35,9 @@ NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descri
{
m_Data.ValidateInputsOutputs("NeonAdditionWorkload", 2, 1);
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEArithmeticAddition>();
layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);
diff --git a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
index fc80f413e8..cd931e3797 100644
--- a/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonBatchNormalizationWorkload.cpp
@@ -53,8 +53,8 @@ NeonBatchNormalizationWorkload::NeonBatchNormalizationWorkload(
{
m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConcatWorkload.cpp b/src/backends/neon/workloads/NeonConcatWorkload.cpp
index 8ea535b40a..4a9f68798c 100644
--- a/src/backends/neon/workloads/NeonConcatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConcatWorkload.cpp
@@ -73,10 +73,10 @@ const ConcatQueueDescriptor& descriptor, const WorkloadInfo& info)
std::vector<arm_compute::ITensor *> aclInputs;
for (auto input : m_Data.m_Inputs)
{
- arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
+ arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(input)->GetTensor();
aclInputs.emplace_back(&aclInput);
}
- arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
+ arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
m_Data.m_Outputs[0])->GetTensor();
// Create the layer function
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index 1080f320e7..a8137a27ad 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -60,8 +60,8 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
// todo: check tensor shapes match.
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index d15b48571a..a685b8a49d 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -107,8 +107,8 @@ NeonDepthwiseConvolutionWorkload::NeonDepthwiseConvolutionWorkload(
m_Data.ValidateInputsOutputs("NeonDepthwiseConvolutionWorkload", 1, 1);
- INeonTensorHandle* inputTensorHandle = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0]);
- INeonTensorHandle* outputTensorHandle = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0]);
+ IAclTensorHandle* inputTensorHandle = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0]);
+ IAclTensorHandle* outputTensorHandle = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0]);
arm_compute::ITensor& input = inputTensorHandle->GetTensor();
arm_compute::ITensor& output = outputTensorHandle->GetTensor();
diff --git a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
index 9840b48dd2..aa454c921b 100644
--- a/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDequantizeWorkload.cpp
@@ -30,8 +30,8 @@ NeonDequantizeWorkload::NeonDequantizeWorkload(const DequantizeQueueDescriptor&
{
m_Data.ValidateInputsOutputs("NeonDequantizeWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_Layer.reset(new arm_compute::NEDequantizationLayer());
m_Layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
index f024fef2a8..5b4e9094fd 100644
--- a/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFloorFloatWorkload.cpp
@@ -19,8 +19,8 @@ NeonFloorFloatWorkload::NeonFloorFloatWorkload(const FloorQueueDescriptor& descr
{
m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEFloor>();
layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 7395270400..56e5552dd3 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -51,8 +51,8 @@ NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueue
{
m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
diff --git a/src/backends/neon/workloads/NeonGreaterWorkload.cpp b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
index ece52d6465..62396261e1 100644
--- a/src/backends/neon/workloads/NeonGreaterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGreaterWorkload.cpp
@@ -29,9 +29,9 @@ NeonGreaterWorkload<T>::NeonGreaterWorkload(const GreaterQueueDescriptor& descri
{
m_Data.ValidateInputsOutputs("NeonGreaterWorkload", 2, 1);
- arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_GreaterLayer.configure(&input0, &input1, &output);
}
diff --git a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
index efc21ed112..e6249b3075 100644
--- a/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonL2NormalizationFloatWorkload.cpp
@@ -33,8 +33,8 @@ NeonL2NormalizationFloatWorkload::NeonL2NormalizationFloatWorkload(const L2Norma
{
m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
index 1ab269ff56..c7f5f090ce 100644
--- a/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonLstmFloatWorkload.cpp
@@ -97,13 +97,13 @@ NeonLstmFloatWorkload::NeonLstmFloatWorkload(const LstmQueueDescriptor &descript
lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());
}
- const arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- const arm_compute::ITensor& output_state_in = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- const arm_compute::ITensor& cell_state_in = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
+ const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ const arm_compute::ITensor& output_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ const arm_compute::ITensor& cell_state_in = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
- arm_compute::ITensor& output_state_out = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
- arm_compute::ITensor& cell_state_out = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
- arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();
+ arm_compute::ITensor& output_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTensor();
+ arm_compute::ITensor& cell_state_out = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor();
+ arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[3])->GetTensor();
// Get the batch_size and the num_units from the cellStateIn dimensions
const TensorInfo& inputTensorInfo = info.m_InputTensorInfos[2];
diff --git a/src/backends/neon/workloads/NeonMaximumWorkload.cpp b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
index 9994af98d1..c433d81973 100644
--- a/src/backends/neon/workloads/NeonMaximumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMaximumWorkload.cpp
@@ -29,9 +29,9 @@ NeonMaximumWorkload::NeonMaximumWorkload(const MaximumQueueDescriptor& descripto
{
m_Data.ValidateInputsOutputs("NeonMaximumWorkload", 2, 1);
- arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_MaxLayer.configure(&input0, &input1, &output);
}
diff --git a/src/backends/neon/workloads/NeonMeanWorkload.cpp b/src/backends/neon/workloads/NeonMeanWorkload.cpp
index d736e42e0e..bb0870d9ef 100644
--- a/src/backends/neon/workloads/NeonMeanWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMeanWorkload.cpp
@@ -34,8 +34,8 @@ NeonMeanWorkload::NeonMeanWorkload(const MeanQueueDescriptor& descriptor, const
{
m_Data.ValidateInputsOutputs("NeonMeanWorkload", 1, 1);
- arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::Coordinates coords = BuildArmComputeReductionCoordinates(input.info()->num_dimensions(),
info.m_InputTensorInfos[0].GetNumDimensions(),
diff --git a/src/backends/neon/workloads/NeonMinimumWorkload.cpp b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
index 9b97bf5863..2867a8079f 100644
--- a/src/backends/neon/workloads/NeonMinimumWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMinimumWorkload.cpp
@@ -29,9 +29,9 @@ NeonMinimumWorkload::NeonMinimumWorkload(const MinimumQueueDescriptor& descripto
{
m_Data.ValidateInputsOutputs("NeonMinimumWorkload", 2, 1);
- arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_MinLayer.configure(&input0, &input1, &output);
}
diff --git a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
index 6398b65946..66fbedfa63 100644
--- a/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMultiplicationWorkload.cpp
@@ -37,9 +37,9 @@ NeonMultiplicationWorkload::NeonMultiplicationWorkload(const MultiplicationQueue
{
m_Data.ValidateInputsOutputs("NeonMultiplicationWorkload", 2, 1);
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
// At the time of writing, configure() will fail if a rounding policy other than TO_ZERO is supplied to it,
// when providing a scale of 1.0 for F32 tensors, even though the provided rounding policy appears to be
diff --git a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
index 92c0396d86..8cb4ec975d 100644
--- a/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
@@ -77,8 +77,8 @@ NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const Normalizati
throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality.");
}
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
output.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonPadWorkload.cpp b/src/backends/neon/workloads/NeonPadWorkload.cpp
index 79645f148d..32b15e9d06 100644
--- a/src/backends/neon/workloads/NeonPadWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPadWorkload.cpp
@@ -21,8 +21,8 @@ NeonPadWorkload::NeonPadWorkload(const PadQueueDescriptor& descriptor, const Wor
{
m_Data.ValidateInputsOutputs("NeonPadWorkload", 1, 1);
- arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
std::vector<std::pair<unsigned int, unsigned int>> reversed_PadList(descriptor.m_Parameters.m_PadList.size());
diff --git a/src/backends/neon/workloads/NeonPermuteWorkload.cpp b/src/backends/neon/workloads/NeonPermuteWorkload.cpp
index d2e62eed39..a5ecbcb2c0 100644
--- a/src/backends/neon/workloads/NeonPermuteWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPermuteWorkload.cpp
@@ -32,8 +32,8 @@ NeonPermuteWorkload::NeonPermuteWorkload(const PermuteQueueDescriptor& descripto
m_Data.ValidateInputsOutputs(GetName(), 1, 1);
- const arm_compute::ITensor& input = static_cast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = static_cast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
const armnn::PermutationVector& mappings = m_Data.m_Parameters.m_DimMappings;
// Run the layer.
diff --git a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
index 75bceb1bc7..9934c29a41 100644
--- a/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonPooling2dWorkload.cpp
@@ -37,8 +37,8 @@ NeonPooling2dWorkload::NeonPooling2dWorkload(
{
m_Data.ValidateInputsOutputs("NeonPooling2dWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
index ef24a7f40b..cb8393b819 100644
--- a/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonQuantizeWorkload.cpp
@@ -28,9 +28,9 @@ NeonQuantizeWorkload::NeonQuantizeWorkload(const QuantizeQueueDescriptor& descri
const WorkloadInfo& workloadInfo)
: BaseWorkload<QuantizeQueueDescriptor>(descriptor, workloadInfo)
{
- arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
+ arm_compute::ITensor& input = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
+ arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<IAclTensorHandle>(
m_Data.m_Outputs[0])->GetTensor();
m_Layer.reset(new arm_compute::NEQuantizationLayer());
diff --git a/src/backends/neon/workloads/NeonReshapeWorkload.cpp b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
index 40fbef6520..7f2056c8e2 100644
--- a/src/backends/neon/workloads/NeonReshapeWorkload.cpp
+++ b/src/backends/neon/workloads/NeonReshapeWorkload.cpp
@@ -20,8 +20,8 @@ NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descripto
{
m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEReshapeLayer>();
layer->configure(&input, &output);
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
index 37f97bfe0e..6943d872e4 100644
--- a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
+++ b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
@@ -35,8 +35,8 @@ NeonResizeBilinearWorkload::NeonResizeBilinearWorkload(const ResizeBilinearQueue
{
m_Data.ValidateInputsOutputs("NeonResizeBilinearWorkload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
index afc6135e0f..19c50db15b 100644
--- a/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxFloatWorkload.cpp
@@ -20,8 +20,8 @@ NeonSoftmaxFloatWorkload::NeonSoftmaxFloatWorkload(const SoftmaxQueueDescriptor&
m_Data.ValidateInputsOutputs("NeonSoftmaxFloatWorkload", 1, 1);
// The ArmCompute softmax layer uses 2D input/output tensors, so flatten the first three dimensions.
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
unsigned int aclAxis = ComputeSoftmaxAclAxis(info.m_InputTensorInfos[0]);
auto layer = std::make_unique<arm_compute::NESoftmaxLayer>(memoryManager);
diff --git a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
index 7f295d6548..7b2d29086e 100644
--- a/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
+++ b/src/backends/neon/workloads/NeonSoftmaxUint8Workload.cpp
@@ -20,8 +20,8 @@ NeonSoftmaxUint8Workload::NeonSoftmaxUint8Workload(const SoftmaxQueueDescriptor&
{
m_Data.ValidateInputsOutputs("NeonSoftmaxUint8Workload", 1, 1);
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
const auto outputQuantization = output.info()->quantization_info();
diff --git a/src/backends/neon/workloads/NeonSplitterWorkload.cpp b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
index bf35939127..5b5614662d 100644
--- a/src/backends/neon/workloads/NeonSplitterWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSplitterWorkload.cpp
@@ -73,12 +73,12 @@ NeonSplitterWorkload::NeonSplitterWorkload(const SplitterQueueDescriptor& descri
return;
}
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
std::vector<arm_compute::ITensor *> aclOutputs;
for (auto output : m_Data.m_Outputs)
{
- arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(output)->GetTensor();
+ arm_compute::ITensor& aclOutput = boost::polymorphic_pointer_downcast<IAclTensorHandle>(output)->GetTensor();
aclOutputs.emplace_back(&aclOutput);
}
diff --git a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
index 39176aac61..f4b4707633 100644
--- a/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSubtractionWorkload.cpp
@@ -34,9 +34,9 @@ NeonSubtractionWorkload::NeonSubtractionWorkload(const SubtractionQueueDescripto
{
m_Data.ValidateInputsOutputs("NeonSubtractionWorkload", 2, 1);
- arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& input2 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& input2 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
auto layer = std::make_unique<arm_compute::NEArithmeticSubtraction>();
layer->configure(&input1, &input2, &output, arm_compute::ConvertPolicy::SATURATE);