author     Georgios Pinitas <georgios.pinitas@arm.com>  2017-12-06 11:53:03 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:42:17 +0000
commit     631c41a4e3645a948b0f597caa77e8fa91ca0efc (patch)
tree       164fd113818b8e890fc16bad97240056cb71e747 /src/runtime
parent     57f249b08fd65af761d5c8bfe62de117d67a14c7 (diff)
download   ComputeLibrary-631c41a4e3645a948b0f597caa77e8fa91ca0efc.tar.gz

COMPMID-556: Rename Error to Status and inverse logic

Change-Id: Ib57d4f7177cc6179302bda7ad870acb8bd3825f5
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112115
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
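Editorial note: the sketch below is not part of this patch; it only illustrates how a caller might consume the renamed return type. It assumes arm_compute::Status converts to bool with true meaning success (the inverted logic the subject refers to) and exposes an error_description() accessor; the validate()/configure() signatures are taken from the hunks below.

#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

#include <iostream>

using namespace arm_compute;

// Hedged sketch: validate a configuration up front and only call configure()
// when validation succeeds.
bool try_configure_activation(ICLTensor *input, ICLTensor *output, const ActivationLayerInfo &act_info, CLActivationLayer &layer)
{
    const Status status = CLActivationLayer::validate(input->info(), output->info(), act_info);
    if(!status) // assumed bool conversion: true on success, false on error
    {
        std::cerr << "Validation failed: " << status.error_description() << std::endl;
        return false;
    }
    layer.configure(input, output, act_info);
    return true;
}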
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CL/functions/CLActivationLayer.cpp              | 2
-rw-r--r--  src/runtime/CL/functions/CLArithmeticAddition.cpp           | 2
-rw-r--r--  src/runtime/CL/functions/CLArithmeticSubtraction.cpp        | 2
-rw-r--r--  src/runtime/CL/functions/CLBatchNormalizationLayer.cpp      | 8
-rw-r--r--  src/runtime/CL/functions/CLDirectConvolutionLayer.cpp       | 2
-rw-r--r--  src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp          | 4
-rw-r--r--  src/runtime/CL/functions/CLNormalizationLayer.cpp           | 2
-rw-r--r--  src/runtime/CL/functions/CLPixelWiseMultiplication.cpp      | 4
-rw-r--r--  src/runtime/CL/functions/CLPoolingLayer.cpp                 | 2
-rw-r--r--  src/runtime/CL/functions/CLSoftmaxLayer.cpp                 | 4
-rw-r--r--  src/runtime/CL/functions/CLTranspose.cpp                    | 2
-rw-r--r--  src/runtime/CPP/functions/CPPPermute.cpp                    | 2
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticAddition.cpp         | 2
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticSubtraction.cpp      | 2
-rw-r--r--  src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp    | 4
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp        | 4
-rw-r--r--  src/runtime/NEON/functions/NECol2Im.cpp                     | 2
-rw-r--r--  src/runtime/NEON/functions/NEIm2Col.cpp                     | 2
-rw-r--r--  src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp    | 2
-rw-r--r--  src/runtime/NEON/functions/NETranspose.cpp                  | 2
-rw-r--r--  src/runtime/TensorAllocator.cpp                             | 4
22 files changed, 32 insertions, 32 deletions
diff --git a/src/runtime/CL/functions/CLActivationLayer.cpp b/src/runtime/CL/functions/CLActivationLayer.cpp
index 5369a59211..eaf2ca586c 100644
--- a/src/runtime/CL/functions/CLActivationLayer.cpp
+++ b/src/runtime/CL/functions/CLActivationLayer.cpp
@@ -36,7 +36,7 @@ void CLActivationLayer::configure(ICLTensor *input, ICLTensor *output, Activatio
_kernel = std::move(k);
}
-Error CLActivationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+Status CLActivationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
return CLActivationLayerKernel::validate(input, output, act_info);
}
diff --git a/src/runtime/CL/functions/CLArithmeticAddition.cpp b/src/runtime/CL/functions/CLArithmeticAddition.cpp
index 5fa0b8c33a..5c2e582ba2 100644
--- a/src/runtime/CL/functions/CLArithmeticAddition.cpp
+++ b/src/runtime/CL/functions/CLArithmeticAddition.cpp
@@ -37,7 +37,7 @@ void CLArithmeticAddition::configure(const ICLTensor *input1, const ICLTensor *i
_kernel = std::move(k);
}
-Error CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status CLArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
return CLArithmeticAdditionKernel::validate(input1, input2, output, policy);
}
diff --git a/src/runtime/CL/functions/CLArithmeticSubtraction.cpp b/src/runtime/CL/functions/CLArithmeticSubtraction.cpp
index 12a6b80691..5fca30c4f9 100644
--- a/src/runtime/CL/functions/CLArithmeticSubtraction.cpp
+++ b/src/runtime/CL/functions/CLArithmeticSubtraction.cpp
@@ -37,7 +37,7 @@ void CLArithmeticSubtraction::configure(const ICLTensor *input1, const ICLTensor
_kernel = std::move(k);
}
-Error CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
return CLArithmeticSubtractionKernel::validate(input1, input2, output, policy);
}
diff --git a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
index c4e307e541..58215c3c3e 100644
--- a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
@@ -42,10 +42,10 @@ void CLBatchNormalizationLayer::configure(ICLTensor *input, ICLTensor *output, c
_norm_kernel.configure(input, output, mean, var, beta, gamma, epsilon);
}
-Error CLBatchNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output,
- const ITensorInfo *mean, const ITensorInfo *var,
- const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon)
+Status CLBatchNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *mean, const ITensorInfo *var,
+ const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon)
{
return CLBatchNormalizationLayerKernel::validate(input, output, mean, var, beta, gamma, epsilon);
}
diff --git a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
index 50b7b46d99..d6a335c1ec 100644
--- a/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDirectConvolutionLayer.cpp
@@ -54,7 +54,7 @@ void CLDirectConvolutionLayer::configure(ICLTensor *input, const ICLTensor *weig
_input_border_handler.configure(input, _direct_conv_kernel.border_size(), BorderMode::CONSTANT, zero_value);
}
-Error CLDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
+Status CLDirectConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info)
{
return CLDirectConvolutionLayerKernel::validate(input, weights, biases, output, conv_info, CLScheduler::get().target());
}
diff --git a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
index f26ff5f938..16d8678386 100644
--- a/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpOutputStage.cpp
@@ -37,7 +37,7 @@ void CLGEMMLowpQuantizeDownInt32ToUint8Scale::configure(const ICLTensor *input,
_kernel = std::move(k);
}
-Error CLGEMMLowpQuantizeDownInt32ToUint8Scale::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+Status CLGEMMLowpQuantizeDownInt32ToUint8Scale::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
return CLGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(input, bias, output, min, max);
}
@@ -50,7 +50,7 @@ void CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ICLTen
_kernel = std::move(k);
}
-Error CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+Status CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
return CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
}
\ No newline at end of file
diff --git a/src/runtime/CL/functions/CLNormalizationLayer.cpp b/src/runtime/CL/functions/CLNormalizationLayer.cpp
index eefdec4ba4..32d8f15344 100644
--- a/src/runtime/CL/functions/CLNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLNormalizationLayer.cpp
@@ -48,7 +48,7 @@ void CLNormalizationLayer::configure(ICLTensor *input, ICLTensor *output, const
_border_handler.configure(input, _norm_kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
}
-Error CLNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const NormalizationLayerInfo &norm_info)
+Status CLNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const NormalizationLayerInfo &norm_info)
{
return CLNormalizationLayerKernel::validate(input, output, norm_info);
}
diff --git a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
index 164ff153ed..c78f94476e 100644
--- a/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
+++ b/src/runtime/CL/functions/CLPixelWiseMultiplication.cpp
@@ -38,8 +38,8 @@ void CLPixelWiseMultiplication::configure(const ICLTensor *input1, const ICLTens
_kernel = std::move(k);
}
-Error CLPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
- ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+Status CLPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+ ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
return CLPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
}
\ No newline at end of file
diff --git a/src/runtime/CL/functions/CLPoolingLayer.cpp b/src/runtime/CL/functions/CLPoolingLayer.cpp
index 20564f6c9d..2341633362 100644
--- a/src/runtime/CL/functions/CLPoolingLayer.cpp
+++ b/src/runtime/CL/functions/CLPoolingLayer.cpp
@@ -52,7 +52,7 @@ void CLPoolingLayer::configure(ICLTensor *input, ICLTensor *output, const Poolin
_border_handler.configure(input, _kernel->border_size(), border_mode, PixelValue(border_value));
}
-Error CLPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
+Status CLPoolingLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info)
{
return CLPoolingLayerKernel::validate(input, output, pool_info);
}
\ No newline at end of file
diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index f74b778e95..414c4d6ce5 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -84,7 +84,7 @@ void CLSoftmaxLayer::configure(const ICLTensor *input, ICLTensor *output, float
_sum.allocator()->allocate();
}
-Error CLSoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLSoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
@@ -109,7 +109,7 @@ Error CLSoftmaxLayer::validate(const ITensorInfo *input, const ITensorInfo *outp
}
ARM_COMPUTE_RETURN_ON_ERROR(CLLogits1DNormKernel::validate(&tensor_info_tmp, &tensor_info_sum, output));
- return Error{};
+ return Status{};
}
void CLSoftmaxLayer::run()
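Editorial note: the CLSoftmaxLayer hunk above also shows the composition side of the change: a function-level validate() chains its kernels' validate() calls and returns a default-constructed Status on success. The sketch below applies that pattern to a hypothetical free function; validate_transpose_step and the include paths are illustrative, while the macros, Status{} and CLTransposeKernel::validate come from this patch.

#include "arm_compute/core/CL/kernels/CLTransposeKernel.h"
#include "arm_compute/core/Error.h" // assumed location of Status and the error macros

using namespace arm_compute;

// Hedged sketch of the validation-composition pattern used in this patch.
Status validate_transpose_step(const ITensorInfo *input, const ITensorInfo *output)
{
    // Return an error Status immediately if any pointer is nullptr.
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);

    // Propagate the first failing kernel validation to the caller.
    ARM_COMPUTE_RETURN_ON_ERROR(CLTransposeKernel::validate(input, output));

    // A default-constructed Status signals success under the inverted logic.
    return Status{};
}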
diff --git a/src/runtime/CL/functions/CLTranspose.cpp b/src/runtime/CL/functions/CLTranspose.cpp
index ad5c04124d..ecb59f7d46 100644
--- a/src/runtime/CL/functions/CLTranspose.cpp
+++ b/src/runtime/CL/functions/CLTranspose.cpp
@@ -37,7 +37,7 @@ void CLTranspose::configure(const ICLTensor *input, ICLTensor *output)
_kernel = std::move(k);
}
-Error CLTranspose::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLTranspose::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return CLTransposeKernel::validate(input, output);
}
\ No newline at end of file
diff --git a/src/runtime/CPP/functions/CPPPermute.cpp b/src/runtime/CPP/functions/CPPPermute.cpp
index 5b92718542..bafcd2fec9 100644
--- a/src/runtime/CPP/functions/CPPPermute.cpp
+++ b/src/runtime/CPP/functions/CPPPermute.cpp
@@ -35,7 +35,7 @@ void CPPPermute::configure(const ITensor *input, ITensor *output, const Permutat
_kernel = std::move(k);
}
-Error CPPPermute::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
+Status CPPPermute::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
{
return CPPPermuteKernel::validate(input, output, perm);
}
diff --git a/src/runtime/NEON/functions/NEArithmeticAddition.cpp b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
index 85119ea17d..b5dd4d0d06 100644
--- a/src/runtime/NEON/functions/NEArithmeticAddition.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
@@ -36,7 +36,7 @@ void NEArithmeticAddition::configure(const ITensor *input1, const ITensor *input
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
-Error NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
return NEArithmeticAdditionKernel::validate(input1, input2, output, policy);
}
diff --git a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
index be264d54b4..5c0491ec6f 100644
--- a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
@@ -36,7 +36,7 @@ void NEArithmeticSubtraction::configure(const ITensor *input1, const ITensor *in
k->configure(input1, input2, output, policy);
_kernel = std::move(k);
}
-Error NEArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
+Status NEArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)
{
return NEArithmeticSubtractionKernel::validate(input1, input2, output, policy);
}
diff --git a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
index cfab12c33b..f6be00169d 100644
--- a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
@@ -43,8 +43,8 @@ void NEBatchNormalizationLayer::configure(ITensor *input, ITensor *output, const
_norm_kernel.configure(input, output, mean, var, beta, gamma, epsilon);
}
-Error NEBatchNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var, const ITensorInfo *beta, const ITensorInfo *gamma,
- float epsilon)
+Status NEBatchNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *mean, const ITensorInfo *var, const ITensorInfo *beta, const ITensorInfo *gamma,
+ float epsilon)
{
return NEBatchNormalizationLayerKernel::validate(input, output, mean, var, beta, gamma, epsilon);
}
diff --git a/src/runtime/NEON/functions/NECol2Im.cpp b/src/runtime/NEON/functions/NECol2Im.cpp
index 2a923f3730..78c6bc0475 100644
--- a/src/runtime/NEON/functions/NECol2Im.cpp
+++ b/src/runtime/NEON/functions/NECol2Im.cpp
@@ -35,7 +35,7 @@ void NECol2Im::configure(const ITensor *input, ITensor *output, const Size2D &co
_kernel = std::move(k);
}
-Error NECol2Im::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims)
+Status NECol2Im::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &convolved_dims)
{
return NECol2ImKernel::validate(input, output, convolved_dims);
}
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index a18f48d9a7..bee3831353 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -199,7 +199,7 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
}
}
-Error NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info)
+Status NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *output, const GEMMInfo &gemm_info)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(a, 1, DataType::QASYMM8);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);
@@ -287,7 +287,7 @@ Error NEGEMMLowpMatrixMultiplyCore::validate(const ITensorInfo *a, const ITensor
b_offset == 0 ? nullptr : &info_vector_sum_row,
a_offset, b_offset));
- return Error{};
+ return Status{};
}
void NEGEMMLowpMatrixMultiplyCore::run()
diff --git a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
index 53e5ae2f5a..8c02436bec 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpOutputStage.cpp
@@ -37,7 +37,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8Scale::configure(const ITensor *input, co
_kernel = std::move(k);
}
-Error NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+Status NEGEMMLowpQuantizeDownInt32ToUint8Scale::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
return NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(input, bias, output, min, max);
}
@@ -50,7 +50,7 @@ void NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::configure(const ITenso
_kernel = std::move(k);
}
-Error NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
+Status NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint::validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min, int max)
{
return NEGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel::validate(input, bias, output, min, max);
}
\ No newline at end of file
diff --git a/src/runtime/NEON/functions/NEIm2Col.cpp b/src/runtime/NEON/functions/NEIm2Col.cpp
index 354415daa3..8e90e66dcc 100644
--- a/src/runtime/NEON/functions/NEIm2Col.cpp
+++ b/src/runtime/NEON/functions/NEIm2Col.cpp
@@ -35,7 +35,7 @@ void NEIm2Col::configure(const ITensor *input, ITensor *output, const Size2D &ke
_kernel = std::move(k);
}
-Error NEIm2Col::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias)
+Status NEIm2Col::validate(const ITensorInfo *input, const ITensorInfo *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias)
{
return NEIm2ColKernel::validate(input, output, kernel_dims, conv_info, has_bias);
}
diff --git a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
index 9cd9d59c79..5a474e4a83 100644
--- a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
+++ b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
@@ -36,7 +36,7 @@ void NEPixelWiseMultiplication::configure(const ITensor *input1, const ITensor *
k->configure(input1, input2, output, scale, overflow_policy, rounding_policy);
_kernel = std::move(k);
}
-Error NEPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
+Status NEPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)
{
return NEPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
}
diff --git a/src/runtime/NEON/functions/NETranspose.cpp b/src/runtime/NEON/functions/NETranspose.cpp
index 14bca69f33..b5b28e8e18 100644
--- a/src/runtime/NEON/functions/NETranspose.cpp
+++ b/src/runtime/NEON/functions/NETranspose.cpp
@@ -37,7 +37,7 @@ void NETranspose::configure(const ITensor *input, ITensor *output)
_kernel = std::move(k);
}
-Error NETranspose::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status NETranspose::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return NETransposeKernel::validate(input, output);
}
\ No newline at end of file
diff --git a/src/runtime/TensorAllocator.cpp b/src/runtime/TensorAllocator.cpp
index 25bd479c84..a0d41b28ee 100644
--- a/src/runtime/TensorAllocator.cpp
+++ b/src/runtime/TensorAllocator.cpp
@@ -152,14 +152,14 @@ void TensorAllocator::free()
info().set_is_resizable(true);
}
-arm_compute::Error TensorAllocator::import_memory(Memory memory)
+arm_compute::Status TensorAllocator::import_memory(Memory memory)
{
ARM_COMPUTE_RETURN_ERROR_ON(memory.buffer() == nullptr);
ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);
_memory = memory;
info().set_is_resizable(false);
- return Error{};
+ return Status{};
}
void TensorAllocator::set_associated_memory_group(MemoryGroup *associated_memory_group)
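Editorial note: the TensorAllocator hunk above changes import_memory() to return a Status as well. The sketch below is a hedged illustration of checking that return value on the caller side; import_external_buffer and the fallback behaviour are illustrative, and Memory is assumed to be visible through the included headers (otherwise include arm_compute/runtime/Memory.h).

#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"

using namespace arm_compute;

// Hedged sketch: try to back a tensor with externally managed memory and fall
// back to a library-managed allocation if the import is rejected.
Status import_external_buffer(Tensor &tensor, Memory external_memory)
{
    const Status status = tensor.allocator()->import_memory(external_memory);
    if(!status) // assumed bool conversion: true on success
    {
        tensor.allocator()->allocate();
    }
    return status;
}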