-rw-r--r--  arm_compute/runtime/CL/ICLOperator.h | 3
-rw-r--r--  arm_compute/runtime/CL/functions/CLActivationLayer.h | 5
-rw-r--r--  arm_compute/runtime/CL/functions/CLConcatenateLayer.h | 5
-rw-r--r--  arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h | 35
-rw-r--r--  arm_compute/runtime/CL/functions/CLReshapeLayer.h | 7
-rw-r--r--  arm_compute/runtime/CL/functions/CLSlice.h | 3
-rw-r--r--  arm_compute/runtime/CL/functions/CLStridedSlice.h | 3
-rw-r--r--  arm_compute/runtime/NEON/INEOperator.h | 3
-rw-r--r--  arm_compute/runtime/NEON/functions/NEActivationLayer.h | 3
-rw-r--r--  arm_compute/runtime/NEON/functions/NEArithmeticAddition.h | 3
-rw-r--r--  arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h | 3
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConcatenateLayer.h | 5
-rw-r--r--  arm_compute/runtime/NEON/functions/NEElementwiseOperations.h | 111
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPReluLayer.h | 5
-rw-r--r--  arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h | 6
-rw-r--r--  arm_compute/runtime/NEON/functions/NEReshapeLayer.h | 5
-rw-r--r--  arm_compute/runtime/NEON/functions/NESlice.h | 3
-rw-r--r--  arm_compute/runtime/NEON/functions/NEStridedSlice.h | 3
-rw-r--r--  src/runtime/CL/CLOperator.cpp | 5
-rw-r--r--  src/runtime/CL/functions/CLActivationLayer.cpp | 21
-rw-r--r--  src/runtime/CL/functions/CLConcatenateLayer.cpp | 27
-rw-r--r--  src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp | 133
-rw-r--r--  src/runtime/CL/functions/CLReshapeLayer.cpp | 19
-rw-r--r--  src/runtime/CL/functions/CLSlice.cpp | 5
-rw-r--r--  src/runtime/CL/functions/CLStridedSlice.cpp | 5
-rw-r--r--  src/runtime/NEON/INEOperator.cpp | 5
-rw-r--r--  src/runtime/NEON/functions/NEActivationLayer.cpp | 5
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticAddition.cpp | 4
-rw-r--r--  src/runtime/NEON/functions/NEArithmeticSubtraction.cpp | 5
-rw-r--r--  src/runtime/NEON/functions/NEConcatenateLayer.cpp | 27
-rw-r--r--  src/runtime/NEON/functions/NEElementwiseOperators.cpp | 89
-rw-r--r--  src/runtime/NEON/functions/NEPReluLayer.cpp | 21
-rw-r--r--  src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp | 10
-rw-r--r--  src/runtime/NEON/functions/NEReshapeLayer.cpp | 19
-rw-r--r--  src/runtime/NEON/functions/NESlice.cpp | 5
-rw-r--r--  src/runtime/NEON/functions/NEStridedSlice.cpp | 5
36 files changed, 194 insertions(+), 427 deletions(-)
diff --git a/arm_compute/runtime/CL/ICLOperator.h b/arm_compute/runtime/CL/ICLOperator.h
index c9fdd864de..2d6c96e815 100644
--- a/arm_compute/runtime/CL/ICLOperator.h
+++ b/arm_compute/runtime/CL/ICLOperator.h
@@ -55,7 +55,8 @@ public:
// Inherited methods overridden:
void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
- void prepare(OperatorTensorMap constants) override final;
+ void prepare(OperatorTensorMap constants) override;
+ MemoryRequirements workspace() const override;
protected:
std::unique_ptr<ICLKernel> _kernel;
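Note: with this hunk, workspace() moves onto the base operator interface (and prepare() is no longer final); the default added in src/runtime/CL/CLOperator.cpp further below simply returns an empty MemoryRequirements. A hedged caller-side sketch, not part of the patch: needs_auxiliary_memory is an illustrative helper, and MemoryRequirements is assumed to be a container type with empty(), as the "return {};" default suggests.

#include "arm_compute/runtime/CL/ICLOperator.h"

// Illustrative helper: any experimental operator can now be asked uniformly
// for its scratch-memory needs through the base interface.
bool needs_auxiliary_memory(const arm_compute::experimental::ICLOperator &op)
{
    // Kernel-only operators such as the reworked CLActivation report no requirements.
    return !op.workspace().empty();
}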
diff --git a/arm_compute/runtime/CL/functions/CLActivationLayer.h b/arm_compute/runtime/CL/functions/CLActivationLayer.h
index d7cc67a647..632487c78d 100644
--- a/arm_compute/runtime/CL/functions/CLActivationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLActivationLayer.h
@@ -98,7 +98,7 @@ private:
namespace experimental
{
/** Basic function to run @ref CLActivationLayerKernel */
-class CLActivationLayer : public ICLOperator
+class CLActivation : public ICLOperator
{
public:
/** Set the input and output tensor.
@@ -120,9 +120,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
} // namespace arm_compute
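A hedged usage sketch for the renamed experimental::CLActivation, not taken from the patch: it only exercises the validate() overload declared above, and the tensor shapes, data type and activation function are illustrative.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

int main()
{
    using namespace arm_compute;
    // Illustrative 16x16 F32 tensors and a RELU activation.
    TensorInfo          src(TensorShape(16U, 16U), 1, DataType::F32);
    TensorInfo          dst(TensorShape(16U, 16U), 1, DataType::F32);
    ActivationLayerInfo act(ActivationLayerInfo::ActivationFunction::RELU);

    const Status s = experimental::CLActivation::validate(&src, &dst, act);
    return s.error_code() == ErrorCode::OK ? 0 : 1;
}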
diff --git a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
index 4e8a95be43..99a2053a5a 100644
--- a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
@@ -113,11 +113,11 @@ namespace experimental
* -# @ref CLDepthConcatenateLayerKernel (if underlying concatenation axis is 2).
* -# @ref CLBatchConcatenateLayerKernel (if underlying concatenation axis is 3).
*/
-class CLConcatenateLayer : public ICLOperator
+class CLConcatenation : public ICLOperator
{
public:
/** Default constructor */
- CLConcatenateLayer();
+ CLConcatenation();
/** Initialise the kernel's inputs vector and output.
*
* @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
@@ -144,7 +144,6 @@ public:
static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
- MemoryRequirements workspace() const override;
void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
private:
diff --git a/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h b/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h
index e4268c1592..5208bfe404 100644
--- a/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h
+++ b/arm_compute/runtime/CL/functions/CLElementWiseUnaryLayer.h
@@ -356,7 +356,7 @@ private:
namespace experimental
{
/** Basic function to perform inverse square root on an input tensor. */
-class CLRsqrtLayer : public ICLOperator
+class CLRsqrt : public ICLOperator
{
public:
/** Initialize the function
@@ -380,13 +380,10 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to perform exponential on an input tensor. */
-class CLExpLayer : public ICLOperator
+class CLExp : public ICLOperator
{
public:
/** Initialize the function
@@ -410,13 +407,10 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to negate an input tensor. */
-class CLNegLayer : public ICLOperator
+class CLNeg : public ICLOperator
{
public:
/** Initialize the function
@@ -440,13 +434,10 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to calculate sine of an input tensor. */
-class CLSinLayer : public ICLOperator
+class CLSin : public ICLOperator
{
public:
/** Initialize the function
@@ -470,13 +461,10 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to perform elementwise log on an input tensor. */
-class CLLogLayer : public ICLOperator
+class CLLog : public ICLOperator
{
public:
/** Initialize the function
@@ -500,13 +488,10 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to get the absolute value of an input tensor. */
-class CLAbsLayer : public ICLOperator
+class CLAbs : public ICLOperator
{
public:
/** Initialize the function
@@ -530,13 +515,10 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to get the round (to the nearest even) value of an input tensor. */
-class CLRoundLayer : public ICLOperator
+class CLRound : public ICLOperator
{
public:
/** Initialize the function
@@ -560,9 +542,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
} // namespace arm_compute
diff --git a/arm_compute/runtime/CL/functions/CLReshapeLayer.h b/arm_compute/runtime/CL/functions/CLReshapeLayer.h
index 175fbffd8d..7fc6c3b864 100644
--- a/arm_compute/runtime/CL/functions/CLReshapeLayer.h
+++ b/arm_compute/runtime/CL/functions/CLReshapeLayer.h
@@ -81,7 +81,7 @@ private:
namespace experimental
{
/** Basic function to run @ref CLReshapeLayerKernel */
-class CLReshapeLayer : public ICLOperator
+class CLReshape : public ICLOperator
{
public:
/** Initialise the kernel's inputs and outputs
@@ -100,10 +100,7 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
-}
+} // namespace arm_compute
#endif /*ARM_COMPUTE_CLRESHAPELAYER_H */
diff --git a/arm_compute/runtime/CL/functions/CLSlice.h b/arm_compute/runtime/CL/functions/CLSlice.h
index 6fe62acaf5..23c398cb41 100644
--- a/arm_compute/runtime/CL/functions/CLSlice.h
+++ b/arm_compute/runtime/CL/functions/CLSlice.h
@@ -68,9 +68,6 @@ public:
* @return A status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Coordinates &starts, const Coordinates &ends);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
diff --git a/arm_compute/runtime/CL/functions/CLStridedSlice.h b/arm_compute/runtime/CL/functions/CLStridedSlice.h
index 394d8c4f59..fdbef81f7d 100644
--- a/arm_compute/runtime/CL/functions/CLStridedSlice.h
+++ b/arm_compute/runtime/CL/functions/CLStridedSlice.h
@@ -156,9 +156,6 @@ public:
static Status validate(const ITensorInfo *input, const ITensorInfo *output,
const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
int32_t begin_mask = 0, int32_t end_mask = 0, int32_t shrink_axis_mask = 0);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
} // namespace arm_compute
diff --git a/arm_compute/runtime/NEON/INEOperator.h b/arm_compute/runtime/NEON/INEOperator.h
index 004abb245f..f91305543f 100644
--- a/arm_compute/runtime/NEON/INEOperator.h
+++ b/arm_compute/runtime/NEON/INEOperator.h
@@ -55,7 +55,8 @@ public:
// Inherited methods overridden:
void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
- void prepare(OperatorTensorMap constants) override final;
+ void prepare(OperatorTensorMap constants) override;
+ MemoryRequirements workspace() const override;
protected:
std::unique_ptr<INEKernel> _kernel;
diff --git a/arm_compute/runtime/NEON/functions/NEActivationLayer.h b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
index e5a1e4edea..cfece5c392 100644
--- a/arm_compute/runtime/NEON/functions/NEActivationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
@@ -109,9 +109,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
} // namespace arm_compute
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
index 6c8dbed2c8..e10771ef4b 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
@@ -72,9 +72,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
index 0dbbc51555..a38335c59b 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
@@ -89,9 +89,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
index 02c27e20e6..73c62330c5 100644
--- a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -103,11 +103,11 @@ namespace experimental
* -# @ref NEDepthConcatenateLayerKernel (if underlying concatenation axis is 2).
* -# @ref NEBatchConcatenateLayerKernel (if underlying concatenation axis is 3).
*/
-class NEConcatenateLayer : public INEOperator
+class NEConcatenation : public INEOperator
{
public:
/** Default constructor */
- NEConcatenateLayer();
+ NEConcatenation();
/** Initialise the kernel's inputs vector and output.
*
* @note Input and output tensor dimensions preconditions defer depending on the concatenation axis.
@@ -132,7 +132,6 @@ public:
static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
- MemoryRequirements workspace() const override;
void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
private:
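A hedged sketch for the renamed experimental::NEConcatenation, not taken from the patch: only the validate() overload declared above is exercised; the two 8x4 F32 inputs concatenated along axis 1 into an 8x8 output are illustrative.

#include <vector>

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"

int main()
{
    using namespace arm_compute;
    TensorInfo in0(TensorShape(8U, 4U), 1, DataType::F32);
    TensorInfo in1(TensorShape(8U, 4U), 1, DataType::F32);
    TensorInfo out(TensorShape(8U, 8U), 1, DataType::F32);

    // Concatenation needs at least two inputs; axis 1 stacks the 4-row inputs.
    const std::vector<const ITensorInfo *> inputs = { &in0, &in1 };
    const Status s = experimental::NEConcatenation::validate(inputs, &out, 1);
    return s.error_code() == ErrorCode::OK ? 0 : 1;
}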
diff --git a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
index ac88a10d24..7d9dac761f 100644
--- a/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
+++ b/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h
@@ -386,25 +386,20 @@ class NEElementwiseMax : public INEOperator
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[out] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Same as @p input1.
*/
- void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for max
*
- * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input1.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
};
/** Basic function to run @ref NEArithmeticOperationKernel for min
@@ -417,25 +412,20 @@ class NEElementwiseMin : public INEOperator
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[out] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Same as @p input1.
*/
- void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for min
*
- * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input1.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
};
/** Basic function to run @ref NEArithmeticOperationKernel for squared difference
@@ -448,25 +438,20 @@ class NEElementwiseSquaredDiff : public INEOperator
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[out] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in, out] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Same as @p input1.
*/
- void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for squared difference
*
- * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in] input1 First tensor input info. Data types supported: QASYMM8/QASYMM8_SIGNED/S16/F16/S32/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input1.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
};
/** Basic function to run @ref NEArithmeticOperationKernel for division
@@ -479,25 +464,20 @@ class NEElementwiseDivision : public INEOperator
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 First tensor input info. Data types supported: F16/F32.
- * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[out] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in, out] input1 First tensor input info. Data types supported: F16/F32.
+ * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Same as @p input1.
*/
- void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for division
*
- * @param[in] input1 First tensor input info. Data types supported: F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in] input1 First tensor input info. Data types supported: F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input1.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
};
/** Basic function to run @ref NEArithmeticOperationKernel for power
@@ -511,25 +491,20 @@ class NEElementwisePower : public INEOperator
public:
/** Initialise the kernel's inputs, output and conversion policy.
*
- * @param[in, out] input1 First tensor input info. Data types supported: F16/F32.
- * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[out] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in, out] input1 First tensor input info. Data types supported: F16/F32.
+ * @param[in, out] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[out] output Output tensor info. Data types supported: Same as @p input1.
*/
- void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ void configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output);
/** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticOperationKernel for power
*
- * @param[in] input1 First tensor input info. Data types supported: F16/F32.
- * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
- * @param[in] output Output tensor info. Data types supported: Same as @p input1.
- * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
+ * @param[in] input1 First tensor input info. Data types supported: F16/F32.
+ * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+ * @param[in] output Output tensor info. Data types supported: Same as @p input1.
*
* @return a status
*/
- static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
+ static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
};
/** Basic function to run @ref NEComparisonOperationKernel.
@@ -558,9 +533,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ComparisonOperation op);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to run @ref NEComparisonOperationKernel
@@ -588,9 +560,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to run equal comparison. */
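A hedged sketch of the simplified interface, not taken from the patch: after this change configure()/validate() on the experimental elementwise operators no longer take an ActivationLayerInfo. The shapes and data type below are illustrative.

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h"

int main()
{
    using namespace arm_compute;
    TensorInfo a(TensorShape(8U, 8U), 1, DataType::F32);
    TensorInfo b(TensorShape(8U, 8U), 1, DataType::F32);
    TensorInfo out(TensorShape(8U, 8U), 1, DataType::F32);

    // Fused activations are no longer part of this call after the patch.
    const Status s = experimental::NEElementwiseMax::validate(&a, &b, &out);
    return s.error_code() == ErrorCode::OK ? 0 : 1;
}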
diff --git a/arm_compute/runtime/NEON/functions/NEPReluLayer.h b/arm_compute/runtime/NEON/functions/NEPReluLayer.h
index 3ec6eb2bc7..756058b5ec 100644
--- a/arm_compute/runtime/NEON/functions/NEPReluLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPReluLayer.h
@@ -38,7 +38,7 @@ namespace experimental
*
* @note The function implements an activation layer with the PRELU activation function.
*/
-class NEPReluLayer : public INEOperator
+class NEPRelu : public INEOperator
{
public:
/** Set the input and output tensor.
@@ -57,9 +57,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index 29677b7462..3c1aa5220c 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -102,9 +102,6 @@ public:
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy,
const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
/** Basic function to run @ref NEComplexPixelWiseMultiplicationKernel. */
@@ -129,9 +126,6 @@ public:
* @param[in] act_info (Optional) Activation layer information in case of a fused activation. Currently not supported.
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
diff --git a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
index f13c75f55f..2ca6660139 100644
--- a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
@@ -78,7 +78,7 @@ private:
namespace experimental
{
/** Basic function to run @ref NEReshapeLayerKernel */
-class NEReshapeLayer : public INEOperator
+class NEReshape : public INEOperator
{
public:
/** Initialise the kernel's inputs and outputs
@@ -96,9 +96,6 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
} // namespace arm_compute
diff --git a/arm_compute/runtime/NEON/functions/NESlice.h b/arm_compute/runtime/NEON/functions/NESlice.h
index 0bb639dfd7..28628778cb 100644
--- a/arm_compute/runtime/NEON/functions/NESlice.h
+++ b/arm_compute/runtime/NEON/functions/NESlice.h
@@ -67,9 +67,6 @@ public:
* @return A status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, const Coordinates &starts, const Coordinates &ends);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
diff --git a/arm_compute/runtime/NEON/functions/NEStridedSlice.h b/arm_compute/runtime/NEON/functions/NEStridedSlice.h
index 15ff0b9aaf..f9c94f5301 100644
--- a/arm_compute/runtime/NEON/functions/NEStridedSlice.h
+++ b/arm_compute/runtime/NEON/functions/NEStridedSlice.h
@@ -73,9 +73,6 @@ public:
static Status validate(const ITensorInfo *input, const ITensorInfo *output,
const Coordinates &starts, const Coordinates &ends, const BiStrides &strides,
int32_t begin_mask = 0, int32_t end_mask = 0, int32_t shrink_axis_mask = 0);
-
- // Inherited methods overridden:
- MemoryRequirements workspace() const override;
};
} // namespace experimental
diff --git a/src/runtime/CL/CLOperator.cpp b/src/runtime/CL/CLOperator.cpp
index 0052f1aaf7..11ee30eae9 100644
--- a/src/runtime/CL/CLOperator.cpp
+++ b/src/runtime/CL/CLOperator.cpp
@@ -49,5 +49,10 @@ void ICLOperator::prepare(OperatorTensorMap constants)
{
ARM_COMPUTE_UNUSED(constants);
}
+
+MemoryRequirements ICLOperator::workspace() const
+{
+ return {};
+}
} // namespace experimental
} // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLActivationLayer.cpp b/src/runtime/CL/functions/CLActivationLayer.cpp
index 640841e1d5..784473d426 100644
--- a/src/runtime/CL/functions/CLActivationLayer.cpp
+++ b/src/runtime/CL/functions/CLActivationLayer.cpp
@@ -33,30 +33,25 @@ namespace arm_compute
{
namespace experimental
{
-void CLActivationLayer::configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *output, ActivationLayerInfo act_info)
+void CLActivation::configure(const CLCompileContext &compile_context, ITensorInfo *input, ITensorInfo *output, ActivationLayerInfo act_info)
{
auto k = arm_compute::support::cpp14::make_unique<CLActivationLayerKernel>();
k->configure(compile_context, input, output, act_info);
_kernel = std::move(k);
}
-Status CLActivationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+Status CLActivation::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
return CLActivationLayerKernel::validate(input, output, act_info);
}
-
-MemoryRequirements CLActivationLayer::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct CLActivationLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- CLRuntimeContext *ctx{ nullptr };
- std::unique_ptr<experimental::CLActivationLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ CLRuntimeContext *ctx{ nullptr };
+ std::unique_ptr<experimental::CLActivation> op{ nullptr };
};
CLActivationLayer::CLActivationLayer(CLRuntimeContext *ctx)
@@ -83,13 +78,13 @@ void CLActivationLayer::configure(const CLCompileContext &compile_context, ICLTe
_impl->src = input;
_impl->dst = output == nullptr ? input : output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLActivationLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLActivation>();
_impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), act_info);
}
Status CLActivationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
- return experimental::CLActivationLayer::validate(input, output, act_info);
+ return experimental::CLActivation::validate(input, output, act_info);
}
void CLActivationLayer::run()
diff --git a/src/runtime/CL/functions/CLConcatenateLayer.cpp b/src/runtime/CL/functions/CLConcatenateLayer.cpp
index 06903d2ff2..1ddda021bc 100644
--- a/src/runtime/CL/functions/CLConcatenateLayer.cpp
+++ b/src/runtime/CL/functions/CLConcatenateLayer.cpp
@@ -42,14 +42,14 @@ namespace arm_compute
{
namespace experimental
{
-CLConcatenateLayer::CLConcatenateLayer()
+CLConcatenation::CLConcatenation()
: _concat_kernels(),
_num_inputs(0),
_axis(Window::DimX)
{
}
-void CLConcatenateLayer::configure(const CLCompileContext &compile_context, const std::vector<ITensorInfo *> &inputs_vector, ITensorInfo *output, size_t axis)
+void CLConcatenation::configure(const CLCompileContext &compile_context, const std::vector<ITensorInfo *> &inputs_vector, ITensorInfo *output, size_t axis)
{
ARM_COMPUTE_ERROR_ON(output == nullptr);
_axis = axis;
@@ -143,7 +143,7 @@ void CLConcatenateLayer::configure(const CLCompileContext &compile_context, cons
}
}
-Status CLConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
+Status CLConcatenation::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
{
ARM_COMPUTE_RETURN_ERROR_ON(output == nullptr);
const unsigned int num_inputs = inputs_vector.size();
@@ -220,12 +220,7 @@ Status CLConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inpu
return Status{};
}
-MemoryRequirements CLConcatenateLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void CLConcatenateLayer::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace)
+void CLConcatenation::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace)
{
ARM_COMPUTE_UNUSED(workspace);
@@ -259,11 +254,11 @@ void CLConcatenateLayer::run(InputTensorMap inputs, OutputTensorMap outputs, Ope
struct CLConcatenateLayer::Impl
{
- std::vector<const ICLTensor *> srcs{};
- ICLTensor *dst{ nullptr };
- unsigned int num_inputs{ 0 };
- unsigned int axis{ 0 };
- std::unique_ptr<experimental::CLConcatenateLayer> op{ nullptr };
+ std::vector<const ICLTensor *> srcs{};
+ ICLTensor *dst{ nullptr };
+ unsigned int num_inputs{ 0 };
+ unsigned int axis{ 0 };
+ std::unique_ptr<experimental::CLConcatenation> op{ nullptr };
};
CLConcatenateLayer::CLConcatenateLayer()
@@ -290,7 +285,7 @@ void CLConcatenateLayer::configure(const CLCompileContext &compile_context, std:
_impl->dst = output;
_impl->axis = axis;
_impl->num_inputs = inputs_vector.size();
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLConcatenateLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLConcatenation>();
std::vector<ITensorInfo *> inputs_vector_info;
for(unsigned int i = 0; i < inputs_vector.size(); ++i)
@@ -303,7 +298,7 @@ void CLConcatenateLayer::configure(const CLCompileContext &compile_context, std:
Status CLConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
{
- return experimental::CLConcatenateLayer::validate(inputs_vector, output, axis);
+ return experimental::CLConcatenation::validate(inputs_vector, output, axis);
}
void CLConcatenateLayer::run()
diff --git a/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp b/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp
index 402b9648a7..f8e9694b1c 100644
--- a/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp
+++ b/src/runtime/CL/functions/CLElementWiseUnaryLayer.cpp
@@ -32,131 +32,96 @@ namespace arm_compute
{
namespace experimental
{
-void CLRsqrtLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLRsqrt::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::RSQRT);
_kernel = std::move(k);
}
-Status CLRsqrtLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLRsqrt::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLElementWiseUnaryLayerKernel::validate(input, output, ElementWiseUnary::RSQRT);
}
-MemoryRequirements CLRsqrtLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void CLExpLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLExp::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::EXP);
_kernel = std::move(k);
}
-Status CLExpLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLExp::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLElementWiseUnaryLayerKernel::validate(input, output, ElementWiseUnary::EXP);
}
-MemoryRequirements CLExpLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void CLNegLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLNeg::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::NEG);
_kernel = std::move(k);
}
-Status CLNegLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLNeg::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLElementWiseUnaryLayerKernel::validate(input, output, ElementWiseUnary::NEG);
}
-MemoryRequirements CLNegLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void CLSinLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLSin::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::SIN);
_kernel = std::move(k);
}
-Status CLSinLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLSin::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLElementWiseUnaryLayerKernel::validate(input, output, ElementWiseUnary::SIN);
}
-MemoryRequirements CLSinLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void CLAbsLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLAbs::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::ABS);
_kernel = std::move(k);
}
-Status CLAbsLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLAbs::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLElementWiseUnaryLayerKernel::validate(input, output, ElementWiseUnary::ABS);
}
-MemoryRequirements CLAbsLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void CLLogLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLLog::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::LOG);
_kernel = std::move(k);
}
-Status CLLogLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLLog::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLElementWiseUnaryLayerKernel::validate(input, output, ElementWiseUnary::LOG);
}
-MemoryRequirements CLLogLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void CLRoundLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLRound::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLElementWiseUnaryLayerKernel>();
k->configure(compile_context, input, output, ElementWiseUnary::ROUND);
_kernel = std::move(k);
}
-Status CLRoundLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLRound::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLElementWiseUnaryLayerKernel::validate(input, output, ElementWiseUnary::ROUND);
}
-
-MemoryRequirements CLRoundLayer::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct CLRsqrtLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLRsqrtLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLRsqrt> op{ nullptr };
};
CLRsqrtLayer::CLRsqrtLayer()
@@ -177,13 +142,13 @@ void CLRsqrtLayer::configure(const CLCompileContext &compile_context, const ICLT
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLRsqrtLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLRsqrt>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLRsqrtLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return experimental::CLRsqrtLayer::validate(input, output);
+ return experimental::CLRsqrt::validate(input, output);
}
void CLRsqrtLayer::run()
@@ -196,9 +161,9 @@ void CLRsqrtLayer::run()
struct CLExpLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLExpLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLExp> op{ nullptr };
};
CLExpLayer::CLExpLayer()
@@ -219,13 +184,13 @@ void CLExpLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLExpLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLExp>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLExpLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return experimental::CLExpLayer::validate(input, output);
+ return experimental::CLExp::validate(input, output);
}
void CLExpLayer::run()
@@ -238,9 +203,9 @@ void CLExpLayer::run()
struct CLNegLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLNegLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLNeg> op{ nullptr };
};
CLNegLayer::CLNegLayer()
@@ -261,12 +226,12 @@ void CLNegLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLNegLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLNeg>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLNegLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return experimental::CLNegLayer::validate(input, output);
+ return experimental::CLNeg::validate(input, output);
}
void CLNegLayer::run()
@@ -279,9 +244,9 @@ void CLNegLayer::run()
struct CLSinLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLSinLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLSin> op{ nullptr };
};
CLSinLayer::CLSinLayer()
@@ -302,12 +267,12 @@ void CLSinLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLSinLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLSin>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLSinLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return experimental::CLSinLayer::validate(input, output);
+ return experimental::CLSin::validate(input, output);
}
void CLSinLayer::run()
@@ -320,9 +285,9 @@ void CLSinLayer::run()
struct CLAbsLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLAbsLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLAbs> op{ nullptr };
};
CLAbsLayer::CLAbsLayer()
@@ -343,12 +308,12 @@ void CLAbsLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLAbsLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLAbs>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLAbsLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return experimental::CLAbsLayer::validate(input, output);
+ return experimental::CLAbs::validate(input, output);
}
void CLAbsLayer::run()
@@ -361,9 +326,9 @@ void CLAbsLayer::run()
struct CLLogLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLLogLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLLog> op{ nullptr };
};
CLLogLayer::CLLogLayer()
@@ -384,12 +349,12 @@ void CLLogLayer::configure(const CLCompileContext &compile_context, const ICLTen
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLLogLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLLog>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLLogLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return experimental::CLLogLayer::validate(input, output);
+ return experimental::CLLog::validate(input, output);
}
void CLLogLayer::run()
@@ -402,9 +367,9 @@ void CLLogLayer::run()
struct CLRoundLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLRoundLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLRound> op{ nullptr };
};
CLRoundLayer::CLRoundLayer()
@@ -425,12 +390,12 @@ void CLRoundLayer::configure(const CLCompileContext &compile_context, const ICLT
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLRoundLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLRound>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLRoundLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
- return experimental::CLRoundLayer::validate(input, output);
+ return experimental::CLRound::validate(input, output);
}
void CLRoundLayer::run()
diff --git a/src/runtime/CL/functions/CLReshapeLayer.cpp b/src/runtime/CL/functions/CLReshapeLayer.cpp
index d75e798183..ac8b176963 100644
--- a/src/runtime/CL/functions/CLReshapeLayer.cpp
+++ b/src/runtime/CL/functions/CLReshapeLayer.cpp
@@ -32,29 +32,24 @@ namespace arm_compute
{
namespace experimental
{
-void CLReshapeLayer::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
+void CLReshape::configure(const CLCompileContext &compile_context, const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<CLReshapeLayerKernel>();
k->configure(compile_context, input, output);
_kernel = std::move(k);
}
-Status CLReshapeLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status CLReshape::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::CLReshapeLayerKernel::validate(input, output);
}
-
-MemoryRequirements CLReshapeLayer::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct CLReshapeLayer::Impl
{
- const ICLTensor *src{ nullptr };
- ICLTensor *dst{ nullptr };
- std::unique_ptr<experimental::CLReshapeLayer> op{ nullptr };
+ const ICLTensor *src{ nullptr };
+ ICLTensor *dst{ nullptr };
+ std::unique_ptr<experimental::CLReshape> op{ nullptr };
};
CLReshapeLayer::CLReshapeLayer()
@@ -75,14 +70,14 @@ void CLReshapeLayer::configure(const CLCompileContext &compile_context, const IC
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLReshapeLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::CLReshape>();
_impl->op->configure(compile_context, input->info(), output->info());
}
Status CLReshapeLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(experimental::CLReshapeLayer::validate(input, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(experimental::CLReshape::validate(input, output));
return Status{};
}
diff --git a/src/runtime/CL/functions/CLSlice.cpp b/src/runtime/CL/functions/CLSlice.cpp
index b60daeee44..3689707bd0 100644
--- a/src/runtime/CL/functions/CLSlice.cpp
+++ b/src/runtime/CL/functions/CLSlice.cpp
@@ -60,11 +60,6 @@ Status CLSlice::validate(const ITensorInfo *input, const ITensorInfo *output, co
return CLStridedSliceKernel::validate(input, output, starts, ends, BiStrides(), 0, slice_end_mask, 0);
}
-
-MemoryRequirements CLSlice::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct CLSlice::Impl
diff --git a/src/runtime/CL/functions/CLStridedSlice.cpp b/src/runtime/CL/functions/CLStridedSlice.cpp
index d1b16700ff..bdef0785ec 100644
--- a/src/runtime/CL/functions/CLStridedSlice.cpp
+++ b/src/runtime/CL/functions/CLStridedSlice.cpp
@@ -47,11 +47,6 @@ Status CLStridedSlice::validate(const ITensorInfo *input, const ITensorInfo *out
{
return CLStridedSliceKernel::validate(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}
-
-MemoryRequirements CLStridedSlice::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct CLStridedSlice::Impl
diff --git a/src/runtime/NEON/INEOperator.cpp b/src/runtime/NEON/INEOperator.cpp
index 3ace8a6294..1d819977c8 100644
--- a/src/runtime/NEON/INEOperator.cpp
+++ b/src/runtime/NEON/INEOperator.cpp
@@ -49,5 +49,10 @@ void INEOperator::prepare(OperatorTensorMap constants)
{
ARM_COMPUTE_UNUSED(constants);
}
+
+MemoryRequirements INEOperator::workspace() const
+{
+ return {};
+}
} // namespace experimental
} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEActivationLayer.cpp b/src/runtime/NEON/functions/NEActivationLayer.cpp
index afa220fd72..0e75e58b3b 100644
--- a/src/runtime/NEON/functions/NEActivationLayer.cpp
+++ b/src/runtime/NEON/functions/NEActivationLayer.cpp
@@ -45,11 +45,6 @@ Status NEActivationLayer::validate(const ITensorInfo *input, const ITensorInfo *
{
return NEActivationLayerKernel::validate(input, output, activation_info);
}
-
-MemoryRequirements NEActivationLayer::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NEActivationLayer::Impl
diff --git a/src/runtime/NEON/functions/NEArithmeticAddition.cpp b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
index 95cee0e1ef..b18309ef1d 100644
--- a/src/runtime/NEON/functions/NEArithmeticAddition.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticAddition.cpp
@@ -45,10 +45,6 @@ Status NEArithmeticAddition::validate(const ITensorInfo *input1, const ITensorIn
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticAdditionKernel::validate(input1, input2, output, policy);
}
-MemoryRequirements NEArithmeticAddition::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NEArithmeticAddition::Impl
diff --git a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
index a69e78def3..c7f492bcbc 100644
--- a/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
+++ b/src/runtime/NEON/functions/NEArithmeticSubtraction.cpp
@@ -46,11 +46,6 @@ Status NEArithmeticSubtraction::validate(const ITensorInfo *input1, const ITenso
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticSubtractionKernel::validate(input1, input2, output, policy);
}
-
-MemoryRequirements NEArithmeticSubtraction::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NEArithmeticSubtraction::Impl
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index 37cdd15529..9f8a2a1b8e 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -41,12 +41,12 @@ namespace arm_compute
{
namespace experimental
{
-NEConcatenateLayer::NEConcatenateLayer()
+NEConcatenation::NEConcatenation()
: _concat_kernels(), _num_inputs(0), _axis(0)
{
}
-void NEConcatenateLayer::configure(const std::vector<const ITensorInfo *> &inputs_vector, ITensorInfo *output, size_t axis)
+void NEConcatenation::configure(const std::vector<const ITensorInfo *> &inputs_vector, ITensorInfo *output, size_t axis)
{
ARM_COMPUTE_ERROR_ON(output == nullptr);
@@ -100,7 +100,7 @@ void NEConcatenateLayer::configure(const std::vector<const ITensorInfo *> &input
}
}
-Status NEConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
+Status NEConcatenation::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
@@ -146,12 +146,7 @@ Status NEConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inpu
return Status{};
}
-MemoryRequirements NEConcatenateLayer::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void NEConcatenateLayer::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace)
+void NEConcatenation::run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace)
{
ARM_COMPUTE_UNUSED(workspace);
@@ -177,11 +172,11 @@ void NEConcatenateLayer::run(InputTensorMap inputs, OutputTensorMap outputs, Ope
struct NEConcatenateLayer::Impl
{
- std::vector<const ITensor *> srcs{};
- ITensor *dst{ nullptr };
- unsigned int num_inputs{ 0 };
- unsigned int axis{ 0 };
- std::unique_ptr<experimental::NEConcatenateLayer> op{ nullptr };
+ std::vector<const ITensor *> srcs{};
+ ITensor *dst{ nullptr };
+ unsigned int num_inputs{ 0 };
+ unsigned int axis{ 0 };
+ std::unique_ptr<experimental::NEConcatenation> op{ nullptr };
};
NEConcatenateLayer::NEConcatenateLayer()
@@ -203,7 +198,7 @@ void NEConcatenateLayer::configure(std::vector<const ITensor *> inputs_vector, I
_impl->dst = output;
_impl->axis = axis;
_impl->num_inputs = inputs_vector.size();
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEConcatenateLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEConcatenation>();
std::vector<const ITensorInfo *> inputs_vector_info;
for(unsigned int i = 0; i < inputs_vector.size(); ++i)
@@ -216,7 +211,7 @@ void NEConcatenateLayer::configure(std::vector<const ITensor *> inputs_vector, I
Status NEConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
{
- return experimental::NEConcatenateLayer::validate(inputs_vector, output, axis);
+ return experimental::NEConcatenation::validate(inputs_vector, output, axis);
}
void NEConcatenateLayer::run()
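With the experimental class renamed to NEConcatenation and switched to ITensorInfo-based configure/validate, the public NEConcatenateLayer keeps its ITensor entry points and forwards through the Impl members above. Its run() body lies outside the hunks shown; a hedged sketch of how it would pack tensors into the maps expected by NEConcatenation::run (the exact TensorType keys, in particular ACL_SRC_VEC, are assumptions):

// Sketch only; the real run() body is not shown in this patch.
void NEConcatenateLayer::run()
{
    InputTensorMap srcs;
    for(unsigned int i = 0; i < _impl->num_inputs; ++i)
    {
        srcs[TensorType::ACL_SRC_VEC + i] = _impl->srcs.at(i); // key enum is an assumption
    }
    _impl->op->run(srcs, { { TensorType::ACL_DST, _impl->dst } }, {});
}
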
diff --git a/src/runtime/NEON/functions/NEElementwiseOperators.cpp b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
index 28039d68f8..9340cc09d4 100644
--- a/src/runtime/NEON/functions/NEElementwiseOperators.cpp
+++ b/src/runtime/NEON/functions/NEElementwiseOperators.cpp
@@ -34,101 +34,66 @@ namespace arm_compute
{
namespace experimental
{
-void NEElementwiseMax::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
+void NEElementwiseMax::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::MAX, input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticOperationKernel::validate(ArithmeticOperation::MAX, input1, input2, output);
}
-MemoryRequirements NEElementwiseMax::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void NEElementwiseMin::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
+void NEElementwiseMin::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::MIN, input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticOperationKernel::validate(ArithmeticOperation::MIN, input1, input2, output);
}
-MemoryRequirements NEElementwiseMin::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void NEElementwiseSquaredDiff::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
+void NEElementwiseSquaredDiff::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, input1, input2, output);
}
-MemoryRequirements NEElementwiseSquaredDiff::workspace() const
-{
- return MemoryRequirements{};
-}
-
-void NEElementwiseDivision::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
+void NEElementwiseDivision::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEDivisionOperationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwiseDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+Status NEElementwiseDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEDivisionOperationKernel::validate(input1, input2, output);
}
-MemoryRequirements NEElementwiseDivision::workspace() const
+void NEElementwisePower::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
- return MemoryRequirements{};
-}
-
-void NEElementwisePower::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
-{
- ARM_COMPUTE_UNUSED(act_info);
auto k = arm_compute::support::cpp14::make_unique<NEPowerOperationKernel>();
k->configure(input1, input2, output);
_kernel = std::move(k);
}
-Status NEElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
+Status NEElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
- ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEPowerOperationKernel::validate(input1, input2, output);
}
-MemoryRequirements NEElementwisePower::workspace() const
-{
- return MemoryRequirements{};
-}
-
template <ComparisonOperation COP>
void NEElementwiseComparisonStatic<COP>::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
@@ -143,12 +108,6 @@ Status NEElementwiseComparisonStatic<COP>::validate(const ITensorInfo *input1, c
return NEComparisonOperationKernel::validate(COP, input1, input2, output);
}
-template <ComparisonOperation COP>
-MemoryRequirements NEElementwiseComparisonStatic<COP>::workspace() const
-{
- return MemoryRequirements{};
-}
-
void NEElementwiseComparison::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output, ComparisonOperation op)
{
auto k = arm_compute::support::cpp14::make_unique<NEComparisonOperationKernel>();
@@ -161,11 +120,6 @@ Status NEElementwiseComparison::validate(const ITensorInfo *input1, const ITenso
return NEComparisonOperationKernel::validate(op, input1, input2, output);
}
-MemoryRequirements NEElementwiseComparison::workspace() const
-{
- return MemoryRequirements{};
-}
-
// Supported Specializations
template class NEElementwiseComparisonStatic<ComparisonOperation::Equal>;
template class NEElementwiseComparisonStatic<ComparisonOperation::NotEqual>;
@@ -193,17 +147,18 @@ NEElementwiseMax::~NEElementwiseMax() = default;
void NEElementwiseMax::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
_impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseMax>();
- _impl->op->configure(input1->info(), input2->info(), output->info(), act_info);
+ _impl->op->configure(input1->info(), input2->info(), output->info());
}
Status NEElementwiseMax::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
- return experimental::NEElementwiseMax::validate(input1, input2, output, act_info);
+ return experimental::NEElementwiseMax::validate(input1, input2, output);
}
void NEElementwiseMax::run()
@@ -231,17 +186,18 @@ NEElementwiseMin::~NEElementwiseMin() = default;
void NEElementwiseMin::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
_impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseMin>();
- _impl->op->configure(input1->info(), input2->info(), output->info(), act_info);
+ _impl->op->configure(input1->info(), input2->info(), output->info());
}
Status NEElementwiseMin::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
- return experimental::NEElementwiseMin::validate(input1, input2, output, act_info);
+ return experimental::NEElementwiseMin::validate(input1, input2, output);
}
void NEElementwiseMin::run()
@@ -269,17 +225,18 @@ NEElementwiseSquaredDiff::~NEElementwiseSquaredDiff()
void NEElementwiseSquaredDiff::configure(ITensor *input1, ITensor *input2, ITensor *output, const ActivationLayerInfo &act_info)
{
+ ARM_COMPUTE_UNUSED(act_info);
_impl->src_0 = input1;
_impl->src_1 = input2;
_impl->dst = output;
_impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseSquaredDiff>();
- _impl->op->configure(input1->info(), input2->info(), output->info(), act_info);
+ _impl->op->configure(input1->info(), input2->info(), output->info());
}
Status NEElementwiseSquaredDiff::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
- return experimental::NEElementwiseSquaredDiff::validate(input1, input2, output, act_info);
+ return experimental::NEElementwiseSquaredDiff::validate(input1, input2, output);
}
void NEElementwiseSquaredDiff::run()
@@ -312,13 +269,13 @@ void NEElementwiseDivision::configure(ITensor *input1, ITensor *input2, ITensor
_impl->src_1 = input2;
_impl->dst = output;
_impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwiseDivision>();
- _impl->op->configure(input1->info(), input2->info(), output->info(), act_info);
+ _impl->op->configure(input1->info(), input2->info(), output->info());
}
Status NEElementwiseDivision::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
- return experimental::NEElementwiseDivision::validate(input1, input2, output, act_info);
+ return experimental::NEElementwiseDivision::validate(input1, input2, output);
}
void NEElementwiseDivision::run()
@@ -351,13 +308,13 @@ void NEElementwisePower::configure(ITensor *input1, ITensor *input2, ITensor *ou
_impl->src_1 = input2;
_impl->dst = output;
_impl->op = arm_compute::support::cpp14::make_unique<experimental::NEElementwisePower>();
- _impl->op->configure(input1->info(), input2->info(), output->info(), act_info);
+ _impl->op->configure(input1->info(), input2->info(), output->info());
}
Status NEElementwisePower::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
- return experimental::NEElementwisePower::validate(input1, input2, output, act_info);
+ return experimental::NEElementwisePower::validate(input1, input2, output);
}
void NEElementwisePower::run()
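Across the elementwise operators the change is uniform: the experimental classes drop the ActivationLayerInfo parameter, while the public functions keep it in their signatures, reject it when enabled via ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled()), and no longer forward it. A hedged sketch of driving one of the new info-based operators directly (the ACL_SRC_0/ACL_SRC_1/ACL_DST keys and the tensor setup are assumptions, not part of this patch):

// Sketch: configure on tensor metadata once, then run on concrete tensors.
Tensor input1, input2, output; // shapes, data types and allocation handled elsewhere
ARM_COMPUTE_ERROR_THROW_ON(experimental::NEElementwiseMax::validate(input1.info(), input2.info(), output.info()));
experimental::NEElementwiseMax max_op;
max_op.configure(input1.info(), input2.info(), output.info());
max_op.run({ { TensorType::ACL_SRC_0, &input1 }, { TensorType::ACL_SRC_1, &input2 } },
           { { TensorType::ACL_DST, &output } },
           {});
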
diff --git a/src/runtime/NEON/functions/NEPReluLayer.cpp b/src/runtime/NEON/functions/NEPReluLayer.cpp
index c54c70615a..15d9fd9959 100644
--- a/src/runtime/NEON/functions/NEPReluLayer.cpp
+++ b/src/runtime/NEON/functions/NEPReluLayer.cpp
@@ -31,30 +31,25 @@ namespace arm_compute
{
namespace experimental
{
-void NEPReluLayer::configure(const ITensorInfo *input, const ITensorInfo *alpha, ITensorInfo *output)
+void NEPRelu::configure(const ITensorInfo *input, const ITensorInfo *alpha, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<NEArithmeticOperationKernel>();
k->configure(ArithmeticOperation::PRELU, input, alpha, output);
_kernel = std::move(k);
}
-Status NEPReluLayer::validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output)
+Status NEPRelu::validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output)
{
return NEArithmeticOperationKernel::validate(ArithmeticOperation::PRELU, input, alpha, output);
}
-
-MemoryRequirements NEPReluLayer::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NEPReluLayer::Impl
{
- const ITensor *src_0{ nullptr };
- const ITensor *src_1{ nullptr };
- ITensor *dst{ nullptr };
- std::unique_ptr<experimental::NEPReluLayer> op{ nullptr };
+ const ITensor *src_0{ nullptr };
+ const ITensor *src_1{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<experimental::NEPRelu> op{ nullptr };
};
NEPReluLayer::NEPReluLayer()
@@ -70,7 +65,7 @@ void NEPReluLayer::configure(const ITensor *input, const ITensor *alpha, ITensor
_impl->src_0 = input;
_impl->src_1 = alpha;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEPReluLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEPRelu>();
_impl->op->configure(input->info(), alpha->info(), output->info());
}
@@ -83,6 +78,6 @@ void NEPReluLayer::run()
Status NEPReluLayer::validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output)
{
- return experimental::NEPReluLayer::validate(input, alpha, output);
+ return experimental::NEPRelu::validate(input, alpha, output);
}
} // namespace arm_compute
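The public NEPReluLayer interface is untouched by this rename; only the experimental backing class becomes NEPRelu, so existing call sites keep working as before:

// Unchanged user-facing usage (tensor setup and allocation omitted).
NEPReluLayer prelu;
prelu.configure(&input, &alpha, &output); // input/alpha/output are ITensor-backed tensors
prelu.run();
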
diff --git a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
index 3d51e0d0fd..ba5dd7cdee 100644
--- a/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
+++ b/src/runtime/NEON/functions/NEPixelWiseMultiplication.cpp
@@ -48,11 +48,6 @@ Status NEPixelWiseMultiplication::validate(const ITensorInfo *input1, const ITen
return NEPixelWiseMultiplicationKernel::validate(input1, input2, output, scale, overflow_policy, rounding_policy);
}
-MemoryRequirements NEPixelWiseMultiplication::workspace() const
-{
- return MemoryRequirements{};
-}
-
void NEComplexPixelWiseMultiplication::configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info)
{
ARM_COMPUTE_UNUSED(act_info);
@@ -66,11 +61,6 @@ Status NEComplexPixelWiseMultiplication::validate(const ITensorInfo *input1, con
ARM_COMPUTE_RETURN_ERROR_ON(act_info.enabled());
return NEComplexPixelWiseMultiplicationKernel::validate(input1, input2, output);
}
-
-MemoryRequirements NEComplexPixelWiseMultiplication::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NEPixelWiseMultiplication::Impl
diff --git a/src/runtime/NEON/functions/NEReshapeLayer.cpp b/src/runtime/NEON/functions/NEReshapeLayer.cpp
index 101fdbdf4d..47d5519274 100644
--- a/src/runtime/NEON/functions/NEReshapeLayer.cpp
+++ b/src/runtime/NEON/functions/NEReshapeLayer.cpp
@@ -35,29 +35,24 @@ namespace arm_compute
{
namespace experimental
{
-void NEReshapeLayer::configure(const ITensorInfo *input, ITensorInfo *output)
+void NEReshape::configure(const ITensorInfo *input, ITensorInfo *output)
{
auto k = arm_compute::support::cpp14::make_unique<NEReshapeLayerKernel>();
k->configure(input, output);
_kernel = std::move(k);
}
-Status NEReshapeLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
+Status NEReshape::validate(const ITensorInfo *input, const ITensorInfo *output)
{
return arm_compute::NEReshapeLayerKernel::validate(input, output);
}
-
-MemoryRequirements NEReshapeLayer::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NEReshapeLayer::Impl
{
- const ITensor *src{ nullptr };
- ITensor *dst{ nullptr };
- std::unique_ptr<experimental::NEReshapeLayer> op{ nullptr };
+ const ITensor *src{ nullptr };
+ ITensor *dst{ nullptr };
+ std::unique_ptr<experimental::NEReshape> op{ nullptr };
};
NEReshapeLayer::NEReshapeLayer()
@@ -75,14 +70,14 @@ void NEReshapeLayer::configure(const ITensor *input, ITensor *output)
{
_impl->src = input;
_impl->dst = output;
- _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEReshapeLayer>();
+ _impl->op = arm_compute::support::cpp14::make_unique<experimental::NEReshape>();
_impl->op->configure(input->info(), output->info());
}
Status NEReshapeLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
- ARM_COMPUTE_RETURN_ON_ERROR(experimental::NEReshapeLayer::validate(input, output));
+ ARM_COMPUTE_RETURN_ON_ERROR(experimental::NEReshape::validate(input, output));
return Status{};
}
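Reshape follows the same split: experimental::NEReshape works purely on ITensorInfo, and NEReshapeLayer keeps the tensor-based entry points. A hedged sketch of using the info-based operator directly (the ACL_SRC/ACL_DST keys are assumptions, as above):

// Sketch: validate and configure on metadata, then execute on real tensors.
ARM_COMPUTE_ERROR_THROW_ON(experimental::NEReshape::validate(src.info(), dst.info()));
experimental::NEReshape reshape;
reshape.configure(src.info(), dst.info());
reshape.run({ { TensorType::ACL_SRC, &src } }, { { TensorType::ACL_DST, &dst } }, {});
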
diff --git a/src/runtime/NEON/functions/NESlice.cpp b/src/runtime/NEON/functions/NESlice.cpp
index 15fbe8d432..7c3252178b 100644
--- a/src/runtime/NEON/functions/NESlice.cpp
+++ b/src/runtime/NEON/functions/NESlice.cpp
@@ -62,11 +62,6 @@ Status NESlice::validate(const ITensorInfo *input, const ITensorInfo *output, co
return NEStridedSliceKernel::validate(input, output, starts, ends, BiStrides(), 0, slice_end_mask, 0);
}
-
-MemoryRequirements NESlice::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NESlice::Impl
diff --git a/src/runtime/NEON/functions/NEStridedSlice.cpp b/src/runtime/NEON/functions/NEStridedSlice.cpp
index 243c01780c..37e3590446 100644
--- a/src/runtime/NEON/functions/NEStridedSlice.cpp
+++ b/src/runtime/NEON/functions/NEStridedSlice.cpp
@@ -47,11 +47,6 @@ Status NEStridedSlice::validate(const ITensorInfo *input, const ITensorInfo *out
{
return NEStridedSliceKernel::validate(input, output, starts, ends, strides, begin_mask, end_mask, shrink_axis_mask);
}
-
-MemoryRequirements NEStridedSlice::workspace() const
-{
- return MemoryRequirements{};
-}
} // namespace experimental
struct NEStridedSlice::Impl