Diffstat (limited to 'arm_compute/core')
-rw-r--r--  arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h  48
-rw-r--r--  arm_compute/core/utils/misc/InfoHelpers.h                      10
2 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index bb98eb83bf..86159fc915 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -62,16 +62,16 @@ public:
* - (QSYMM16,QSYMM16) -> QSYMM16
* - (QSYMM16,QSYMM16) -> S32
*
- * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+ * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+ * @param[in] input2 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+ * @param[out] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
* @param[in] scale Scale to apply after multiplication.
* Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
+ void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Initialise the kernel's input, output and border mode.
*
@@ -90,16 +90,16 @@ public:
* - (QSYMM16,QSYMM16) -> S32
*
* @param[in] compile_context The compile context to be used.
- * @param[in] input1 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[in] input2 An input tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
- * @param[out] output The output tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+ * @param[in] input1 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+ * @param[in] input2 An input tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
+ * @param[out] output The output tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/F16/F32.
* @param[in] scale Scale to apply after multiplication.
* Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
+ void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel
*
@@ -132,13 +132,13 @@ public:
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
+ void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs, const Window &window, cl::CommandQueue &queue) override;
BorderSize border_size() const override;
private:
- const ICLTensor *_input1;
- const ICLTensor *_input2;
- ICLTensor *_output;
+ const ITensorInfo *_input1;
+ const ITensorInfo *_input2;
+ ITensorInfo *_output;
};
/** Interface for the complex pixelwise multiplication kernel. */
@@ -157,21 +157,21 @@ public:
CLComplexPixelWiseMultiplicationKernel &operator=(CLComplexPixelWiseMultiplicationKernel &&) = default;
/** Initialise the kernel's input, output and border mode.
*
- * @param[in] input1 An input tensor. Data types supported: F32. Number of channels supported: 2.
- * @param[in] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[out] output The output tensor, Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[out] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ void configure(ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Initialise the kernel's input, output and border mode.
*
* @param[in] compile_context The compile context to be used.
- * @param[in] input1 An input tensor. Data types supported: F32. Number of channels supported: 2.
- * @param[in] input2 An input tensor. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
- * @param[out] output The output tensor, Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
+ * @param[in] input2 An input tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
+ * @param[out] output The output tensor info. Data types supported: same as @p input1. Number of channels supported: same as @p input1.
* @param[in] act_info (Optional) Activation layer information in case of a fused activation.
*/
- void configure(const CLCompileContext &compile_context, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
+ void configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLComplexPixelWiseMultiplicationKernel
*
* @param[in] input1 An input tensor info. Data types supported: F32. Number of channels supported: 2.
@@ -184,13 +184,13 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(const Window &window, cl::CommandQueue &queue) override;
+ void run_op(const InputTensorMap &inputs, const OutputTensorMap &outputs, const Window &window, cl::CommandQueue &queue) override;
BorderSize border_size() const override;
private:
- const ICLTensor *_input1;
- const ICLTensor *_input2;
- ICLTensor *_output;
+ const ITensorInfo *_input1;
+ const ITensorInfo *_input2;
+ ITensorInfo *_output;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLPIXELWISEMULTIPLICATIONKERNEL_H */
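
The hunks above replace the ICLTensor-based configure() and run() entry points with an ITensorInfo-based configure() and a run_op() that receives the tensors at execution time. Below is a minimal usage sketch, assuming already-initialised and allocated CLTensor objects; the tensor-map construction and the ACL_SRC_0/ACL_SRC_1/ACL_DST slot constants are assumptions drawn from the library's tensor-map convention, not something defined by this patch.

#include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

// Hypothetical helper: multiplies two already-initialised, allocated F32 CL tensors.
void pixelwise_mul_f32(CLTensor &src0, CLTensor &src1, CLTensor &dst)
{
    CLPixelWiseMultiplicationKernel kernel;

    // configure() now consumes only tensor metadata (ITensorInfo*), so the
    // kernel no longer caches pointers to the backing CL buffers.
    kernel.configure(src0.info(), src1.info(), dst.info(),
                     1.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);

    // The concrete tensors are bound at execution time through the maps taken
    // by run_op(); the ACL_SRC_0/ACL_SRC_1/ACL_DST slot constants are assumed.
    const InputTensorMap  inputs  = { { TensorType::ACL_SRC_0, &src0 },
                                      { TensorType::ACL_SRC_1, &src1 } };
    const OutputTensorMap outputs = { { TensorType::ACL_DST, &dst } };
    kernel.run_op(inputs, outputs, kernel.window(), CLScheduler::get().queue());
}
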
diff --git a/arm_compute/core/utils/misc/InfoHelpers.h b/arm_compute/core/utils/misc/InfoHelpers.h
index ffde82bce2..ced0d24b56 100644
--- a/arm_compute/core/utils/misc/InfoHelpers.h
+++ b/arm_compute/core/utils/misc/InfoHelpers.h
@@ -86,7 +86,7 @@ inline void build_lstm_params_tensor_info(const LSTMParams<T> &lstm_params,
{
ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.input_to_input_weights(), lstm_params.recurrent_to_input_weights(), lstm_params.input_gate_bias());
- const ITensorInfo *cell_to_input_weights_info = (lstm_params.has_peephole_opt()) ? lstm_params.cell_to_input_weights()->info() : nullptr;
+ ITensorInfo *cell_to_input_weights_info = (lstm_params.has_peephole_opt()) ? lstm_params.cell_to_input_weights()->info() : nullptr;
lstm_params_info->set_cifg_params(lstm_params.input_to_input_weights()->info(), lstm_params.recurrent_to_input_weights()->info(),
cell_to_input_weights_info, lstm_params.input_gate_bias()->info());
}
@@ -100,10 +100,10 @@ inline void build_lstm_params_tensor_info(const LSTMParams<T> &lstm_params,
ARM_COMPUTE_ERROR_ON_NULLPTR(lstm_params.input_layer_norm_weights());
}
- const ITensorInfo *forget_info = lstm_params.forget_layer_norm_weights()->info();
- const ITensorInfo *cell_info = lstm_params.cell_layer_norm_weights()->info();
- const ITensorInfo *output_info = lstm_params.output_layer_norm_weights()->info();
- const ITensorInfo *input_info = lstm_params.has_cifg_opt() ? nullptr : lstm_params.input_layer_norm_weights()->info();
+ ITensorInfo *forget_info = lstm_params.forget_layer_norm_weights()->info();
+ ITensorInfo *cell_info = lstm_params.cell_layer_norm_weights()->info();
+ ITensorInfo *output_info = lstm_params.output_layer_norm_weights()->info();
+ ITensorInfo *input_info = lstm_params.has_cifg_opt() ? nullptr : lstm_params.input_layer_norm_weights()->info();
lstm_params_info->set_layer_normalization_params(input_info, forget_info, cell_info, output_info);
}
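
The InfoHelpers.h hunks drop the const qualifiers because set_cifg_params() and set_layer_normalization_params() on LSTMParams<ITensorInfo> now expect mutable ITensorInfo pointers. A minimal caller-side sketch follows, assuming CL tensors behind the LSTM parameters and the utils::info_helpers namespace used by the library headers; everything not shown in the diff above is illustrative.

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/utils/misc/InfoHelpers.h"

using namespace arm_compute;

// Hypothetical sketch: translate tensor-backed LSTM parameters into the
// ITensorInfo-backed variant used for kernel configuration and validation.
void fill_lstm_info(const LSTMParams<ICLTensor> &lstm_params)
{
    LSTMParams<ITensorInfo> lstm_params_info;
    // After this patch the helper hands out non-const ITensorInfo pointers,
    // matching what the LSTMParams<ITensorInfo> setters accept.
    utils::info_helpers::build_lstm_params_tensor_info(lstm_params, &lstm_params_info);
}
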