Diffstat (limited to 'arm_compute/runtime')
-rw-r--r--  arm_compute/runtime/CL/CLScheduler.h                          |  7
-rw-r--r--  arm_compute/runtime/CL/CLTuner.h                              |  7
-rw-r--r--  arm_compute/runtime/CL/ICLOperator.h                          |  4
-rw-r--r--  arm_compute/runtime/CL/ICLTuner.h                             |  5
-rw-r--r--  arm_compute/runtime/CL/functions/CLConcatenateLayer.h         |  2
-rw-r--r--  arm_compute/runtime/CL/functions/CLElementwiseOperations.h    | 14
-rw-r--r--  arm_compute/runtime/CL/functions/CLPReluLayer.h               |  2
-rw-r--r--  arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h  |  4
-rw-r--r--  arm_compute/runtime/CL/tuners/BifrostTuner.h                  |  2
-rw-r--r--  arm_compute/runtime/CL/tuners/MidgardTuner.h                  |  2
-rw-r--r--  arm_compute/runtime/CPP/CPPScheduler.h                        |  4
-rw-r--r--  arm_compute/runtime/IOperator.h                               |  9
-rw-r--r--  arm_compute/runtime/IScheduler.h                              |  5
-rw-r--r--  arm_compute/runtime/NEON/INEOperator.h                        |  5
-rw-r--r--  arm_compute/runtime/NEON/functions/NEConcatenateLayer.h       |  2
-rw-r--r--  arm_compute/runtime/OMP/OMPScheduler.h                        |  5
-rw-r--r--  arm_compute/runtime/SingleThreadScheduler.h                   |  5
17 files changed, 39 insertions, 45 deletions
diff --git a/arm_compute/runtime/CL/CLScheduler.h b/arm_compute/runtime/CL/CLScheduler.h
index 93595c65c7..8a22832792 100644
--- a/arm_compute/runtime/CL/CLScheduler.h
+++ b/arm_compute/runtime/CL/CLScheduler.h
@@ -76,11 +76,10 @@ public:
/** Schedule the execution of the passed kernel if possible.
*
* @param[in] kernel Kernel to execute.
- * @param[in] inputs Vector containing the input tensors.
- * @param[in] outputs Vector containing the output tensors.
+ * @param[in] tensors Vector containing the tensors to operate on.
* @param[in] flush (Optional) Specifies if the command queue will be flushed after running the kernel.
*/
- void enqueue_op(ICLKernel &kernel, const InputTensorMap &inputs, const OutputTensorMap &outputs, bool flush = true);
+ void enqueue_op(ICLKernel &kernel, ITensorPack &tensors, bool flush = true);
/** Initialises the context and command queue to be used by the scheduler.
*
@@ -152,7 +151,7 @@ public:
bool is_initialised() const;
private:
- void enqueue_common(ICLKernel &kernel, const InputTensorMap &inputs, const OutputTensorMap &outputs, bool flush);
+ void enqueue_common(ICLKernel &kernel, ITensorPack &tensors, bool flush);
/** Flag to ensure symbols initialisation is happening before Scheduler creation */
static std::once_flag _initialize_symbols;
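
Note on the CLScheduler hunks above: callers that previously built separate InputTensorMap/OutputTensorMap objects now hand the kernel a single ITensorPack. A minimal call-site sketch follows; the add_const_tensor/add_tensor helpers, the ACL_SRC/ACL_DST tensor IDs and the extra includes are assumptions taken from the core-side tensor-pack API that accompanies this change, not from this header.

#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

void enqueue_with_pack(arm_compute::ICLKernel &kernel,
                       arm_compute::ICLTensor &src,
                       arm_compute::ICLTensor &dst)
{
    arm_compute::ITensorPack pack;
    pack.add_const_tensor(arm_compute::TensorType::ACL_SRC, &src); // read-only input
    pack.add_tensor(arm_compute::TensorType::ACL_DST, &dst);       // writable output

    // A single pack replaces the former inputs/outputs maps; flushing behaves as before.
    arm_compute::CLScheduler::get().enqueue_op(kernel, pack, /* flush */ true);
}
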
diff --git a/arm_compute/runtime/CL/CLTuner.h b/arm_compute/runtime/CL/CLTuner.h
index aa31181d2d..3b45a2177e 100644
--- a/arm_compute/runtime/CL/CLTuner.h
+++ b/arm_compute/runtime/CL/CLTuner.h
@@ -116,7 +116,7 @@ public:
// Inherited methods overridden:
void tune_kernel_static(ICLKernel &kernel) override;
void tune_kernel_dynamic(ICLKernel &kernel) override;
- void tune_kernel_dynamic(ICLKernel &kernel, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
+ void tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors) override;
/** Is the kernel_event set ?
*
@@ -127,11 +127,12 @@ public:
private:
/** Find optimal LWS using brute-force approach
*
- * @param[in] kernel OpenCL kernel to be tuned with LWS
+ * @param[in] kernel OpenCL kernel to be tuned with LWS
+ * @param[in,out] tensors Tensors for the kernel to operate on
*
* @return The optimal LWS to use
*/
- cl::NDRange find_optimal_lws(ICLKernel &kernel, const InputTensorMap &inputs, const OutputTensorMap &outputs);
+ cl::NDRange find_optimal_lws(ICLKernel &kernel, ITensorPack &tensors);
std::unordered_map<std::string, cl::NDRange> _lws_table;
cl::Event _kernel_event;
diff --git a/arm_compute/runtime/CL/ICLOperator.h b/arm_compute/runtime/CL/ICLOperator.h
index 2d6c96e815..526b7e93e9 100644
--- a/arm_compute/runtime/CL/ICLOperator.h
+++ b/arm_compute/runtime/CL/ICLOperator.h
@@ -54,8 +54,8 @@ public:
ICLOperator &operator=(ICLOperator &&) = default;
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
- void prepare(OperatorTensorMap constants) override;
+ void run(ITensorPack &tensors) override;
+ void prepare(ITensorPack &constants) override;
MemoryRequirements workspace() const override;
protected:
diff --git a/arm_compute/runtime/CL/ICLTuner.h b/arm_compute/runtime/CL/ICLTuner.h
index 4bc8ddf632..0f951c384e 100644
--- a/arm_compute/runtime/CL/ICLTuner.h
+++ b/arm_compute/runtime/CL/ICLTuner.h
@@ -54,10 +54,9 @@ public:
/** Tune OpenCL kernel dynamically
*
* @param[in] kernel Kernel to tune
- * @param[in] inputs Inputs for the kernel to use
- * @param[in, out] outputs Outputs for the kernel to use
+ * @param[in, out] tensors Tensors for the kernel to use
*/
- virtual void tune_kernel_dynamic(ICLKernel &kernel, const InputTensorMap &inputs, const OutputTensorMap &outputs) = 0;
+ virtual void tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors) = 0;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_ICLTUNER_H */
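
The ICLTuner interface above now receives the same ITensorPack in its dynamic-tuning hook. A sketch of a user-defined tuner against the updated signatures; the class name and the pass-through bodies are illustrative only, and ARM_COMPUTE_UNUSED is assumed to be available from arm_compute/core/Error.h.

#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/ICLTuner.h"

// No-op tuner: satisfies the interface without performing any LWS search.
class NullTuner final : public arm_compute::ICLTuner
{
public:
    void tune_kernel_static(arm_compute::ICLKernel &kernel) override
    {
        ARM_COMPUTE_UNUSED(kernel); // nothing to tune statically
    }
    void tune_kernel_dynamic(arm_compute::ICLKernel &kernel) override
    {
        ARM_COMPUTE_UNUSED(kernel); // no tensors available in this overload
    }
    void tune_kernel_dynamic(arm_compute::ICLKernel &kernel, arm_compute::ITensorPack &tensors) override
    {
        ARM_COMPUTE_UNUSED(kernel);  // a real tuner would run the kernel on 'tensors'
        ARM_COMPUTE_UNUSED(tensors); // while searching for the optimal LWS
    }
};
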
diff --git a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
index 99a2053a5a..f535c8ea97 100644
--- a/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConcatenateLayer.h
@@ -144,7 +144,7 @@ public:
static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
std::vector<std::unique_ptr<ICLKernel>> _concat_kernels;
diff --git a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
index 5af24c90ac..2d9d43863d 100644
--- a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
+++ b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
@@ -98,7 +98,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
@@ -168,7 +168,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
@@ -207,7 +207,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
@@ -246,7 +246,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
@@ -285,7 +285,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
@@ -324,7 +324,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
@@ -363,7 +363,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
diff --git a/arm_compute/runtime/CL/functions/CLPReluLayer.h b/arm_compute/runtime/CL/functions/CLPReluLayer.h
index 08567cccfb..84743508df 100644
--- a/arm_compute/runtime/CL/functions/CLPReluLayer.h
+++ b/arm_compute/runtime/CL/functions/CLPReluLayer.h
@@ -64,7 +64,7 @@ public:
static Status validate(const ITensorInfo *input, const ITensorInfo *alpha, const ITensorInfo *output);
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
index ca8d77e6b7..2066012306 100644
--- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
+++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
@@ -103,7 +103,7 @@ public:
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
@@ -136,7 +136,7 @@ public:
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo());
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
CLFillBorderKernel _border_handler;
diff --git a/arm_compute/runtime/CL/tuners/BifrostTuner.h b/arm_compute/runtime/CL/tuners/BifrostTuner.h
index 830f7d9067..237693fb88 100644
--- a/arm_compute/runtime/CL/tuners/BifrostTuner.h
+++ b/arm_compute/runtime/CL/tuners/BifrostTuner.h
@@ -37,7 +37,7 @@ public:
// Inherited overriden methods
void tune_kernel_static(ICLKernel &kernel) override;
void tune_kernel_dynamic(ICLKernel &kernel) override;
- void tune_kernel_dynamic(ICLKernel &kernel, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
+ void tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors) override;
};
} // namespace tuners
} // namespace arm_compute
diff --git a/arm_compute/runtime/CL/tuners/MidgardTuner.h b/arm_compute/runtime/CL/tuners/MidgardTuner.h
index c702e7a2aa..86d46044c2 100644
--- a/arm_compute/runtime/CL/tuners/MidgardTuner.h
+++ b/arm_compute/runtime/CL/tuners/MidgardTuner.h
@@ -37,7 +37,7 @@ public:
// Inherited overriden methods
void tune_kernel_static(ICLKernel &kernel) override;
void tune_kernel_dynamic(ICLKernel &kernel) override;
- void tune_kernel_dynamic(ICLKernel &kernel, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
+ void tune_kernel_dynamic(ICLKernel &kernel, ITensorPack &tensors) override;
};
} // namespace tuners
} // namespace arm_compute
diff --git a/arm_compute/runtime/CPP/CPPScheduler.h b/arm_compute/runtime/CPP/CPPScheduler.h
index 9d55ed448e..e8ad427eba 100644
--- a/arm_compute/runtime/CPP/CPPScheduler.h
+++ b/arm_compute/runtime/CPP/CPPScheduler.h
@@ -52,7 +52,7 @@ public:
void set_num_threads_with_affinity(unsigned int num_threads, BindFunc func) override;
unsigned int num_threads() const override;
void schedule(ICPPKernel *kernel, const Hints &hints) override;
- void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
+ void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) override;
protected:
/** Will run the workloads in parallel using num_threads
@@ -62,7 +62,7 @@ protected:
void run_workloads(std::vector<Workload> &workloads) override;
private:
- void schedule_common(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs);
+ void schedule_common(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors);
struct Impl;
std::unique_ptr<Impl> _impl;
};
diff --git a/arm_compute/runtime/IOperator.h b/arm_compute/runtime/IOperator.h
index d72fca4399..e7952bb748 100644
--- a/arm_compute/runtime/IOperator.h
+++ b/arm_compute/runtime/IOperator.h
@@ -40,13 +40,10 @@ public:
virtual ~IOperator() = default;
/** Run the kernels contained in the function
*
- *
- * @param[in] inputs Vector that contains the input tensors.
- * @param[in] outputs Vector that contains the output tensors.
- * @param[in] workspace Vector that contains the workspace tensors.
+ * @param[in] tensors Vector that contains the tensors to operate on.
*
*/
- virtual void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) = 0;
+ virtual void run(ITensorPack &tensors) = 0;
/** Prepare the function for executing
*
* Any one off pre-processing step required by the function is handled here
@@ -55,7 +52,7 @@ public:
*
* @note Prepare stage might not need all the function's buffers' backing memory to be available in order to execute
*/
- virtual void prepare(OperatorTensorMap constants) = 0;
+ virtual void prepare(ITensorPack &constants) = 0;
/** Return the memory requirements required by the workspace
*/
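
IOperator::run() and prepare() above now take a single ITensorPack covering inputs, outputs and constants. A usage sketch for a generic binary operator; the ACL_SRC_0/ACL_SRC_1/ACL_DST IDs, the add_*_tensor helpers and the include set are assumptions borrowed from the experimental operator API, not defined in this header.

#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/IOperator.h"

void run_binary_operator(arm_compute::IOperator      &op,
                         const arm_compute::ITensor  &src0,
                         const arm_compute::ITensor  &src1,
                         arm_compute::ITensor        &dst)
{
    arm_compute::ITensorPack pack;
    pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_0, &src0);
    pack.add_const_tensor(arm_compute::TensorType::ACL_SRC_1, &src1);
    pack.add_tensor(arm_compute::TensorType::ACL_DST, &dst);

    arm_compute::ITensorPack constants; // one-off constants, empty for this sketch
    op.prepare(constants);              // run any one-time preparation steps
    op.run(pack);                       // execute with the packed tensors
}
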
diff --git a/arm_compute/runtime/IScheduler.h b/arm_compute/runtime/IScheduler.h
index fff77274bd..98627538e8 100644
--- a/arm_compute/runtime/IScheduler.h
+++ b/arm_compute/runtime/IScheduler.h
@@ -168,10 +168,9 @@ public:
*
* @param[in] kernel Kernel to execute.
* @param[in] hints Hints for the scheduler.
- * @param[in] inputs Vector containing the input tensors.
- * @param[in] outputs Vector containing the output tensors.
+ * @param[in] tensors Vector containing the tensors to operate on.
*/
- virtual void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) = 0;
+ virtual void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) = 0;
/** Execute all the passed workloads
*
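
IScheduler::schedule_op() above mirrors the same change for CPU kernels: one ITensorPack instead of two maps. A dispatch sketch, assuming Scheduler::get() and the IScheduler::Hints constructor behave as for the existing schedule() path; Window::DimY is only an illustrative split dimension.

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/Scheduler.h"

void schedule_with_pack(arm_compute::ICPPKernel &kernel, arm_compute::ITensorPack &tensors)
{
    // Split the execution window along DimY, as non-operator schedule() calls typically do.
    arm_compute::IScheduler::Hints hints(arm_compute::Window::DimY);
    arm_compute::Scheduler::get().schedule_op(&kernel, hints, tensors);
}
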
diff --git a/arm_compute/runtime/NEON/INEOperator.h b/arm_compute/runtime/NEON/INEOperator.h
index f91305543f..415e767eec 100644
--- a/arm_compute/runtime/NEON/INEOperator.h
+++ b/arm_compute/runtime/NEON/INEOperator.h
@@ -24,6 +24,7 @@
#ifndef ARM_COMPUTE_INEOPERATOR_H
#define ARM_COMPUTE_INEOPERATOR_H
+#include "../../core/ITensor.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/runtime/IOperator.h"
#include "arm_compute/runtime/IRuntimeContext.h"
@@ -54,8 +55,8 @@ public:
INEOperator &operator=(INEOperator &&) = default;
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
- void prepare(OperatorTensorMap constants) override;
+ void run(ITensorPack &tensors) override;
+ void prepare(ITensorPack &constants) override;
MemoryRequirements workspace() const override;
protected:
diff --git a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
index 73c62330c5..1d703ae729 100644
--- a/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConcatenateLayer.h
@@ -132,7 +132,7 @@ public:
static Status validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis);
// Inherited methods overridden:
- void run(InputTensorMap inputs, OutputTensorMap outputs, OperatorTensorMap workspace) override;
+ void run(ITensorPack &tensors) override;
private:
std::vector<std::unique_ptr<INEKernel>> _concat_kernels;
diff --git a/arm_compute/runtime/OMP/OMPScheduler.h b/arm_compute/runtime/OMP/OMPScheduler.h
index 56bd6baaa6..56a31cc076 100644
--- a/arm_compute/runtime/OMP/OMPScheduler.h
+++ b/arm_compute/runtime/OMP/OMPScheduler.h
@@ -63,10 +63,9 @@ public:
*
* @param[in] kernel Kernel to execute.
* @param[in] hints Hints for the scheduler.
- * @param[in] inputs Vector containing the input tensors.
- * @param[in] outputs Vector containing the output tensors.
+ * @param[in] tensors Vector containing the tensors to operate on.
*/
- void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
+ void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) override;
protected:
/** Execute all the passed workloads
diff --git a/arm_compute/runtime/SingleThreadScheduler.h b/arm_compute/runtime/SingleThreadScheduler.h
index 42fc742062..d45730e499 100644
--- a/arm_compute/runtime/SingleThreadScheduler.h
+++ b/arm_compute/runtime/SingleThreadScheduler.h
@@ -54,10 +54,9 @@ public:
*
* @param[in] kernel Kernel to execute.
* @param[in] hints Hints for the scheduler.
- * @param[in] inputs Vector containing the input tensors.
- * @param[in] outputs Vector containing the output tensors.
+ * @param[in] tensors Vector containing the tensors to operate on.
*/
- void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
+ void schedule_op(ICPPKernel *kernel, const Hints &hints, ITensorPack &tensors) override;
protected:
/** Will run the workloads sequentially and in order.