aboutsummaryrefslogtreecommitdiff
path: root/arm_compute/runtime
diff options
context:
space:
mode:
authorPablo Tello <pablo.tello@arm.com>2018-03-14 17:55:27 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:49:16 +0000
commit7fad9b1d00f3ee1488ba4038d1371f6ea219f8b7 (patch)
treeded71e1cfa8e0c085f8bce5dfc26a99786d60e52 /arm_compute/runtime
parent1562be3e8a449360a90af75f6f1481a30d41be75 (diff)
downloadComputeLibrary-7fad9b1d00f3ee1488ba4038d1371f6ea219f8b7.tar.gz
COMPMID-1021: CPUInfo refactoring.
Removed CPUTarget in favor of the CPUModel type. CPUInfo now holds a vector of N CPUs. CPUInfo autoinitialise upon construction with 1 GENERIC CPU. CPPScheduler fills CPUInfo's vector upon construction (runtime). IScheduler has a single CPUInfo obj and ThreadInfo always gets a pointer to it (avoid copying the vector) Change-Id: I30f293258c959c87f6bac5eac8b963beb6a4d365 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/124626 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute/runtime')
-rw-r--r--arm_compute/runtime/CPUUtils.h44
-rw-r--r--arm_compute/runtime/IScheduler.h10
-rw-r--r--arm_compute/runtime/NEON/AssemblyHelper.h80
3 files changed, 71 insertions, 63 deletions
diff --git a/arm_compute/runtime/CPUUtils.h b/arm_compute/runtime/CPUUtils.h
new file mode 100644
index 0000000000..70211a5817
--- /dev/null
+++ b/arm_compute/runtime/CPUUtils.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_RUNTIME_CPU_UTILS_H__
+#define __ARM_COMPUTE_RUNTIME_CPU_UTILS_H__
+
+namespace arm_compute
+{
+class CPUInfo;
+/** This function will try to detect the CPU configuration on the system and will fill
+ * the cpuinfo object accordingly to reflect this.
+ *
+ * @param[out] cpuinfo @ref CPUInfo to be used to hold the system's cpu configuration.
+ */
+void get_cpu_configuration(CPUInfo &cpuinfo);
+/** Some systems have both big and small cores, this function computes the minimum number of cores
+ * that are exactly the same on the system. To maximize performance the library attempts to process
+ * workloads concurrently using as many threads as big cores are available on the system.
+ *
+ * @return The minimum number of common cores.
+ */
+unsigned int get_threads_hint();
+}
+#endif /* __ARM_COMPUTE_RUNTIME_CPU_UTILS_H__ */
diff --git a/arm_compute/runtime/IScheduler.h b/arm_compute/runtime/IScheduler.h
index 1dd7c2cfb2..a0bcada722 100644
--- a/arm_compute/runtime/IScheduler.h
+++ b/arm_compute/runtime/IScheduler.h
@@ -59,17 +59,11 @@ public:
*/
virtual void schedule(ICPPKernel *kernel, unsigned int split_dimension) = 0;
- /** Sets the target CPU architecture.
- *
- * @param[in] target Target CPU.
- */
- void set_target(CPUTarget target);
-
/** Get CPU info.
*
* @return CPU info.
*/
- CPUInfo cpu_info() const;
+ CPUInfo &cpu_info();
/** Get a hint for the best possible number of execution threads
*
* @warning In case we can't work out the best number of threads,
@@ -80,7 +74,7 @@ public:
unsigned int num_threads_hint() const;
protected:
- CPUInfo _info{};
+ CPUInfo _cpu_info;
private:
unsigned int _num_threads_hint = {};
diff --git a/arm_compute/runtime/NEON/AssemblyHelper.h b/arm_compute/runtime/NEON/AssemblyHelper.h
index e2d27cf941..40f28587c2 100644
--- a/arm_compute/runtime/NEON/AssemblyHelper.h
+++ b/arm_compute/runtime/NEON/AssemblyHelper.h
@@ -127,70 +127,32 @@ inline void allocate_workspace(size_t workspace_size, Tensor &workspace, MemoryG
/** Create a wrapper kernel.
*
- * @param[in] a Input tensor A.
- * @param[in] b Input tensor B.
- * @param[in] c (Optional) Input tensor C.
- * @param[out] d Output tensor.
- * @param[in] alpha Alpha value.
- * @param[in] beta Beta value.
- *
- * @return the wrapper kernel.
- */
-template <typename T>
-std::unique_ptr<NEGEMMAssemblyWrapper<T>> create_wrapper_kernel(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta)
-{
- // rework this function, why are we checking data type and other things here ? should we create another function can_run_optimised_kernel() ?
-#if defined(__arm__)
- if(NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && a->info()->data_type() == DataType::F32 && (c == nullptr || beta == 0.f))
- {
- return support::cpp14::make_unique<NEGEMMAssemblyWrapper<T>>();
- }
-#elif defined(__aarch64__)
- if(NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && a->info()->data_type() == DataType::F32 && (c == nullptr || beta == 0.f))
- {
- return support::cpp14::make_unique<NEGEMMAssemblyWrapper<T>>();
- }
- else if(a->info()->data_type() == DataType::F16 && (c == nullptr || beta == 0.f))
- {
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- return support::cpp14::make_unique<NEGEMMAssemblyWrapper<T>>();
-#else /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- ARM_COMPUTE_ERROR("Recompile the library with arch=arm64-v8.2-a to enable support for FP16.");
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- }
-#endif /* defined(__arm__) || defined(__aarch64__) */
- return nullptr;
-}
-
-/** Setup assembly kernel.
- *
* @param[in] a Input tensor A.
* @param[in] b Input tensor B.
- * @param[in] c (Optional) Input tensor C.
- * @param[in] d Output tensor.
+ * @param[out] d Output tensor.
* @param[in] alpha Alpha value.
* @param[in] beta Beta value.
* @param[out] workspace Workspace tensor
* @param[in] memory_group Tensor memory group.
* @param[out] asm_glue Assembly glue kernel.
*
- * @return True if the assembly kernel is setup correctly.
+ * @return the wrapper kernel.
*/
template <typename T>
-inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta,
+inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, ITensor *d, float alpha, float beta,
Tensor &workspace, MemoryGroup &memory_group, T &asm_glue)
{
- const ::CPUInfo *ci = get_CPUInfo();
- const int M = d->info()->tensor_shape().y();
- const int N = d->info()->tensor_shape().x();
- const int K = a->info()->tensor_shape().x();
- unsigned int num_threads = NEScheduler::get().num_threads();
+ const CPUInfo &ci = NEScheduler::get().cpu_info();
+ const int M = d->info()->tensor_shape().y();
+ const int N = d->info()->tensor_shape().x();
+ const int K = a->info()->tensor_shape().x();
+ unsigned int num_threads = NEScheduler::get().num_threads();
// unique_ptr to a Gemm object
- std::unique_ptr<typename T::AssemblyGemm> asm_gemm(arm_gemm::gemm<typename T::TypeOperator, typename T::TypeResult>(*ci, M, N, K, false, false, alpha, beta, num_threads,
- false));
-
+ std::unique_ptr<typename T::AssemblyGemm>
+ asm_gemm(arm_gemm::gemm<typename T::TypeOperator, typename T::TypeResult>(ci, M, N, K, false, false, alpha, beta, num_threads, false));
// arm_compute wrapper for the Gemm object (see above)
- std::unique_ptr<NEGEMMAssemblyWrapper<typename T::AssemblyGemm>> acl_gemm_wrapper = create_wrapper_kernel<typename T::AssemblyGemm>(a, b, c, d, alpha, beta);
+ std::unique_ptr<NEGEMMAssemblyWrapper<typename T::AssemblyGemm>>
+ acl_gemm_wrapper = support::cpp14::make_unique<NEGEMMAssemblyWrapper<typename T::AssemblyGemm>>();
if(acl_gemm_wrapper != nullptr && asm_gemm != nullptr)
{
acl_gemm_wrapper->configure(asm_gemm.get());
@@ -198,15 +160,23 @@ inline bool setup_assembly_kernel(const ITensor *a, const ITensor *b, const ITen
if(workspace_size)
{
// Allocate workspace
- allocate_workspace(workspace_size, workspace, memory_group, 4096, num_threads);
+ const unsigned int alignment = 4096;
+ allocate_workspace(workspace_size, workspace, memory_group, alignment, num_threads);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(workspace.buffer());
asm_gemm->set_working_space(reinterpret_cast<typename T::TypeResult *>(workspace.buffer()));
}
- const unsigned int window_size = asm_gemm->get_window_size();
- if(window_size < num_threads)
+
+ //if we disable this code below in brackets then ConvLayer deadlocks when threads > 1 and
+ //the shapes are In=1x1x1024 Weights=1x1x1024x1001 Biases=1001 Out=1x1x1001
{
- num_threads = window_size;
- asm_gemm->set_nthreads(num_threads);
+ const unsigned int window_size = asm_gemm->get_window_size();
+ if(window_size < num_threads)
+ {
+ num_threads = window_size;
+ asm_gemm->set_nthreads(num_threads);
+ }
}
+
asm_glue._gemm_kernel_asm = std::move(asm_gemm);
asm_glue._optimised_kernel = std::move(acl_gemm_wrapper);
// We need to setup the ptrs in the run() method