author     Pablo Tello <pablo.tello@arm.com>           2018-03-14 17:55:27 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>   2018-11-02 16:49:16 +0000
commit     7fad9b1d00f3ee1488ba4038d1371f6ea219f8b7 (patch)
tree       ded71e1cfa8e0c085f8bce5dfc26a99786d60e52 /src/runtime
parent     1562be3e8a449360a90af75f6f1481a30d41be75 (diff)
download   ComputeLibrary-7fad9b1d00f3ee1488ba4038d1371f6ea219f8b7.tar.gz
COMPMID-1021: CPUInfo refactoring.

Removed CPUTarget in favor of the CPUModel type.
CPUInfo now holds a vector of N CPUs.
CPUInfo auto-initialises upon construction with 1 GENERIC CPU.
CPPScheduler fills CPUInfo's vector upon construction (runtime).
IScheduler has a single CPUInfo object and ThreadInfo always gets a pointer to it (avoids copying the vector).

Change-Id: I30f293258c959c87f6bac5eac8b963beb6a4d365
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/124626
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/CPP/CPPScheduler.cpp                                        4
-rw-r--r--  src/runtime/CPP/SingleThreadScheduler.cpp                               4
-rw-r--r--  src/runtime/CPUUtils.cpp                                              404
-rw-r--r--  src/runtime/IScheduler.cpp                                            190
-rw-r--r--  src/runtime/NEON/functions/NEGEMM.cpp                                   3
-rw-r--r--  src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp                  18
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp     4
-rw-r--r--  src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp             8
-rw-r--r--  src/runtime/OMP/OMPScheduler.cpp                                        6
9 files changed, 429 insertions, 212 deletions
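
Before the per-file diffs, a minimal standalone sketch of the ownership pattern this commit introduces: IScheduler owns a single CPUInfo, get_cpu_configuration() fills its per-CPU model vector, and ThreadInfo carries only a pointer to it. The function name example_schedule_setup and the header locations are illustrative assumptions, not part of the patch.

// Sketch only: illustrates the CPUInfo/ThreadInfo wiring described in the
// commit message. Header paths are assumed from the includes in the diff.
#include "arm_compute/core/CPP/CPPTypes.h"   // CPUInfo, ThreadInfo (assumed location)
#include "arm_compute/runtime/CPUUtils.h"    // get_cpu_configuration()

void example_schedule_setup()   // hypothetical helper, for illustration only
{
    arm_compute::CPUInfo cpu_info;                 // auto-initialises with 1 GENERIC CPU
    arm_compute::get_cpu_configuration(cpu_info);  // fills the vector of per-CPU models

    arm_compute::ThreadInfo info;
    info.cpu_info = &cpu_info;                     // pointer: the CPU vector is never copied
    // The scheduler then passes 'info' to ICPPKernel::run(window, info).
}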
diff --git a/src/runtime/CPP/CPPScheduler.cpp b/src/runtime/CPP/CPPScheduler.cpp
index 168ed6e30f..92dce34c71 100644
--- a/src/runtime/CPP/CPPScheduler.cpp
+++ b/src/runtime/CPP/CPPScheduler.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/runtime/CPUUtils.h"
#include <condition_variable>
#include <iostream>
@@ -159,6 +160,7 @@ CPPScheduler::CPPScheduler()
: _num_threads(num_threads_hint()),
_threads(_num_threads - 1)
{
+ get_cpu_configuration(_cpu_info);
}
void CPPScheduler::set_num_threads(unsigned int num_threads)
@@ -178,7 +180,7 @@ void CPPScheduler::schedule(ICPPKernel *kernel, unsigned int split_dimension)
/** [Scheduler example] */
ThreadInfo info;
- info.cpu_info = _info;
+ info.cpu_info = &_cpu_info;
const Window &max_window = kernel->window();
const unsigned int num_iterations = max_window.num_iterations(split_dimension);
diff --git a/src/runtime/CPP/SingleThreadScheduler.cpp b/src/runtime/CPP/SingleThreadScheduler.cpp
index c8285b43a7..2adc14ce80 100644
--- a/src/runtime/CPP/SingleThreadScheduler.cpp
+++ b/src/runtime/CPP/SingleThreadScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void SingleThreadScheduler::schedule(ICPPKernel *kernel, unsigned int split_dime
{
ARM_COMPUTE_UNUSED(split_dimension);
ThreadInfo info;
- info.cpu_info = cpu_info();
+ info.cpu_info = &_cpu_info;
kernel->run(kernel->window(), info);
}
diff --git a/src/runtime/CPUUtils.cpp b/src/runtime/CPUUtils.cpp
new file mode 100644
index 0000000000..7e8bf2bb3f
--- /dev/null
+++ b/src/runtime/CPUUtils.cpp
@@ -0,0 +1,404 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CPUUtils.h"
+
+#include "arm_compute/core/CPP/CPPTypes.h"
+#include "arm_compute/core/Error.h"
+#include "support/ToolchainSupport.h"
+
+#include <array>
+#include <cstdlib>
+#include <cstring>
+#include <fcntl.h>
+#include <fstream>
+#include <map>
+#include <sched.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef BARE_METAL
+#include <regex>
+#include <thread>
+#endif /* BARE_METAL */
+
+#if !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__))
+#include <sys/auxv.h>
+
+/* Get HWCAP bits from asm/hwcap.h */
+#include <asm/hwcap.h>
+#endif /* !BARE_METAL */
+
+/* Make sure the bits we care about are defined, just in case asm/hwcap.h is
+ * out of date (or for bare metal mode) */
+#ifndef HWCAP_ASIMDHP
+#define HWCAP_ASIMDHP (1 << 10)
+#endif /* HWCAP_ASIMDHP */
+
+#ifndef HWCAP_CPUID
+#define HWCAP_CPUID (1 << 11)
+#endif /* HWCAP_CPUID */
+
+#ifndef HWCAP_ASIMDDP
+#define HWCAP_ASIMDDP (1 << 20)
+#endif /* HWCAP_ASIMDDP */
+
+namespace
+{
+using namespace arm_compute;
+
+#if !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__))
+struct PerCPUData
+{
+ CPUModel model = CPUModel::GENERIC;
+ unsigned int midr = 0;
+ bool model_set = false;
+};
+
+/* Convert an MIDR register value to a CPUModel enum value. */
+CPUModel midr_to_model(const unsigned int midr)
+{
+ CPUModel model;
+
+ // Unpack variant and CPU ID
+ const int variant = (midr >> 20) & 0xF;
+ const int cpunum = (midr >> 4) & 0xFFF;
+
+ // Only CPUs we have code paths for are detected. All other CPUs can be safely classed as "GENERIC"
+ switch(cpunum)
+ {
+ case 0xd03:
+ model = CPUModel::A53;
+ break;
+
+ case 0xd05:
+ if(variant != 0)
+ {
+ model = CPUModel::A55r1;
+ }
+ else
+ {
+ model = CPUModel::A55r0;
+ }
+ break;
+
+ default:
+ model = CPUModel::GENERIC;
+ break;
+ }
+
+ return model;
+}
+
+void populate_models_cpuid(std::vector<PerCPUData> &cpusv)
+{
+ // If the CPUID capability is present, MIDR information is provided in /sys. Use that to populate the CPU model table.
+ uint32_t i = 0;
+ for(auto &c : cpusv)
+ {
+ std::stringstream str;
+ str << "/sys/devices/system/cpu/cpu" << i++ << "/regs/identification/midr_el1";
+ std::ifstream file;
+ file.open(str.str(), std::ios::in);
+ if(file.is_open())
+ {
+ std::string line;
+ if(bool(getline(file, line)))
+ {
+ const unsigned long midr = support::cpp11::stoul(line, nullptr, 16);
+ c.midr = (midr & 0xffffffff);
+ c.model = midr_to_model(c.midr);
+ c.model_set = true;
+ }
+ }
+ }
+}
+
+void populate_models_cpuinfo(std::vector<PerCPUData> &cpusv)
+{
+ // If "long-form" cpuinfo is present, parse that to populate models.
+ std::regex proc_regex("^processor.*(\\d+)$");
+ std::regex imp_regex("^CPU implementer.*0x(..)$");
+ std::regex var_regex("^CPU variant.*0x(.)$");
+ std::regex part_regex("^CPU part.*0x(...)$");
+ std::regex rev_regex("^CPU revision.*(\\d+)$");
+
+ std::ifstream file;
+ file.open("/proc/cpuinfo", std::ios::in);
+
+ if(file.is_open())
+ {
+ std::string line;
+ int midr = 0;
+ int curcpu = -1;
+
+ while(bool(getline(file, line)))
+ {
+ std::smatch match;
+
+ if(std::regex_match(line, match, proc_regex))
+ {
+ std::string id = match[1];
+ int newcpu = support::cpp11::stoi(id, nullptr, 0);
+
+ if(curcpu >= 0 && midr == 0)
+ {
+ // Matched a new CPU ID without any description of the previous one - looks like old format.
+ return;
+ }
+
+ if(curcpu >= 0)
+ {
+ cpusv[curcpu].midr = midr;
+ cpusv[curcpu].model = midr_to_model(midr);
+ cpusv[curcpu].model_set = true;
+ }
+
+ midr = 0;
+ curcpu = newcpu;
+
+ continue;
+ }
+
+ if(std::regex_match(line, match, imp_regex))
+ {
+ int impv = support::cpp11::stoi(match[1], nullptr, 16);
+ midr |= (impv << 24);
+ continue;
+ }
+
+ if(std::regex_match(line, match, var_regex))
+ {
+ int varv = support::cpp11::stoi(match[1], nullptr, 16);
+ midr |= (varv << 16);
+ continue;
+ }
+
+ if(std::regex_match(line, match, part_regex))
+ {
+ int partv = support::cpp11::stoi(match[1], nullptr, 16);
+ midr |= (partv << 4);
+ continue;
+ }
+
+ if(std::regex_match(line, match, rev_regex))
+ {
+ int regv = support::cpp11::stoi(match[1], nullptr, 10);
+ midr |= (regv);
+ midr |= (0xf << 16);
+ continue;
+ }
+ }
+
+ if(curcpu >= 0)
+ {
+ cpusv[curcpu].midr = midr;
+ cpusv[curcpu].model = midr_to_model(midr);
+ cpusv[curcpu].model_set = true;
+ }
+ }
+}
+
+int get_max_cpus()
+{
+ int max_cpus = 1;
+#if !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__))
+ std::ifstream CPUspresent;
+ CPUspresent.open("/sys/devices/system/cpu/present", std::ios::in);
+ bool success = false;
+
+ if(CPUspresent.is_open())
+ {
+ std::string line;
+
+ if(bool(getline(CPUspresent, line)))
+ {
+ /* The content of this file is a list of ranges or single values, e.g.
+ * 0-5, or 1-3,5,7 or similar. As we are interested in the
+ * max valid ID, we just need to find the last valid
+ * delimiter ('-' or ',') and parse the integer immediately after that.
+ */
+ auto startfrom = line.begin();
+
+ for(auto i = line.begin(); i < line.end(); ++i)
+ {
+ if(*i == '-' || *i == ',')
+ {
+ startfrom = i + 1;
+ }
+ }
+
+ line.erase(line.begin(), startfrom);
+
+ max_cpus = support::cpp11::stoi(line, nullptr, 0) + 1;
+ success = true;
+ }
+ }
+
+ // Return std::thread::hardware_concurrency() as a fallback.
+ if(!success)
+ {
+ max_cpus = std::thread::hardware_concurrency();
+ }
+#endif /* BARE_METAL */
+
+ return max_cpus;
+}
+#endif /* !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__)) */
+
+} // namespace
+
+namespace arm_compute
+{
+void get_cpu_configuration(CPUInfo &cpuinfo)
+{
+#if !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__))
+ bool cpuid = false;
+ bool fp16_support = false;
+ bool dot_support = false;
+
+ const uint32_t hwcaps = getauxval(AT_HWCAP);
+
+ if((hwcaps & HWCAP_CPUID) != 0)
+ {
+ cpuid = true;
+ }
+
+ if((hwcaps & HWCAP_ASIMDHP) != 0)
+ {
+ fp16_support = true;
+ }
+
+ if((hwcaps & HWCAP_ASIMDDP) != 0)
+ {
+ dot_support = true;
+ }
+
+#ifdef __aarch64__
+ /* Pre-4.15 kernels don't have the ASIMDDP bit.
+ *
+ * Although the CPUID bit allows us to read the feature register
+ * directly, the kernel quite sensibly masks this to only show
+ * features known by it to be safe to show to userspace. As a
+ * result, pre-4.15 kernels won't show the relevant bit in the
+ * feature registers either.
+ *
+ * So for now, use a whitelist of CPUs known to support the feature.
+ */
+ if(!dot_support && cpuid)
+ {
+ /* List of CPUs with dot product support: A55r1 A75r1 A75r2 */
+ const unsigned int dotprod_whitelist_masks[] = { 0xfff0fff0, 0xfff0fff0, 0xfff0fff0, 0 };
+ const unsigned int dotprod_whitelist_values[] = { 0x4110d050, 0x4110d0a0, 0x4120d0a0, 0 };
+
+ unsigned long cpuid;
+
+ __asm __volatile(
+ "mrs %0, midr_el1\n"
+ : "=r"(cpuid)
+ :
+ : );
+
+ for(int i = 0; dotprod_whitelist_values[i] != 0; i++)
+ {
+ if((cpuid & dotprod_whitelist_masks[i]) == dotprod_whitelist_values[i])
+ {
+ dot_support = true;
+ break;
+ }
+ }
+ }
+#endif /* __aarch64__ */
+ const unsigned int max_cpus = get_max_cpus();
+ cpuinfo.set_cpu_num(max_cpus);
+ cpuinfo.set_fp16(fp16_support);
+ cpuinfo.set_dotprod(dot_support);
+ std::vector<PerCPUData> percpu(max_cpus);
+ if(cpuid)
+ {
+ populate_models_cpuid(percpu);
+ }
+ else
+ {
+ populate_models_cpuinfo(percpu);
+ }
+ int j(0);
+ for(const auto &v : percpu)
+ {
+ cpuinfo.set_cpu_model(j++, v.model);
+ }
+#else /* !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__)) */
+ ARM_COMPUTE_UNUSED(cpuinfo);
+#endif /* !defined(BARE_METAL) && (defined(__arm__) || defined(__aarch64__)) */
+}
+
+unsigned int get_threads_hint()
+{
+ unsigned int num_threads_hint = 1;
+
+#ifndef BARE_METAL
+ std::map<std::string, unsigned int> cpu_part_occurrence_map;
+
+ // CPU part regex
+ std::regex cpu_part_rgx(R"(.*CPU part.+?(?=:).+?(?=\w+)(\w+).*)");
+ std::smatch cpu_part_match;
+
+ // Read cpuinfo and get occurrence of each core
+ std::ifstream cpuinfo;
+ cpuinfo.open("/proc/cpuinfo", std::ios::in);
+ if(cpuinfo.is_open())
+ {
+ std::string line;
+ while(bool(getline(cpuinfo, line)))
+ {
+ if(std::regex_search(line.cbegin(), line.cend(), cpu_part_match, cpu_part_rgx))
+ {
+ std::string cpu_part = cpu_part_match[1];
+ if(cpu_part_occurrence_map.find(cpu_part) != cpu_part_occurrence_map.end())
+ {
+ cpu_part_occurrence_map[cpu_part]++;
+ }
+ else
+ {
+ cpu_part_occurrence_map[cpu_part] = 1;
+ }
+ }
+ }
+ }
+
+ // Get min number of threads
+ auto min_common_cores = std::min_element(cpu_part_occurrence_map.begin(), cpu_part_occurrence_map.end(),
+ [](const std::pair<std::string, unsigned int> &p1, const std::pair<std::string, unsigned int> &p2)
+ {
+ return p1.second < p2.second;
+ });
+
+ // Set thread hint
+ num_threads_hint = cpu_part_occurrence_map.empty() ? std::thread::hardware_concurrency() : min_common_cores->second;
+#endif /* BARE_METAL */
+
+ return num_threads_hint;
+}
+
+} // namespace arm_compute
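
A worked example of the MIDR unpacking performed by midr_to_model() above, using one of the values from the dot-product whitelist (standalone sketch, not part of the patch):

// Decode 0x4110d050: variant = (midr >> 20) & 0xF = 0x1,
// part = (midr >> 4) & 0xFFF = 0xd05 -> Cortex-A55; variant != 0 -> A55r1.
#include <cstdio>

int main()
{
    const unsigned int midr    = 0x4110d050;
    const int          variant = (midr >> 20) & 0xF;
    const int          part    = (midr >> 4) & 0xFFF;
    std::printf("variant=0x%x part=0x%x\n", variant, part); // variant=0x1 part=0xd05
    return 0;
}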
diff --git a/src/runtime/IScheduler.cpp b/src/runtime/IScheduler.cpp
index 583cb40eca..54a2bd2182 100644
--- a/src/runtime/IScheduler.cpp
+++ b/src/runtime/IScheduler.cpp
@@ -23,202 +23,20 @@
*/
#include "arm_compute/runtime/IScheduler.h"
-#include <array>
-#include <cstdlib>
-#include <cstring>
-#include <fcntl.h>
-#include <fstream>
-#include <map>
-#include <sched.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#ifndef BARE_METAL
-#include <regex>
-#include <thread>
-#endif /* BARE_METAL */
-
-namespace
-{
-unsigned int get_threads_hint()
-{
- unsigned int num_threads_hint = 1;
-
-#ifndef BARE_METAL
- std::map<std::string, unsigned int> cpu_part_occurrence_map;
-
- // CPU part regex
- std::regex cpu_part_rgx(R"(.*CPU part.+?(?=:).+?(?=\w+)(\w+).*)");
- std::smatch cpu_part_match;
-
- // Read cpuinfo and get occurrence of each core
- std::ifstream cpuinfo;
- cpuinfo.open("/proc/cpuinfo", std::ios::in);
- if(cpuinfo.is_open())
- {
- std::string line;
- while(bool(getline(cpuinfo, line)))
- {
- if(std::regex_search(line.cbegin(), line.cend(), cpu_part_match, cpu_part_rgx))
- {
- std::string cpu_part = cpu_part_match[1];
- if(cpu_part_occurrence_map.find(cpu_part) != cpu_part_occurrence_map.end())
- {
- cpu_part_occurrence_map[cpu_part]++;
- }
- else
- {
- cpu_part_occurrence_map[cpu_part] = 1;
- }
- }
- }
- }
-
- // Get min number of threads
- auto min_common_cores = std::min_element(cpu_part_occurrence_map.begin(), cpu_part_occurrence_map.end(),
- [](const std::pair<std::string, unsigned int> &p1, const std::pair<std::string, unsigned int> &p2)
- {
- return p1.second < p2.second;
- });
-
- // Set thread hint
- num_threads_hint = cpu_part_occurrence_map.empty() ? std::thread::hardware_concurrency() : min_common_cores->second;
-#endif /* BARE_METAL */
-
- return num_threads_hint;
-}
-
-unsigned int get_cpu_impl()
-{
-#ifndef BARE_METAL
- int fd = open("/proc/cpuinfo", 0); // NOLINT
- std::array<char, 3000> buff{ {} };
- char *pos = nullptr;
- char *end = nullptr;
- bool foundid = false;
-
- int cpu = sched_getcpu();
-
- if(fd == -1)
- {
- return 0;
- }
-
- int charsread = read(fd, buff.data(), 3000);
- pos = buff.data();
- end = buff.data() + charsread;
-
- close(fd);
-
- /* So, to date I've encountered two formats for /proc/cpuinfo.
- *
- * One of them just lists processor : n for each processor (with no
- * other info), then at the end lists part information for the current
- * CPU.
- *
- * The other has an entire clause (including part number info) for each
- * CPU in the system, with "processor : n" headers.
- *
- * We can cope with either of these formats by waiting to see
- * "processor: n" (where n = our CPU ID), and then looking for the next
- * "CPU part" field.
- */
- while(pos < end)
- {
- if(foundid && strncmp(pos, "CPU part", 8) == 0)
- {
- /* Found part number */
- pos += 11;
-
- for(char *ch = pos; ch < end; ch++)
- {
- if(*ch == '\n')
- {
- *ch = '\0';
- break;
- }
- }
-
- return strtoul(pos, nullptr, 0);
- }
-
- if(strncmp(pos, "processor", 9) == 0)
- {
- /* Found processor ID, see if it's ours. */
- pos += 11;
-
- for(char *ch = pos; ch < end; ch++)
- {
- if(*ch == '\n')
- {
- *ch = '\0';
- break;
- }
- }
-
- int num = strtol(pos, nullptr, 0);
-
- if(num == cpu)
- {
- foundid = true;
- }
- }
-
- while(pos < end)
- {
- char ch = *pos++;
- if(ch == '\n' || ch == '\0')
- {
- break;
- }
- }
- }
-#endif /* BARE_METAL */
-
- return 0;
-}
-} // namespace
+#include "arm_compute/runtime/CPUUtils.h"
namespace arm_compute
{
IScheduler::IScheduler()
+ : _cpu_info()
{
// Work out the best possible number of execution threads
_num_threads_hint = get_threads_hint();
-
- // Work out the CPU implementation
- switch(get_cpu_impl())
- {
- case 0xd0f:
- _info.CPU = CPUTarget::A55_DOT;
- break;
- case 0xd03:
- _info.CPU = CPUTarget::A53;
- break;
- default:
-#ifdef __arm__
- _info.CPU = CPUTarget::ARMV7;
-#elif __aarch64__
- _info.CPU = CPUTarget::ARMV8;
-#else /* __arm__ || __aarch64__ */
- _info.CPU = CPUTarget::INTRINSICS;
-#endif /* __arm__ || __aarch64__ */
- break;
- }
-
- _info.L1_size = 31000;
- _info.L2_size = 500000;
-}
-
-void IScheduler::set_target(CPUTarget target)
-{
- _info.CPU = target;
}
-CPUInfo IScheduler::cpu_info() const
+CPUInfo &IScheduler::cpu_info()
{
- return _info;
+ return _cpu_info;
}
unsigned int IScheduler::num_threads_hint() const
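
For reference, a standalone sketch of the heuristic behind get_threads_hint() (now living in CPUUtils.cpp above): count how many cores report each "CPU part" in /proc/cpuinfo and take the size of the smallest group as the thread hint. The core counts below are hypothetical, e.g. a 4+2 big.LITTLE system.

#include <algorithm>
#include <cstdio>
#include <map>
#include <string>

int main()
{
    // Hypothetical counts parsed from /proc/cpuinfo: 4x part 0xd03, 2x part 0xd09
    std::map<std::string, unsigned int> cpu_part_occurrence_map{ { "0xd03", 4 }, { "0xd09", 2 } };

    auto min_common_cores = std::min_element(cpu_part_occurrence_map.begin(), cpu_part_occurrence_map.end(),
                                             [](const std::pair<const std::string, unsigned int> &p1,
                                                const std::pair<const std::string, unsigned int> &p2)
    {
        return p1.second < p2.second;
    });

    std::printf("num_threads_hint = %u\n", min_common_cores->second); // prints 2 (the smaller cluster)
    return 0;
}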
diff --git a/src/runtime/NEON/functions/NEGEMM.cpp b/src/runtime/NEON/functions/NEGEMM.cpp
index c8cba8a174..18e6e919c3 100644
--- a/src/runtime/NEON/functions/NEGEMM.cpp
+++ b/src/runtime/NEON/functions/NEGEMM.cpp
@@ -65,7 +65,8 @@ void NEGEMM::configure(const ITensor *a, const ITensor *b, const ITensor *c, ITe
// Check if we need to reshape the matrix B only on the first run
_reshape_b_only_on_first_run = gemm_info.reshape_b_only_on_first_run();
_run_vector_matrix_multiplication = a->info()->dimension(1) < 2;
- const bool run_optimised = setup_assembly_kernel(a, b, c, d, alpha, beta, _workspace, _memory_group, _asm_glue);
+
+ const bool run_optimised = a->info()->data_type() == DataType::F32 && (c == nullptr || beta == 0.f) && setup_assembly_kernel(a, b, d, alpha, beta, _workspace, _memory_group, _asm_glue);
// Check if the first input tensor is a vector.
// If so, all the kernels for reshaping the tensors can be skipped
diff --git a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
index b2dd0227a5..cdbd32373a 100644
--- a/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEGEMMConvolutionLayer.cpp
@@ -271,8 +271,7 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
const unsigned int fixed_point_position = input->info()->fixed_point_position();
const ITensor *biases_to_use = (_append_bias) ? biases : nullptr;
- bool run_optimised =
- (NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && dt == DataType::F32) || (NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && dt == DataType::F32);
+ bool run_optimised = dt == DataType::F32;
// Reshape weights if needed
if(run_optimised)
@@ -369,8 +368,10 @@ void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weig
// Configure matrix multiply
if(run_optimised)
{
- run_optimised = setup_assembly_kernel(&_input_im2col_reshaped, weights, nullptr, &_gemm_output, 1.f, 0.f, _workspace, _memory_group, _asm_glue);
- ARM_COMPUTE_ERROR_ON_MSG(run_optimised == false, "setup_assembly_kernel failed.");
+ if(!setup_assembly_kernel(&_input_im2col_reshaped, weights, &_gemm_output, 1.f, 0.f, _workspace, _memory_group, _asm_glue))
+ {
+ ARM_COMPUTE_ERROR("setup_assembly_kernel failed.");
+ }
}
else
{
@@ -450,17 +451,10 @@ Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorI
std::unique_ptr<ITensorInfo> reshaped_weights = weights->clone();
bool optimised_kernel = false;
-#if defined(__arm__)
- if(NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && dt == DataType::F32)
- {
- optimised_kernel = true;
- }
-#elif defined(__aarch64__)
- if(NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && dt == DataType::F32)
+ if(dt == DataType::F32)
{
optimised_kernel = true;
}
-#endif /* defined(__arm__) || defined(__aarch64__) */
// Reshape weights if needed
if(optimised_kernel)
diff --git a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
index a02872c3d5..27dd6c51d7 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpAssemblyMatrixMultiplyCore.cpp
@@ -58,13 +58,13 @@ void NEGEMMLowpAssemblyMatrixMultiplyCore::configure(const ITensor *a, const ITe
{
case DataType::S8:
{
- run_optimised = setup_assembly_kernel(a, b, nullptr, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_signed);
+ run_optimised = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_signed);
break;
}
case DataType::QASYMM8:
case DataType::U8:
{
- run_optimised = setup_assembly_kernel(a, b, nullptr, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_unsigned);
+ run_optimised = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_unsigned);
break;
}
default:
diff --git a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
index b0a7bdd7bc..7372c6ca57 100644
--- a/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.cpp
@@ -62,13 +62,13 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
{
case DataType::S8:
{
- _dot_product_path = setup_assembly_kernel(a, b, nullptr, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_signed);
+ _dot_product_path = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_signed);
break;
}
case DataType::QASYMM8:
case DataType::U8:
{
- _dot_product_path = setup_assembly_kernel(a, b, nullptr, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_unsigned);
+ _dot_product_path = setup_assembly_kernel(a, b, output, 1.f, 1.f, _workspace, _memory_group, _asm_glue_unsigned);
break;
}
default:
@@ -156,10 +156,6 @@ void NEGEMMLowpMatrixMultiplyCore::configure(const ITensor *a, const ITensor *b,
_tmp_a.allocator()->allocate();
_tmp_b.allocator()->allocate();
}
- else
- {
- _workspace.allocator()->allocate();
- }
if(_a_offset != 0)
{
diff --git a/src/runtime/OMP/OMPScheduler.cpp b/src/runtime/OMP/OMPScheduler.cpp
index 3b30f1e56b..795c96caf0 100644
--- a/src/runtime/OMP/OMPScheduler.cpp
+++ b/src/runtime/OMP/OMPScheduler.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/runtime/CPUUtils.h"
#include <omp.h>
@@ -41,6 +42,7 @@ OMPScheduler &OMPScheduler::get()
OMPScheduler::OMPScheduler() // NOLINT
: _num_threads(omp_get_max_threads())
{
+ get_cpu_configuration(_cpu_info);
}
unsigned int OMPScheduler::num_threads() const
@@ -59,7 +61,7 @@ void OMPScheduler::schedule(ICPPKernel *kernel, unsigned int split_dimension)
ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
ThreadInfo info;
- info.cpu_info = _info;
+ info.cpu_info = &_cpu_info;
const Window &max_window = kernel->window();
const unsigned int num_iterations = max_window.num_iterations(split_dimension);
@@ -71,7 +73,7 @@ void OMPScheduler::schedule(ICPPKernel *kernel, unsigned int split_dimension)
}
else
{
- #pragma omp parallel private(info) num_threads(info.num_threads)
+ #pragma omp parallel firstprivate(info) num_threads(info.num_threads)
{
const int tid = omp_get_thread_num();
Window win = max_window.split_window(split_dimension, tid, info.num_threads);
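
Last, a note on the private -> firstprivate change above: with private(info) each OpenMP thread gets a default-constructed ThreadInfo inside the parallel region, so the cpu_info pointer set before the pragma would not be visible there; firstprivate(info) gives each thread a copy of the initialised object instead. A minimal standalone sketch of the difference, using a plain int in place of ThreadInfo (illustration only):

#include <cstdio>
#include <omp.h>

int main()
{
    int value = 42; // stands in for the initialised ThreadInfo

    // firstprivate: every thread starts from a copy of 'value' as set above,
    // then adjusts its own copy (as the scheduler does with its per-thread fields).
    // With private(value) the per-thread copy would be uninitialised instead.
    #pragma omp parallel firstprivate(value) num_threads(4)
    {
        value += omp_get_thread_num();
        std::printf("thread %d sees %d\n", omp_get_thread_num(), value);
    }
    return 0;
}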