author     Georgios Pinitas <georgios.pinitas@arm.com>  2018-02-15 12:29:44 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:47:18 +0000
commit     57c033bb5400ef19e5952f191da3e878e21bba91 (patch)
tree       b325e4a0beba35bcdf29c4ae6dea874d7cd26b9f /arm_compute/core/NEON/wrapper
parent     02ee4291795f64fb510a71c6c754671438635186 (diff)
download   ComputeLibrary-57c033bb5400ef19e5952f191da3e878e21bba91.tar.gz
COMPMID-906: Use fused activation in NEON Batch normalization
Change-Id: I5a6413548b2c9b8972c91ddba57395509dffd87e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/120656
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'arm_compute/core/NEON/wrapper')
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/and.h        |  8
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/dup_n.h      | 60
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h | 34
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/load.h       |  8
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/max.h        | 58
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/min.h        | 58
-rw-r--r--  arm_compute/core/NEON/wrapper/intrinsics/store.h      |  8
-rw-r--r--  arm_compute/core/NEON/wrapper/traits.h                | 51
-rw-r--r--  arm_compute/core/NEON/wrapper/wrapper.h               |  6
9 files changed, 255 insertions, 36 deletions
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/and.h b/arm_compute/core/NEON/wrapper/intrinsics/and.h
index 9b5cfd6b89..4910738e86 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/and.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/and.h
@@ -24,8 +24,6 @@
#ifndef __ARM_COMPUTE_WRAPPER_AND_H__
#define __ARM_COMPUTE_WRAPPER_AND_H__
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
#include <arm_neon.h>
namespace arm_compute
@@ -55,6 +53,8 @@ VAND_IMPL(uint32_t, uint32x4_t, vandq, u32)
VAND_IMPL(int32_t, int32x4_t, vandq, s32)
VAND_IMPL(uint64_t, uint64x2_t, vandq, u64)
VAND_IMPL(int64_t, int64x2_t, vandq, s64)
-}
-}
+
+#undef VAND_IMPL
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_AND_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/dup_n.h b/arm_compute/core/NEON/wrapper/intrinsics/dup_n.h
new file mode 100644
index 0000000000..1c07b4f3ff
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/dup_n.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_DUP_N_H__
+#define __ARM_COMPUTE_WRAPPER_DUP_N_H__
+
+#include "arm_compute/core/NEON/wrapper/traits.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VDUP_N_IMPL(stype, vtype, prefix, postfix, tag) \
+ inline vtype vdup_n(stype value, tag) \
+ { \
+ return prefix##_##postfix(value); \
+ }
+
+VDUP_N_IMPL(uint8_t, uint8x8_t, vdup_n, u8, traits::vector_64_tag)
+VDUP_N_IMPL(int8_t, int8x8_t, vdup_n, s8, traits::vector_64_tag)
+VDUP_N_IMPL(uint16_t, uint16x4_t, vdup_n, u16, traits::vector_64_tag)
+VDUP_N_IMPL(int16_t, int16x4_t, vdup_n, s16, traits::vector_64_tag)
+VDUP_N_IMPL(uint32_t, uint32x2_t, vdup_n, u32, traits::vector_64_tag)
+VDUP_N_IMPL(int32_t, int32x2_t, vdup_n, s32, traits::vector_64_tag)
+VDUP_N_IMPL(float, float32x2_t, vdup_n, f32, traits::vector_64_tag)
+
+VDUP_N_IMPL(uint8_t, uint8x16_t, vdupq_n, u8, traits::vector_128_tag)
+VDUP_N_IMPL(int8_t, int8x16_t, vdupq_n, s8, traits::vector_128_tag)
+VDUP_N_IMPL(uint16_t, uint16x8_t, vdupq_n, u16, traits::vector_128_tag)
+VDUP_N_IMPL(int16_t, int16x8_t, vdupq_n, s16, traits::vector_128_tag)
+VDUP_N_IMPL(uint32_t, uint32x4_t, vdupq_n, u32, traits::vector_128_tag)
+VDUP_N_IMPL(int32_t, int32x4_t, vdupq_n, s32, traits::vector_128_tag)
+VDUP_N_IMPL(float, float32x4_t, vdupq_n, f32, traits::vector_128_tag)
+
+#undef VDUP_N_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_DUP_N_H__ */
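A minimal usage sketch of the new vdup_n overloads (not part of the patch; assumes an AArch32/AArch64 toolchain providing <arm_neon.h>): the traits tag argument is what selects between the 64-bit and 128-bit variants.

    #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"

    #include <arm_neon.h>

    void dup_n_example()
    {
        using namespace arm_compute;

        // vector_64_tag selects the float32x2_t overload (vdup_n_f32 underneath).
        const float32x2_t half = wrapper::vdup_n(1.0f, wrapper::traits::vector_64_tag{});

        // vector_128_tag selects the float32x4_t overload (vdupq_n_f32 underneath).
        const float32x4_t full = wrapper::vdup_n(1.0f, wrapper::traits::vector_128_tag{});

        (void)half;
        (void)full;
    }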
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
new file mode 100644
index 0000000000..b302b366cd
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_INTRINSICS_H__
+#define __ARM_COMPUTE_WRAPPER_INTRINSICS_H__
+
+#include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/max.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/min.h"
+#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
+
+#endif /* __ARM_COMPUTE_WRAPPER_INTRINSICS_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/load.h b/arm_compute/core/NEON/wrapper/intrinsics/load.h
index 9629f2b4e0..442d857497 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/load.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/load.h
@@ -24,8 +24,6 @@
#ifndef __ARM_COMPUTE_WRAPPER_LOAD_H__
#define __ARM_COMPUTE_WRAPPER_LOAD_H__
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
#include <arm_neon.h>
namespace arm_compute
@@ -63,6 +61,8 @@ VLOADQ_IMPL(int32_t, int32x4_t, s32)
//VLOAD_IMPL(uint64_t, uint64x1_t, u64)
//VLOAD_IMPL(int64_t, int64x1_t, s64)
VLOADQ_IMPL(float, float32x4_t, f32)
-}
-}
+
+#undef VLOAD_IMPL
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_LOAD_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/max.h b/arm_compute/core/NEON/wrapper/intrinsics/max.h
new file mode 100644
index 0000000000..1a8e95de87
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/max.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MAX_H__
+#define __ARM_COMPUTE_WRAPPER_MAX_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMAX_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmax(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VMAX_IMPL(uint8_t, uint8x8_t, vmax, u8)
+VMAX_IMPL(int8_t, int8x8_t, vmax, s8)
+VMAX_IMPL(uint16_t, uint16x4_t, vmax, u16)
+VMAX_IMPL(int16_t, int16x4_t, vmax, s16)
+VMAX_IMPL(uint32_t, uint32x2_t, vmax, u32)
+VMAX_IMPL(int32_t, int32x2_t, vmax, s32)
+VMAX_IMPL(float, float32x2_t, vmax, f32)
+
+VMAX_IMPL(uint8_t, uint8x16_t, vmaxq, u8)
+VMAX_IMPL(int8_t, int8x16_t, vmaxq, s8)
+VMAX_IMPL(uint16_t, uint16x8_t, vmaxq, u16)
+VMAX_IMPL(int16_t, int16x8_t, vmaxq, s16)
+VMAX_IMPL(uint32_t, uint32x4_t, vmaxq, u32)
+VMAX_IMPL(int32_t, int32x4_t, vmaxq, s32)
+VMAX_IMPL(float, float32x4_t, vmaxq, f32)
+
+#undef VMAX_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MAX_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/min.h b/arm_compute/core/NEON/wrapper/intrinsics/min.h
new file mode 100644
index 0000000000..ae79631190
--- /dev/null
+++ b/arm_compute/core/NEON/wrapper/intrinsics/min.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_WRAPPER_MIN_H__
+#define __ARM_COMPUTE_WRAPPER_MIN_H__
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace wrapper
+{
+#define VMIN_IMPL(stype, vtype, prefix, postfix) \
+ inline vtype vmin(const vtype &a, const vtype &b) \
+ { \
+ return prefix##_##postfix(a, b); \
+ }
+
+VMIN_IMPL(uint8_t, uint8x8_t, vmin, u8)
+VMIN_IMPL(int8_t, int8x8_t, vmin, s8)
+VMIN_IMPL(uint16_t, uint16x4_t, vmin, u16)
+VMIN_IMPL(int16_t, int16x4_t, vmin, s16)
+VMIN_IMPL(uint32_t, uint32x2_t, vmin, u32)
+VMIN_IMPL(int32_t, int32x2_t, vmin, s32)
+VMIN_IMPL(float, float32x2_t, vmin, f32)
+
+VMIN_IMPL(uint8_t, uint8x16_t, vminq, u8)
+VMIN_IMPL(int8_t, int8x16_t, vminq, s8)
+VMIN_IMPL(uint16_t, uint16x8_t, vminq, u16)
+VMIN_IMPL(int16_t, int16x8_t, vminq, s16)
+VMIN_IMPL(uint32_t, uint32x4_t, vminq, u32)
+VMIN_IMPL(int32_t, int32x4_t, vminq, s32)
+VMIN_IMPL(float, float32x4_t, vminq, f32)
+
+#undef VMIN_IMPL
+} // namespace wrapper
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_WRAPPER_MIN_H__ */
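The vmax and vmin overload sets mirror each other. A small sketch (not from the patch; clamp_f32 is a name chosen here for illustration) of using them together to clamp a 128-bit float vector, which is the element-wise building block a bounded activation needs:

    #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
    #include "arm_compute/core/NEON/wrapper/intrinsics/max.h"
    #include "arm_compute/core/NEON/wrapper/intrinsics/min.h"

    #include <arm_neon.h>

    // Clamp every lane of x into [lo, hi]; overload resolution picks the
    // float32x4_t versions (vmaxq_f32 / vminq_f32 underneath).
    inline float32x4_t clamp_f32(float32x4_t x, float lo, float hi)
    {
        using namespace arm_compute;

        const float32x4_t vlo = wrapper::vdup_n(lo, wrapper::traits::vector_128_tag{});
        const float32x4_t vhi = wrapper::vdup_n(hi, wrapper::traits::vector_128_tag{});

        return wrapper::vmin(wrapper::vmax(x, vlo), vhi);
    }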
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/store.h b/arm_compute/core/NEON/wrapper/intrinsics/store.h
index de57b7350f..be89602c09 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/store.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/store.h
@@ -24,8 +24,6 @@
#ifndef __ARM_COMPUTE_WRAPPER_STORE_H__
#define __ARM_COMPUTE_WRAPPER_STORE_H__
-#include "arm_compute/core/NEON/wrapper/traits.h"
-
#include <arm_neon.h>
namespace arm_compute
@@ -57,6 +55,8 @@ VSTORE_IMPL(int32_t, int32x4_t, vst1q, s32)
//VSTORE_IMPL(uint64_t, 2, vst1q, u64)
//VSTORE_IMPL(int64_t, 2, vst1q, s64)
VSTORE_IMPL(float, float32x4_t, vst1q, f32)
-}
-}
+
+#undef VSTORE_IMPL
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_STORE_H__ */
diff --git a/arm_compute/core/NEON/wrapper/traits.h b/arm_compute/core/NEON/wrapper/traits.h
index 045839cf48..08b2c9b48f 100644
--- a/arm_compute/core/NEON/wrapper/traits.h
+++ b/arm_compute/core/NEON/wrapper/traits.h
@@ -35,31 +35,40 @@ namespace traits
// *INDENT-OFF*
// clang-format off
+/** 64-bit vector tag */
+struct vector_64_tag {};
+/** 128-bit vector tag */
+struct vector_128_tag {};
+
/** Create the appropriate NEON vector given its type and size */
template <typename T, int S> struct neon_vector;
/** Specializations */
-template <> struct neon_vector<uint8_t, 8>{ using type = uint8x8_t; };
-template <> struct neon_vector<int8_t, 8>{ using type = int8x8_t; };
-template <> struct neon_vector<uint8_t, 16>{ using type = uint8x16_t; };
-template <> struct neon_vector<int8_t, 16>{ using type = int8x16_t; };
-template <> struct neon_vector<uint16_t, 4>{ using type = uint16x4_t; };
-template <> struct neon_vector<int16_t, 4>{ using type = int16x4_t; };
-template <> struct neon_vector<uint16_t, 8>{ using type = uint16x8_t; };
-template <> struct neon_vector<int16_t, 8>{ using type = int16x8_t; };
-template <> struct neon_vector<uint32_t, 2>{ using type = uint32x2_t; };
-template <> struct neon_vector<int32_t, 2>{ using type = int32x2_t; };
-template <> struct neon_vector<uint32_t, 4>{ using type = uint32x4_t; };
-template <> struct neon_vector<int32_t, 4>{ using type = int32x4_t; };
-template <> struct neon_vector<uint64_t, 1>{ using type = uint64x1_t; };
-template <> struct neon_vector<int64_t, 1>{ using type = int64x1_t; };
-template <> struct neon_vector<uint64_t, 2>{ using type = uint64x2_t; };
-template <> struct neon_vector<int64_t, 2>{ using type = int64x2_t; };
-template <> struct neon_vector<float_t, 2>{ using type = float32x2_t; };
-template <> struct neon_vector<float_t, 4>{ using type = float32x4_t; };
+template <> struct neon_vector<uint8_t, 8>{ using type = uint8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int8_t, 8>{ using type = int8x8_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint8_t, 16>{ using type = uint8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int8_t, 16>{ using type = int8x16_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint16_t, 4>{ using type = uint16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int16_t, 4>{ using type = int16x4_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint16_t, 8>{ using type = uint16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int16_t, 8>{ using type = int16x8_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint32_t, 2>{ using type = uint32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int32_t, 2>{ using type = int32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint32_t, 4>{ using type = uint32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int32_t, 4>{ using type = int32x4_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<uint64_t, 1>{ using type = uint64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<int64_t, 1>{ using type = int64x1_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<uint64_t, 2>{ using type = uint64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<int64_t, 2>{ using type = int64x2_t; using tag_type = vector_128_tag; };
+template <> struct neon_vector<float_t, 2>{ using type = float32x2_t; using tag_type = vector_64_tag; };
+template <> struct neon_vector<float_t, 4>{ using type = float32x4_t; using tag_type = vector_128_tag; };
+
+/** Helper type template to get the type of a neon vector */
template <typename T, int S> using neon_vector_t = typename neon_vector<T, S>::type;
+/** Helper type template to get the tag type of a neon vector */
+template <typename T, int S> using neon_vector_tag_t = typename neon_vector<T, S>::tag_type;
// clang-format on
// *INDENT-ON*
-}
-}
-}
+} // namespace traits
+} // namespace wrapper
+} // namespace arm_compute
#endif /* __ARM_COMPUTE_WRAPPER_TRAITS_H__ */
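A sketch (not part of the patch; broadcast is a hypothetical helper) of what the new tag_type member and the neon_vector_tag_t alias enable: templated code can pick the correct vdup_n overload from just an element type and a lane count.

    #include "arm_compute/core/NEON/wrapper/intrinsics/dup_n.h"
    #include "arm_compute/core/NEON/wrapper/traits.h"

    #include <arm_neon.h>

    // Broadcast a scalar into the NEON vector with S lanes of element type T.
    template <typename T, int S>
    arm_compute::wrapper::traits::neon_vector_t<T, S> broadcast(T value)
    {
        using namespace arm_compute::wrapper;

        // vector_64_tag for e.g. <float, 2>, vector_128_tag for <float, 4>.
        using tag_t = traits::neon_vector_tag_t<T, S>;
        return vdup_n(value, tag_t{});
    }

    // broadcast<float, 4>(0.5f) yields a float32x4_t; broadcast<int32_t, 2>(7) an int32x2_t.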
diff --git a/arm_compute/core/NEON/wrapper/wrapper.h b/arm_compute/core/NEON/wrapper/wrapper.h
index 9676d04d71..61dc42a69b 100644
--- a/arm_compute/core/NEON/wrapper/wrapper.h
+++ b/arm_compute/core/NEON/wrapper/wrapper.h
@@ -24,10 +24,10 @@
#ifndef __ARM_COMPUTE_WRAPPER_H__
#define __ARM_COMPUTE_WRAPPER_H__
+// Traits
#include "arm_compute/core/NEON/wrapper/traits.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/and.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/load.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/store.h"
+// Intrinsics Overloads
+#include "arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h"
#endif /* __ARM_COMPUTE_WRAPPER_H__ */
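With the aggregator header in place, a kernel only needs the single wrapper.h include. As a hedged illustration of the commit's intent (fused_relu_f32 is purely illustrative and is not the library's batch-normalization kernel code), a ReLU fused onto an already-normalized register could look like:

    #include "arm_compute/core/NEON/wrapper/wrapper.h"

    #include <arm_neon.h>

    // Apply ReLU in-register after the batch-normalization arithmetic:
    // max(x, 0) keeps positive lanes and zeroes the rest.
    inline float32x4_t fused_relu_f32(float32x4_t normalized)
    {
        using namespace arm_compute;

        const float32x4_t zero = wrapper::vdup_n(0.f, wrapper::traits::vector_128_tag{});
        return wrapper::vmax(normalized, zero);
    }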