-rw-r--r--  .gitignore | 2
-rw-r--r--  .pre-commit-config.yaml | 2
-rw-r--r--  CMakeLists.txt | 92
-rw-r--r--  README.md | 7
-rw-r--r--  include/attribute.def | 80
-rw-r--r--  include/attribute.h | 49
-rw-r--r--  include/float_utils.h | 533
-rw-r--r--  include/numpy_utils.h | 104
-rw-r--r--  include/tosa_generated.h | 1987
-rw-r--r--  include/tosa_serialization_handler.h | 202
-rw-r--r--  pyproject.toml | 43
-rw-r--r--  python/serializer/tosa_serializer.py | 906
-rw-r--r--  python/tosa/ArithmeticRightShiftAttribute.py | 26
-rw-r--r--  python/tosa/Attribute.py | 31
-rw-r--r--  python/tosa/AxisAttribute.py | 26
-rw-r--r--  python/tosa/ClampAttribute.py | 99
-rw-r--r--  python/tosa/CondIfAttribute.py | 37
-rw-r--r--  python/tosa/ConvAttribute.py | 101
-rw-r--r--  python/tosa/CustomAttribute.py | 106
-rw-r--r--  python/tosa/DType.py | 8
-rw-r--r--  python/tosa/FFTAttribute.py | 67
-rw-r--r--  python/tosa/FullyConnectedAttribute.py | 33
-rw-r--r--  python/tosa/MatMulAttribute.py | 33
-rw-r--r--  python/tosa/MulAttribute.py | 26
-rw-r--r--  python/tosa/NegateAttribute.py | 33
-rw-r--r--  python/tosa/Op.py | 13
-rw-r--r--  python/tosa/PadAttribute.py | 59
-rw-r--r--  python/tosa/PoolAttribute.py | 88
-rw-r--r--  python/tosa/RFFTAttribute.py | 54
-rw-r--r--  python/tosa/RescaleAttribute.py | 128
-rw-r--r--  python/tosa/ReshapeAttribute.py | 57
-rw-r--r--  python/tosa/ResizeAttribute.py | 156
-rw-r--r--  python/tosa/ResizeMode.py | 1
-rw-r--r--  python/tosa/SliceAttribute.py | 86
-rw-r--r--  python/tosa/TableAttribute.py | 33
-rw-r--r--  python/tosa/TileAttribute.py | 57
-rw-r--r--  python/tosa/TosaBasicBlock.py | 82
-rw-r--r--  python/tosa/TosaGraph.py | 50
-rw-r--r--  python/tosa/TosaOperator.py | 68
-rw-r--r--  python/tosa/TosaRegion.py | 91
-rw-r--r--  python/tosa/TosaTensor.py | 100
-rw-r--r--  python/tosa/TransposeAttribute.py | 33
-rw-r--r--  python/tosa/TransposeConvAttribute.py | 101
-rw-r--r--  python/tosa/Version.py | 61
-rw-r--r--  python/tosa/WhileLoopAttribute.py | 37
-rw-r--r--  schema/tosa.fbs | 144
-rw-r--r--  setup.py | 18
-rw-r--r--  src/numpy_utils.cpp | 197
-rw-r--r--  src/tosa_serialization_handler.cpp | 794
-rwxr-xr-x  test/scripts/test_npy_fileio.py | 2
-rwxr-xr-x  test/scripts/test_serialization.py | 197
-rw-r--r--  test/scripts/testfiles/test.tosa | bin 544 -> 0 bytes
-rw-r--r--  test/src/serialization_npy_test.cpp | 61
m---------  third_party/flatbuffers | 0
-rw-r--r--  third_party/half/ChangeLog.txt | 213
-rw-r--r--  third_party/half/LICENSE.txt | 21
-rw-r--r--  third_party/half/README.txt | 317
-rw-r--r--  third_party/half/include/half.hpp | 4605
58 files changed, 9745 insertions, 2812 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..5034363
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+__pycache__/
+build/
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f40778a..3afbd2d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -11,7 +11,7 @@ repos:
#j.hooks:
#j.- id: black
-- repo: https://gitlab.com/pycqa/flake8
+- repo: https://github.com/pycqa/flake8
rev: 3.7.9
hooks:
- id: flake8
diff --git a/CMakeLists.txt b/CMakeLists.txt
index aaba60e..5f4f851 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,6 +1,6 @@
#TOSA serialization library
-# Copyright (c) 2020-2021, ARM Limited.
+# Copyright (c) 2020-2023, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,47 +19,89 @@
cmake_minimum_required(VERSION 3.13.4)
project(TosaSerialization)
-set(CMAKE_CXX_STANDARD 14 CACHE STRING "C++ standard to conform to")
-set(CMAKE_CXX_STANDARD_REQUIRED YES)
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_VERBOSE_MAKEFILE ON)
-include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
-include_directories(${PROJECT_SOURCE_DIR}/third_party/flatbuffers/include)
+option(BUILD_TESTS "Build test applications" ON)
+option(FLATBUFFERS_ROOT "Location where the flatbuffers 'include' and 'lib' folders to be found" Off)
+
+include_directories(${PROJECT_SOURCE_DIR}/third_party/half/include)
-# Turn off unnecessary flatbuffers targets
-set(FLATBUFFERS_BUILD_TESTS OFF)
-add_subdirectory(third_party/flatbuffers)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
-add_library(tosa_serialization_lib STATIC
+add_library(tosa_serialization_lib
src/tosa_serialization_handler.cpp
src/numpy_utils.cpp
)
-target_link_libraries(tosa_serialization_lib PRIVATE flatbuffers)
-
-add_executable(serialization_read_write
- test/src/serialization_read_write.cpp
+# If flatbuffers is built externally just link it
+if (TARGET flatbuffers)
+ target_link_libraries(tosa_serialization_lib PRIVATE flatbuffers)
+else()
+ # Verify we have a valid flatbuffers include path.
+ # We will explicitly exclude the system include directories and only
+ # accept either a user supplied value or the local third_party/flatbuffers.
+ find_path(FLATBUFFERS_INCLUDE_PATH flatbuffers/flatbuffers.h
+ NO_DEFAULT_PATH
+ HINTS ${FLATBUFFERS_ROOT} ./third_party/flatbuffers
+ PATH_SUFFIXES include)
+ message(STATUS "Flatbuffers include located at: ${FLATBUFFERS_INCLUDE_PATH}")
+ include_directories(${FLATBUFFERS_INCLUDE_PATH})
+
+ # Next is the library.
+ # We will explicitly exclude the system lib directories and only accept
+ # either a user supplied value or the local third_party/flatbuffers.
+ find_library(FLATBUFFERS_LIBRARY
+ NAMES libflatbuffers.a flatbuffers
+ NO_DEFAULT_PATH
+ HINTS ${FLATBUFFERS_ROOT} ./third_party/flatbuffers
+ PATH_SUFFIXES lib)
+
+ if(FLATBUFFERS_LIBRARY)
+ message(STATUS "Flatbuffers library located at: ${FLATBUFFERS_LIBRARY}")
+ target_link_libraries(tosa_serialization_lib PRIVATE ${FLATBUFFERS_LIBRARY})
+ else()
+ # It's not there we treat third_party/flatbuffers as a sub project.
+ # In this case we'll need to build the downloaded source.
+ # Turn off unnecessary flatbuffers targets
+ set(FLATBUFFERS_BUILD_TESTS OFF)
+ add_subdirectory(third_party/flatbuffers)
+ target_link_libraries(tosa_serialization_lib PRIVATE flatbuffers)
+ endif()
+endif()
+
+set(public_headers)
+list(APPEND public_headers
+ include/attribute.h
+ include/attribute.def
+ include/float_utils.h
+ include/numpy_utils.h
+ include/tosa_generated.h
+ include/tosa_serialization_handler.h
)
-target_link_libraries(serialization_read_write
- tosa_serialization_lib
-)
+set_target_properties(tosa_serialization_lib PROPERTIES PUBLIC_HEADER "${public_headers}")
-add_executable(serialization_npy_test
- test/src/serialization_npy_test.cpp
-)
+# Optionally build test executables.
+if (BUILD_TESTS)
+ add_executable(serialization_npy_test
+ test/src/serialization_npy_test.cpp
+ )
-target_link_libraries(serialization_npy_test
- tosa_serialization_lib
-)
+ target_link_libraries(serialization_npy_test
+ tosa_serialization_lib
+ )
+endif()
set(TOSA_SERIALIZATION_LIB_CMAKE_DIR "${CMAKE_INSTALL_LIBDIR}/cmake/tosa_serialization_lib")
-
+# Follow GNU packaging norms for installation directory structure.
+include(GNUInstallDirs)
install(
TARGETS tosa_serialization_lib EXPORT TosaSerializationLibTargets
- ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
- INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+ PUBLIC_HEADER
+ ARCHIVE
)
install(EXPORT TosaSerializationLibTargets
diff --git a/README.md b/README.md
index 979b76f..9d852df 100644
--- a/README.md
+++ b/README.md
@@ -158,3 +158,10 @@ numpy file, (4) format and (5) usage.
# License
The *TOSA Serialization Library* is licensed under Apache-2.0.
+
+## Third Party Projects
+
+- The [half](https://half.sourceforge.net/) library is licensed under MIT license.
+
+Other third party projects are referenced as sub-modules and as such, are licensed under the licenses stated in their projects.
+
diff --git a/include/attribute.def b/include/attribute.def
index ea91869..723543e 100644
--- a/include/attribute.def
+++ b/include/attribute.def
@@ -1,5 +1,5 @@
-// Copyright (c) 2020-2021, ARM Limited.
+// Copyright (c) 2020-2024, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -26,68 +26,56 @@
...: variadic variables for more arguments, depending on NUM_ARGS_IN_ATTRIBUTES
*/
-DEF_ATTRIBUTE(Pool, 5,
+DEF_ATTRIBUTE(Pool, 6,
int32_t, V, pad,
int32_t, V, kernel,
int32_t, V, stride,
int32_t, S, input_zp,
- int32_t, S, output_zp)
+ int32_t, S, output_zp,
+ DType, S, acc_type)
-DEF_ATTRIBUTE(Conv, 5,
+DEF_ATTRIBUTE(Conv, 7,
int32_t, V, pad,
int32_t, V, stride,
int32_t, V, dilation,
int32_t, S, input_zp,
- int32_t, S, weight_zp)
+ int32_t, S, weight_zp,
+ bool, S, local_bound,
+ DType, S, acc_type)
-DEF_ATTRIBUTE(TransposeConv, 5,
+DEF_ATTRIBUTE(TransposeConv, 7,
int32_t, V, out_pad,
int32_t, V, stride,
int32_t, V, output_shape,
int32_t, S, input_zp,
- int32_t, S, weight_zp)
+ int32_t, S, weight_zp,
+ bool, S, local_bound,
+ DType, S, acc_type)
-DEF_ATTRIBUTE(Pad, 3,
- int32_t, V, padding,
- int32_t, S, pad_const_int,
- float, S, pad_const_fp)
+DEF_ATTRIBUTE(Pad, 1,
+ uint8_t, V, pad_const)
DEF_ATTRIBUTE(Axis, 1,
int32_t, S, axis)
-DEF_ATTRIBUTE(Reshape, 1,
- int32_t, V, new_shape)
-
-DEF_ATTRIBUTE(Slice, 2,
- int32_t, V, start,
- int32_t, V, size)
-
-DEF_ATTRIBUTE(Tile, 1,
- int32_t, V, multiples)
-
-DEF_ATTRIBUTE(Resize, 7,
- int32_t, V, output_size,
- int32_t, V, stride,
- int32_t, V, offset,
- int32_t, S, shift,
- float, V, stride_fp,
- float, V, offset_fp,
+DEF_ATTRIBUTE(Resize, 4,
+ int16_t, V, scale,
+ int16_t, V, offset,
+ int16_t, V, border,
ResizeMode, S, mode)
-DEF_ATTRIBUTE(Clamp, 4,
- int32_t, S, min_int,
- int32_t, S, max_int,
- float, S, min_fp,
- float, S, max_fp)
+DEF_ATTRIBUTE(Clamp, 2,
+ uint8_t, V, min_val,
+ uint8_t, V, max_val)
DEF_ATTRIBUTE(Rescale, 7,
int32_t, S, input_zp,
int32_t, S, output_zp,
- int32_t, V, multiplier,
- int32_t, V, shift,
bool, S, scale32,
bool, S, double_round,
- bool, S, per_channel)
+ bool, S, per_channel,
+ bool, S, input_unsigned,
+ bool, S, output_unsigned)
DEF_ATTRIBUTE(Mul, 1,
int32_t, S, shift)
@@ -96,12 +84,12 @@ DEF_ATTRIBUTE(ArithmeticRightShift, 1,
bool, S, round)
DEF_ATTRIBUTE(CondIf, 2,
- string, S, then_branch,
- string, S, else_branch)
+ string, S, then_graph,
+ string, S, else_graph)
DEF_ATTRIBUTE(WhileLoop, 2,
- string, S, cond_branch,
- string, S, body_branch)
+ string, S, cond_graph,
+ string, S, body_graph)
DEF_ATTRIBUTE(Transpose, 1,
int32_t, V, perms)
@@ -120,3 +108,15 @@ DEF_ATTRIBUTE(FullyConnected, 2,
DEF_ATTRIBUTE(Negate, 2,
int32_t, S, input1_zp,
int32_t, S, output_zp)
+
+DEF_ATTRIBUTE(Custom, 3,
+ string, S, operator_name,
+ string, S, domain_name,
+ uint8_t, V, implementation_attrs)
+
+DEF_ATTRIBUTE(FFT, 2,
+ bool, S, inverse,
+ bool, S, local_bound)
+
+DEF_ATTRIBUTE(RFFT, 1,
+ bool, S, local_bound)
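Note (not part of the patch): attribute.def is consumed through an X-macro — a client defines DEF_ATTRIBUTE before including the file and undefines it afterwards, and each DEF_ATTRIBUTE(NAME, NUM_ARGS, ...) entry expands into whatever the client needs (attribute.h expands it into the Tosa<NAME>Attribute classes). The snippet below is a minimal standalone sketch of that pattern, not the library's actual expansion, and assumes attribute.def is on the include path.

```cpp
#include <cstdio>

// Illustration only: expand each DEF_ATTRIBUTE entry into a (name, arg count)
// record so the attribute list can be walked at runtime.
struct AttrInfo
{
    const char* name;
    int num_args;
};

#define DEF_ATTRIBUTE(NAME, NUM_ARGS, ...) { #NAME, NUM_ARGS },
static const AttrInfo attr_table[] = {
#include "attribute.def"
};
#undef DEF_ATTRIBUTE

int main()
{
    for (const AttrInfo& a : attr_table)
        std::printf("%sAttribute takes %d argument(s)\n", a.name, a.num_args);
    return 0;
}
```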
diff --git a/include/attribute.h b/include/attribute.h
index 93f7bc4..d6c96a4 100644
--- a/include/attribute.h
+++ b/include/attribute.h
@@ -40,13 +40,43 @@ public:
{}
};
+inline int convertFlatbuffersU8toF32(const flatbuffers::Vector<uint8_t>& in, uint32_t out_size, std::vector<float>& out)
+{
+ out.clear();
+ if (in.size() < out_size * sizeof(float))
+ {
+ printf("convertFlatbuffersU8toF32(): uint8 Flatbuffers buffer size %u must be >= target size %ld\n", in.size(),
+ out_size * sizeof(float));
+ return 1;
+ }
+ for (uint32_t i = 0; i < out_size; i++)
+ {
+ uint32_t byte0 = in[i * sizeof(float)];
+ uint32_t byte1 = in[i * sizeof(float) + 1];
+ uint32_t byte2 = in[i * sizeof(float) + 2];
+ uint32_t byte3 = in[i * sizeof(float) + 3];
+ uint32_t val_u32 = byte0 + (byte1 << 8) + (byte2 << 16) + (byte3 << 24);
+ float* val_fp32 = reinterpret_cast<float*>(&val_u32);
+ out.push_back(*val_fp32);
+ }
+ return 0;
+}
+
#define DEF_ARGS_VER0_S_STR(V) _##V = p->V()->str();
#define DEF_ARGS_VER0_S_DEFAULT(V) _##V = p->V();
+#define DEF_ARGS_VER0_S_float_as_bytes(V) \
+ { \
+ std::vector<float> attr_vec; \
+ if (p->V() && convertFlatbuffersU8toF32(*(p->V()), 1, attr_vec)) \
+ assert(0 && "Failed to convert u8 buffer to f32"); \
+ _##V = (!attr_vec.empty()) ? attr_vec[0] : 0.0f; \
+ }
#define DEF_ARGS_VER0_S_int32_t(V) DEF_ARGS_VER0_S_DEFAULT(V)
-#define DEF_ARGS_VER0_S_float(V) DEF_ARGS_VER0_S_DEFAULT(V)
+#define DEF_ARGS_VER0_S_float(V) DEF_ARGS_VER0_S_float_as_bytes(V)
#define DEF_ARGS_VER0_S_bool(V) DEF_ARGS_VER0_S_DEFAULT(V)
#define DEF_ARGS_VER0_S_ResizeMode(V) DEF_ARGS_VER0_S_DEFAULT(V)
+#define DEF_ARGS_VER0_S_DType(V) DEF_ARGS_VER0_S_DEFAULT(V)
#define DEF_ARGS_VER0_S_string(V) DEF_ARGS_VER0_S_STR(V)
#define DEF_ARGS_VER0_S(T, V) DEF_ARGS_VER0_S_##T(V)
@@ -100,6 +130,18 @@ public:
DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
DEF_ARGS_##VER(FALSE, T6, F6, V6)
+#define DEF_ARGS_8(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
+ V7) \
+ DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
+ DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
+ DEF_ARGS_##VER(FALSE, T6, F6, V6) DEF_ARGS_##VER(FALSE, T7, F7, V7)
+
+#define DEF_ARGS_9(VER, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
+ V7, T8, F8, V8) \
+ DEF_ARGS_##VER(TRUE, T0, F0, V0) DEF_ARGS_##VER(FALSE, T1, F1, V1) DEF_ARGS_##VER(FALSE, T2, F2, V2) \
+ DEF_ARGS_##VER(FALSE, T3, F3, V3) DEF_ARGS_##VER(FALSE, T4, F4, V4) DEF_ARGS_##VER(FALSE, T5, F5, V5) \
+ DEF_ARGS_##VER(FALSE, T6, F6, V6) DEF_ARGS_##VER(FALSE, T7, F7, V7) DEF_ARGS_##VER(FALSE, T8, F8, V8)
+
#define DEF_VER0_VAR_DECL_PTR(NAME) const NAME* p = static_cast<const NAME*>(options);
#define DEF_VER0_VAR_0(NAME)
#define DEF_VER0_VAR_1(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
@@ -109,6 +151,8 @@ public:
#define DEF_VER0_VAR_5(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
#define DEF_VER0_VAR_6(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
#define DEF_VER0_VAR_7(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
+#define DEF_VER0_VAR_8(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
+#define DEF_VER0_VAR_9(NAME) DEF_VER0_VAR_DECL_PTR(NAME)
#define DEF_ATTRIBUTE(NAME, NUM_ARGS, ...) \
class Tosa##NAME##Attribute : public TosaAttributeBase \
@@ -144,6 +188,8 @@ public:
#undef DEF_ARGS_5
#undef DEF_ARGS_6
#undef DEF_ARGS_7
+#undef DEF_ARGS_8
+#undef DEF_ARGS_9
#undef DEF_ARGS_VER0
#undef DEF_ARGS_VER1
#undef DEF_ARGS_VER2
@@ -153,6 +199,7 @@ public:
#undef DEF_ARGS_VER0_S_float
#undef DEF_ARGS_VER0_S_bool
#undef DEF_ARGS_VER0_S_ResizeMode
+#undef DEF_ARGS_VER0_S_DType
#undef DEF_ARGS_VER0_S_string
#undef DEF_ARGS_VER0_S_STR
#undef DEF_ARGS_VER0_S_DEFAULT
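Note (not part of the patch): the new convertFlatbuffersU8toF32() helper above decodes scalar float attributes, now carried as uint8 vectors, from four little-endian bytes per value. The sketch below only illustrates that byte layout; pack_floats_le/unpack_float_le are hypothetical names, and std::memcpy is used instead of reinterpret_cast purely to keep the sketch free of aliasing concerns.

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical helpers mirroring the layout convertFlatbuffersU8toF32() expects:
// each float occupies four consecutive bytes, least-significant byte first.
std::vector<uint8_t> pack_floats_le(const std::vector<float>& in)
{
    std::vector<uint8_t> out;
    out.reserve(in.size() * sizeof(float));
    for (float f : in)
    {
        uint32_t bits;
        std::memcpy(&bits, &f, sizeof(bits));    // IEEE-754 bit pattern of the value
        for (int b = 0; b < 4; ++b)
            out.push_back(static_cast<uint8_t>(bits >> (8 * b)));
    }
    return out;
}

float unpack_float_le(const std::vector<uint8_t>& bytes, size_t idx)
{
    // Reassemble byte0 + (byte1 << 8) + (byte2 << 16) + (byte3 << 24),
    // exactly as the header does, then reinterpret as float via memcpy.
    uint32_t bits = 0;
    for (int b = 0; b < 4; ++b)
        bits |= static_cast<uint32_t>(bytes[idx * 4 + b]) << (8 * b);
    float f;
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}
```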
diff --git a/include/float_utils.h b/include/float_utils.h
new file mode 100644
index 0000000..831ad74
--- /dev/null
+++ b/include/float_utils.h
@@ -0,0 +1,533 @@
+// Copyright (c) 2024, ARM Limited.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef TOSA_FLOAT_UTILS_H_
+#define TOSA_FLOAT_UTILS_H_
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <type_traits>
+#if defined(__cpp_lib_bit_cast)
+#include <bit>
+#endif // defined(__cpp_lib_bit_cast)
+
+namespace tosa
+{
+
+namespace float_support
+{
+
+struct hidden
+{};
+
+#if defined(__cpp_lib_bit_cast)
+#define BITCAST_CONSTEXPR constexpr inline
+
+constexpr inline int32_t get_bits(const float& f)
+{
+ return std::bit_cast<int32_t>(f);
+}
+constexpr inline float from_bits(const int32_t& i)
+{
+ return std::bit_cast<float>(i);
+}
+
+#else
+#define BITCAST_CONSTEXPR inline
+
+union ufloat32
+{
+ constexpr ufloat32(const float& x)
+ : f(x)
+ {}
+ constexpr ufloat32(const int32_t& x)
+ : i(x)
+ {}
+
+ float f;
+ int32_t i;
+};
+
+inline int32_t get_bits(const float& f)
+{
+ return ufloat32(f).i;
+}
+inline float from_bits(const int32_t& i)
+{
+ return ufloat32(i).f;
+}
+#endif
+
+} // namespace float_support
+
+template <typename storage_t,
+ size_t n_exp_bits,
+ bool has_nan,
+ bool with_denorm,
+ bool with_infinity,
+ std::enable_if_t<(n_exp_bits + 1 < sizeof(storage_t) * 8), bool> = true>
+class float_t
+{
+ storage_t m_data = 0;
+
+public:
+ static constexpr size_t n_exponent_bits = n_exp_bits;
+ static constexpr size_t n_significand_bits = sizeof(storage_t) * 8 - 1 - n_exp_bits;
+ static constexpr int64_t exponent_bias = (1 << (n_exp_bits - 1)) - 1;
+
+ /// \brief Construct a floating point type with the given bit
+ /// representation.
+ static constexpr float_t from_bits(storage_t bits)
+ {
+ return float_t(float_support::hidden(), bits);
+ }
+
+ /// \brief Construct a float from the given sign, exponent and significand
+ /// bits.
+ static constexpr float_t from_bits(bool pm, storage_t e, storage_t s)
+ {
+ storage_t bits = pm ? 1 : 0;
+
+ bits <<= n_exp_bits;
+ bits |= e;
+
+ bits <<= n_significand_bits;
+ if (with_denorm || e)
+ bits |= s;
+
+ return float_t(float_support::hidden(), bits);
+ }
+
+ /// \brief (Hidden) Construct a float type from a given bit pattern
+ constexpr float_t(const float_support::hidden&, storage_t bits)
+ : m_data(bits)
+ {}
+
+ constexpr float_t()
+ : m_data(0)
+ {}
+ constexpr float_t(const float_t& other)
+ : m_data(other.m_data)
+ {}
+
+ /// \brief Cast to a different floating point representation.
+ template <typename other_storage_t,
+ size_t other_n_exp_bits,
+ bool other_has_nan,
+ bool other_has_denorm,
+ bool other_has_infinity>
+ constexpr inline
+ operator float_t<other_storage_t, other_n_exp_bits, other_has_nan, other_has_denorm, other_has_infinity>() const
+ {
+ using other_float_t =
+ float_t<other_storage_t, other_n_exp_bits, other_has_nan, other_has_denorm, other_has_infinity>;
+
+ // Shortcut for types which are fundamentally similar (e.g., bf16 ->
+ // fp32)
+ if constexpr (n_exp_bits == other_n_exp_bits && sizeof(other_storage_t) >= sizeof(storage_t) &&
+ has_nan == other_has_nan)
+ {
+ return other_float_t::from_bits(static_cast<other_storage_t>(m_data)
+ << (sizeof(other_storage_t) - sizeof(storage_t)) * 8);
+ }
+
+ // Get initial values for the new floating point type
+ const bool sign_bit = m_data < 0;
+ int64_t new_exponent_bits = 0;
+ uint64_t new_significand = 0;
+
+ if (is_nan() || is_infinity())
+ {
+ new_exponent_bits = (1 << other_n_exp_bits) - 1;
+
+ if (is_nan())
+ {
+ if constexpr (other_has_infinity)
+ {
+ // Copy across the `not_quiet bit`; set the LSB. Don't
+ // attempt to copy across any of the rest of the payload.
+ new_significand =
+ 0x1 | (((significand() >> (n_significand_bits - 1)) & 1) << other_float_t::n_significand_bits);
+ }
+ else
+ {
+ new_significand = (1ul << other_float_t::n_significand_bits) - 1;
+ }
+ }
+ else if constexpr (!other_has_infinity)
+ {
+ new_significand = (1ul << other_float_t::n_significand_bits) - (other_has_nan ? 2 : 1);
+ }
+ }
+ else if (!is_zero())
+ {
+ const int64_t this_exponent_bits = exponent_bits();
+ {
+ constexpr int64_t exponent_rebias = other_float_t::exponent_bias - exponent_bias;
+ new_exponent_bits = std::max(this_exponent_bits + exponent_rebias, exponent_rebias + 1);
+ }
+ new_significand = this->significand() << (64 - n_significand_bits);
+
+ // Normalise subnormals
+ if (this_exponent_bits == 0)
+ {
+ // Shift the most-significant 1 out of the magnitude to convert
+ // it to a significand. Modify the exponent accordingly.
+ uint8_t shift = __builtin_clzl(new_significand) + 1;
+ new_exponent_bits -= shift;
+ new_significand <<= shift;
+ }
+
+ // Align the significand for the output type
+ uint32_t shift = 64 - other_float_t::n_significand_bits;
+ const bool other_is_subnormal = new_exponent_bits <= 0;
+ if (other_is_subnormal)
+ {
+ shift += 1 - new_exponent_bits;
+ new_exponent_bits = 0;
+ }
+
+ const uint64_t shift_out = shift == 64 ? new_significand : new_significand & ((1ll << shift) - 1);
+ new_significand = shift == 64 ? 0 : new_significand >> shift;
+
+ // Reinsert the most-significant-one if this is a subnormal in the
+ // output type.
+ new_significand |= (other_is_subnormal ? 1ll : 0) << (64 - shift);
+
+ // Apply rounding based on the bits shifted out of the significand
+ const uint64_t shift_half = 1ll << (shift - 1);
+ if (shift_out > shift_half || (shift_out == shift_half && (new_significand & 1)))
+ {
+ new_significand += 1;
+
+ // Handle the case that the significand overflowed due to
+ // rounding
+ constexpr uint64_t max_significand = (1ll << other_float_t::n_significand_bits) - 1;
+ if (new_significand > max_significand)
+ {
+ new_significand = 0;
+ new_exponent_bits++;
+ }
+ }
+
+ // Saturate to infinity if the exponent is larger than can be
+ // represented in the output type. This can only occur if the size
+ // of the exponent of the new type is not greater than the exponent
+ // of the old type.
+ if constexpr (other_n_exp_bits <= n_exp_bits)
+ {
+ constexpr int64_t inf_exp_bits = (1ll << other_n_exp_bits) - 1;
+ if (new_exponent_bits >= inf_exp_bits)
+ {
+ new_exponent_bits = inf_exp_bits;
+ new_significand =
+ other_has_infinity ? 0 : (1ul << other_float_t::n_significand_bits) - (other_has_nan ? 2 : 1);
+ }
+ }
+ }
+
+ return other_float_t::from_bits(sign_bit, new_exponent_bits, new_significand);
+ }
+
+ /// \brief Convert from a 32-bit floating point value
+ BITCAST_CONSTEXPR
+ float_t(const float& f)
+ {
+ // If this format exactly represents the binary32 format then get
+ // the bits from the provided float; otherwise get a binary32
+ // representation and then convert to this format.
+ if constexpr (represents_binary32())
+ m_data = float_support::get_bits(f);
+ else
+ m_data = static_cast<float_t<storage_t, n_exp_bits, has_nan, with_denorm, with_infinity>>(
+ static_cast<float_t<int32_t, 8, true, true, true>>(f))
+ .m_data;
+ }
+
+ /// \brief Cast to a 32-bit floating point value
+ BITCAST_CONSTEXPR operator float() const
+ {
+ // If this format exactly represents the binary32 format then return
+ // a float; otherwise get a binary32 representation and then return
+ // a float.
+ if constexpr (represents_binary32())
+ return float_support::from_bits(m_data);
+ else
+ return static_cast<float>(this->operator float_t<int32_t, 8, true, true, true>());
+ }
+
+ /// \brief Return whether this type represents the IEEE754 binary32
+ /// format
+ constexpr static inline bool represents_binary32()
+ {
+ return std::is_same_v<storage_t, int32_t> && n_exp_bits == 8 && has_nan && with_denorm && with_infinity;
+ }
+
+ constexpr auto operator-() const
+ {
+ return from_bits(m_data ^ (1ll << (sizeof(storage_t) * 8 - 1)));
+ }
+
+ constexpr bool is_subnormal() const
+ {
+ return exponent_bits() == 0 && significand() != 0;
+ }
+
+ constexpr bool is_zero() const
+ {
+ return exponent_bits() == 0 && significand() == 0;
+ }
+
+ constexpr bool is_nan() const
+ {
+ return has_nan && (exponent_bits() == (1ul << n_exponent_bits) - 1) &&
+ ((with_infinity && significand()) ||
+ (!with_infinity && significand() == (1ul << n_significand_bits) - 1));
+ }
+
+ constexpr bool is_infinity() const
+ {
+ return with_infinity && ((exponent_bits() == (1ul << n_exponent_bits) - 1) && !significand());
+ }
+
+ constexpr inline const storage_t& bits() const
+ {
+ return m_data;
+ }
+
+ /// \brief Get the exponent
+ constexpr inline int64_t exponent() const
+ {
+ return std::max<int64_t>(exponent_bits(), 1ul) - exponent_bias;
+ }
+
+ /// \brief Get the bits from the exponent
+ constexpr inline uint64_t exponent_bits() const
+ {
+ constexpr uint64_t mask = (1ul << n_exp_bits) - 1;
+ return (m_data >> n_significand_bits) & mask;
+ }
+
+ constexpr inline uint64_t significand() const
+ {
+ return m_data & ((1ul << n_significand_bits) - 1);
+ }
+
+ constexpr inline bool operator==(const float_t& other) const
+ {
+ return !is_nan() && !other.is_nan() && ((is_zero() && other.is_zero()) || bits() == other.bits());
+ }
+
+ constexpr inline float_t& operator+=(const float_t& rhs)
+ {
+ this->m_data = static_cast<float_t>(static_cast<float>(*this) + static_cast<float>(rhs)).bits();
+ return *this;
+ }
+};
+
+// This should probably be exported so we can use it elsewhere
+#undef BITCAST_CONSTEXPR
+
+namespace float_support
+{
+
+// Pre-C++23 these can't be computed as constexpr, so have to hardcode them
+
+template <int>
+struct digits10; // floor(log10(2) * (digits - 1)
+template <int>
+struct max_digits10; // ceil(log10(2) * digits + 1)
+template <int>
+struct min_exponent10; // floor(log10(2) * min_exponent)
+template <int>
+struct max_exponent10; // floor(log10(2) * max_exponent)
+
+template <>
+struct digits10<8>
+{
+ constexpr static inline int value = 2;
+};
+
+template <>
+struct max_digits10<8>
+{
+ constexpr static inline int value = 4;
+};
+
+template <>
+struct digits10<10>
+{
+ constexpr static inline int value = 2;
+};
+
+template <>
+struct max_digits10<10>
+{
+ constexpr static inline int value = 5;
+};
+
+template <>
+struct digits10<24>
+{
+ constexpr static inline int value = 6;
+};
+
+template <>
+struct max_digits10<24>
+{
+ constexpr static inline int value = 9;
+};
+
+template <>
+struct min_exponent10<-13>
+{
+ constexpr static inline int value = -3;
+};
+
+template <>
+struct max_exponent10<16>
+{
+ constexpr static inline int value = 4;
+};
+
+template <>
+struct min_exponent10<-125>
+{
+ constexpr static inline int value = -37;
+};
+
+template <>
+struct max_exponent10<128>
+{
+ constexpr static inline int value = 38;
+};
+
+template <int d>
+inline constexpr int digits10_v = digits10<d>::value;
+template <int d>
+inline constexpr int max_digits10_v = max_digits10<d>::value;
+
+template <int e>
+inline constexpr int min_exponent10_v = min_exponent10<e>::value;
+
+template <int e>
+inline constexpr int max_exponent10_v = max_exponent10<e>::value;
+
+} // namespace float_support
+
+} // namespace tosa
+
+namespace std
+{
+
+template <typename storage_t, size_t n_exp_bits, bool has_nan, bool has_denorm, bool has_inf>
+struct is_floating_point<tosa::float_t<storage_t, n_exp_bits, has_nan, has_denorm, has_inf>>
+ : std::integral_constant<bool, true>
+{};
+
+template <typename storage_t, size_t n_exp_bits, bool has_nan, bool with_denorm, bool with_inf>
+class numeric_limits<tosa::float_t<storage_t, n_exp_bits, has_nan, with_denorm, with_inf>>
+{
+ using this_float_t = tosa::float_t<storage_t, n_exp_bits, has_nan, with_denorm, with_inf>;
+
+public:
+ static constexpr bool is_specialized = true;
+
+ static constexpr auto min() noexcept
+ {
+ return this_float_t::from_bits(false, 1, 0);
+ }
+
+ static constexpr auto max() noexcept
+ {
+ return this_float_t::from_bits(false, (1 << this_float_t::n_exponent_bits) - 2,
+ (1 << this_float_t::n_significand_bits) - 1);
+ }
+
+ static constexpr auto lowest() noexcept
+ {
+ return -max();
+ }
+
+ static constexpr int digits = this_float_t::n_significand_bits + 1;
+ static constexpr int digits10 = tosa::float_support::digits10_v<digits>;
+ static constexpr int max_digits10 = tosa::float_support::max_digits10_v<digits>;
+
+ static constexpr bool is_signed = true;
+ static constexpr bool is_integer = false;
+ static constexpr bool is_exact = false;
+ static constexpr int radix = 2;
+
+ static constexpr auto epsilon() noexcept
+ {
+ return this_float_t::from_bits(false, this_float_t::exponent_bias - this_float_t::n_significand_bits, 0);
+ }
+
+ static constexpr auto round_error() noexcept
+ {
+ return this_float_t::from_bits(0, this_float_t::exponent_bias - 1, 0);
+ }
+
+ static constexpr int min_exponent = (1 - this_float_t::exponent_bias) + 1;
+ static constexpr int min_exponent10 = tosa::float_support::min_exponent10_v<min_exponent>;
+ static constexpr int max_exponent = this_float_t::exponent_bias + 1;
+ static constexpr int max_exponent10 = tosa::float_support::max_exponent10_v<max_exponent>;
+
+ static constexpr bool has_infinity = with_inf;
+ static constexpr bool has_quiet_NaN = has_nan;
+ static constexpr bool has_signaling_NaN = true;
+ static constexpr float_denorm_style has_denorm = with_denorm ? denorm_present : denorm_absent;
+ static constexpr bool has_denorm_loss = false;
+
+ static constexpr auto infinity() noexcept
+ {
+ if constexpr (with_inf)
+ {
+ return this_float_t::from_bits(false, (1 << this_float_t::n_exponent_bits) - 1, 0);
+ }
+ else
+ {
+ return this_float_t::from_bits(false, 0, 0);
+ }
+ }
+
+ static constexpr auto quiet_NaN() noexcept
+ {
+ return this_float_t::from_bits(false, (1 << this_float_t::n_exponent_bits) - 1,
+ 1 << (this_float_t::n_significand_bits - 1) | 1);
+ }
+
+ static constexpr auto signaling_NaN() noexcept
+ {
+ return this_float_t::from_bits(false, (1 << this_float_t::n_exponent_bits) - 1, 1);
+ }
+
+ static constexpr auto denorm_min() noexcept
+ {
+ return this_float_t::from_bits(false, 0, 1);
+ }
+
+ static constexpr bool is_iec559 = false;
+ static constexpr bool is_bounded = false;
+ static constexpr bool is_modulo = false;
+
+ static constexpr bool traps = false;
+ static constexpr bool tinyness_before = false;
+ static constexpr float_round_style round_style = round_to_nearest;
+};
+
+} // namespace std
+
+#endif // TOSA_FLOAT_UTILS_H_
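Note (not part of the patch): float_utils.h above defines a generic template for small floating-point formats. The usage sketch below shows plausible instantiations; binary32 matches the represents_binary32() check, while the bf16/fp16 storage types and exponent widths are assumptions, not taken from the patch.

```cpp
#include <cstdio>
#include <limits>
#include "float_utils.h"

// Assumed aliases for illustration only.
using binary32 = tosa::float_t<int32_t, 8, true, true, true>;
using bfloat16 = tosa::float_t<int16_t, 8, true, true, true>;
using binary16 = tosa::float_t<int16_t, 5, true, true, true>;

int main()
{
    float x = 3.14159265f;

    bfloat16 b = x;    // narrowing conversion, rounded to nearest even
    binary16 h = x;

    std::printf("fp32: %.8f\n", x);
    std::printf("bf16: %.8f\n", static_cast<float>(b));
    std::printf("fp16: %.8f\n", static_cast<float>(h));

    // std::numeric_limits is specialized for these types as well.
    std::printf("fp16 epsilon: %g\n", static_cast<float>(std::numeric_limits<binary16>::epsilon()));
    return 0;
}
```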
diff --git a/include/numpy_utils.h b/include/numpy_utils.h
index c64bc17..60cf77e 100644
--- a/include/numpy_utils.h
+++ b/include/numpy_utils.h
@@ -1,5 +1,5 @@
-// Copyright (c) 2020-2021, ARM Limited.
+// Copyright (c) 2020-2023, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -24,6 +24,8 @@
#include <cstring>
#include <vector>
+#include "half.hpp"
+
class NumpyUtilities
{
public:
@@ -35,31 +37,89 @@ public:
FILE_TYPE_MISMATCH,
HEADER_PARSE_ERROR,
BUFFER_SIZE_MISMATCH,
+ DATA_TYPE_NOT_SUPPORTED,
};
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, float* databuf);
-
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, int32_t* databuf);
-
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, int64_t* databuf);
-
- static NPError readFromNpyFile(const char* filename, const uint32_t elems, bool* databuf);
-
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const bool* databuf);
-
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const bool* databuf);
-
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int32_t* databuf);
-
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const int32_t* databuf);
+ template <typename T>
+ static const char* getDTypeString(bool& is_bool)
+ {
+ is_bool = false;
+ if (std::is_same<T, bool>::value)
+ {
+ is_bool = true;
+ return "'|b1'";
+ }
+ if (std::is_same<T, uint8_t>::value)
+ {
+ return "'|u1'";
+ }
+ if (std::is_same<T, int8_t>::value)
+ {
+ return "'|i1'";
+ }
+ if (std::is_same<T, uint16_t>::value)
+ {
+ return "'<u2'";
+ }
+ if (std::is_same<T, int16_t>::value)
+ {
+ return "'<i2'";
+ }
+ if (std::is_same<T, int32_t>::value)
+ {
+ return "'<i4'";
+ }
+ if (std::is_same<T, int64_t>::value)
+ {
+ return "'<i8'";
+ }
+ if (std::is_same<T, float>::value)
+ {
+ return "'<f4'";
+ }
+ if (std::is_same<T, double>::value)
+ {
+ return "'<f8'";
+ }
+ if (std::is_same<T, half_float::half>::value)
+ {
+ return "'<f2'";
+ }
+ assert(false && "unsupported Dtype");
+ };
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int64_t* databuf);
+ template <typename T>
+ static NPError writeToNpyFile(const char* filename, const uint32_t elems, const T* databuf)
+ {
+ std::vector<int32_t> shape = { static_cast<int32_t>(elems) };
+ return writeToNpyFile(filename, shape, databuf);
+ }
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const int64_t* databuf);
+ template <typename T>
+ static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const T* databuf)
+ {
+ bool is_bool;
+ const char* dtype_str = getDTypeString<T>(is_bool);
+ return writeToNpyFileCommon(filename, dtype_str, sizeof(T), shape, databuf, is_bool);
+ }
- static NPError writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const float* databuf);
+ template <typename T>
+ static NPError readFromNpyFile(const char* filename, const uint32_t elems, T* databuf)
+ {
+ bool is_bool;
+ const char* dtype_str = getDTypeString<T>(is_bool);
+ return readFromNpyFileCommon(filename, dtype_str, sizeof(T), elems, databuf, is_bool);
+ }
- static NPError writeToNpyFile(const char* filename, const uint32_t elems, const float* databuf);
+ template <typename D, typename S>
+ static void copyBufferByElement(D* dest_buf, S* src_buf, int num)
+ {
+ static_assert(sizeof(D) >= sizeof(S), "The size of dest_buf must be equal to or larger than that of src_buf");
+ for (int i = 0; i < num; ++i)
+ {
+ dest_buf[i] = src_buf[i];
+ }
+ }
private:
static NPError writeToNpyFileCommon(const char* filename,
@@ -75,7 +135,11 @@ private:
void* databuf,
bool bool_translate);
static NPError checkNpyHeader(FILE* infile, const uint32_t elems, const char* dtype_str);
+ static NPError getHeader(FILE* infile, bool& is_signed, int& bit_length, char& byte_order);
static NPError writeNpyHeader(FILE* outfile, const std::vector<int32_t>& shape, const char* dtype_str);
};
+template <>
+NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, int32_t* databuf);
+
#endif // _TOSA_NUMPY_UTILS_H
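Note (not part of the patch): with the templated overloads above, the numpy dtype string is derived from the element type, so the same call handles float, half_float::half, integer, and bool buffers. Below is a hedged round-trip sketch; NO_ERROR is assumed to be the NPError success value (only the error members appear in the hunk above).

```cpp
#include <cstdint>
#include <vector>
#include "half.hpp"
#include "numpy_utils.h"

int main()
{
    const uint32_t elems = 8;
    std::vector<half_float::half> out(elems, half_float::half(1.5f));

    // Write a rank-1 fp16 tensor; getDTypeString<half_float::half>() selects '<f2'.
    NumpyUtilities::NPError err = NumpyUtilities::writeToNpyFile("example_fp16.npy", elems, out.data());
    if (err != NumpyUtilities::NO_ERROR)    // NO_ERROR assumed to be the success value
        return 1;

    // Read it back into a matching buffer.
    std::vector<half_float::half> in(elems);
    err = NumpyUtilities::readFromNpyFile("example_fp16.npy", elems, in.data());
    return err == NumpyUtilities::NO_ERROR ? 0 : 1;
}
```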
diff --git a/include/tosa_generated.h b/include/tosa_generated.h
index 023825d..20f6993 100644
--- a/include/tosa_generated.h
+++ b/include/tosa_generated.h
@@ -6,6 +6,13 @@
#include "flatbuffers/flatbuffers.h"
+// Ensure the included flatbuffers.h is the same version as when this file was
+// generated, otherwise it may not be compatible.
+static_assert(FLATBUFFERS_VERSION_MAJOR == 23 &&
+ FLATBUFFERS_VERSION_MINOR == 5 &&
+ FLATBUFFERS_VERSION_REVISION == 26,
+ "Non-compatible flatbuffers version included");
+
namespace tosa {
struct PoolAttribute;
@@ -23,15 +30,6 @@ struct PadAttributeBuilder;
struct AxisAttribute;
struct AxisAttributeBuilder;
-struct ReshapeAttribute;
-struct ReshapeAttributeBuilder;
-
-struct SliceAttribute;
-struct SliceAttributeBuilder;
-
-struct TileAttribute;
-struct TileAttributeBuilder;
-
struct ResizeAttribute;
struct ResizeAttributeBuilder;
@@ -68,6 +66,15 @@ struct FullyConnectedAttributeBuilder;
struct NegateAttribute;
struct NegateAttributeBuilder;
+struct CustomAttribute;
+struct CustomAttributeBuilder;
+
+struct FFTAttribute;
+struct FFTAttributeBuilder;
+
+struct RFFTAttribute;
+struct RFFTAttributeBuilder;
+
struct Version;
struct VersionBuilder;
@@ -80,10 +87,13 @@ struct TosaOperatorBuilder;
struct TosaBasicBlock;
struct TosaBasicBlockBuilder;
+struct TosaRegion;
+struct TosaRegionBuilder;
+
struct TosaGraph;
struct TosaGraphBuilder;
-enum DType {
+enum DType : uint32_t {
DType_UNKNOWN = 0,
DType_BOOL = 1,
DType_UINT8 = 2,
@@ -92,13 +102,18 @@ enum DType {
DType_INT16 = 5,
DType_INT32 = 6,
DType_INT48 = 7,
- DType_FLOAT = 8,
+ DType_FP32 = 8,
DType_UINT16 = 9,
+ DType_FP16 = 10,
+ DType_BF16 = 11,
+ DType_SHAPE = 12,
+ DType_FP8E4M3 = 13,
+ DType_FP8E5M2 = 14,
DType_MIN = DType_UNKNOWN,
- DType_MAX = DType_UINT16
+ DType_MAX = DType_FP8E5M2
};
-inline const DType (&EnumValuesDType())[10] {
+inline const DType (&EnumValuesDType())[15] {
static const DType values[] = {
DType_UNKNOWN,
DType_BOOL,
@@ -108,14 +123,19 @@ inline const DType (&EnumValuesDType())[10] {
DType_INT16,
DType_INT32,
DType_INT48,
- DType_FLOAT,
- DType_UINT16
+ DType_FP32,
+ DType_UINT16,
+ DType_FP16,
+ DType_BF16,
+ DType_SHAPE,
+ DType_FP8E4M3,
+ DType_FP8E5M2
};
return values;
}
inline const char * const *EnumNamesDType() {
- static const char * const names[11] = {
+ static const char * const names[16] = {
"UNKNOWN",
"BOOL",
"UINT8",
@@ -124,20 +144,25 @@ inline const char * const *EnumNamesDType() {
"INT16",
"INT32",
"INT48",
- "FLOAT",
+ "FP32",
"UINT16",
+ "FP16",
+ "BF16",
+ "SHAPE",
+ "FP8E4M3",
+ "FP8E5M2",
nullptr
};
return names;
}
inline const char *EnumNameDType(DType e) {
- if (flatbuffers::IsOutRange(e, DType_UNKNOWN, DType_UINT16)) return "";
+ if (::flatbuffers::IsOutRange(e, DType_UNKNOWN, DType_FP8E5M2)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesDType()[index];
}
-enum ResizeMode {
+enum ResizeMode : uint32_t {
ResizeMode_UNKNOWN = 0,
ResizeMode_NEAREST = 1,
ResizeMode_BILINEAR = 2,
@@ -165,12 +190,12 @@ inline const char * const *EnumNamesResizeMode() {
}
inline const char *EnumNameResizeMode(ResizeMode e) {
- if (flatbuffers::IsOutRange(e, ResizeMode_UNKNOWN, ResizeMode_BILINEAR)) return "";
+ if (::flatbuffers::IsOutRange(e, ResizeMode_UNKNOWN, ResizeMode_BILINEAR)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesResizeMode()[index];
}
-enum Op {
+enum Op : uint32_t {
Op_UNKNOWN = 0,
Op_ARGMAX = 1,
Op_AVG_POOL2D = 2,
@@ -240,11 +265,23 @@ enum Op {
Op_CUSTOM = 66,
Op_COND_IF = 67,
Op_WHILE_LOOP = 68,
+ Op_FFT2D = 69,
+ Op_RFFT2D = 70,
+ Op_ERF = 71,
+ Op_DIM = 72,
+ Op_CONST_SHAPE = 73,
+ Op_CONCAT_SHAPE = 74,
+ Op_ADD_SHAPE = 75,
+ Op_SUB_SHAPE = 76,
+ Op_MUL_SHAPE = 77,
+ Op_DIV_SHAPE = 78,
+ Op_COS = 79,
+ Op_SIN = 80,
Op_MIN = Op_UNKNOWN,
- Op_MAX = Op_WHILE_LOOP
+ Op_MAX = Op_SIN
};
-inline const Op (&EnumValuesOp())[69] {
+inline const Op (&EnumValuesOp())[81] {
static const Op values[] = {
Op_UNKNOWN,
Op_ARGMAX,
@@ -314,13 +351,25 @@ inline const Op (&EnumValuesOp())[69] {
Op_IDENTITY,
Op_CUSTOM,
Op_COND_IF,
- Op_WHILE_LOOP
+ Op_WHILE_LOOP,
+ Op_FFT2D,
+ Op_RFFT2D,
+ Op_ERF,
+ Op_DIM,
+ Op_CONST_SHAPE,
+ Op_CONCAT_SHAPE,
+ Op_ADD_SHAPE,
+ Op_SUB_SHAPE,
+ Op_MUL_SHAPE,
+ Op_DIV_SHAPE,
+ Op_COS,
+ Op_SIN
};
return values;
}
inline const char * const *EnumNamesOp() {
- static const char * const names[70] = {
+ static const char * const names[82] = {
"UNKNOWN",
"ARGMAX",
"AVG_POOL2D",
@@ -390,41 +439,53 @@ inline const char * const *EnumNamesOp() {
"CUSTOM",
"COND_IF",
"WHILE_LOOP",
+ "FFT2D",
+ "RFFT2D",
+ "ERF",
+ "DIM",
+ "CONST_SHAPE",
+ "CONCAT_SHAPE",
+ "ADD_SHAPE",
+ "SUB_SHAPE",
+ "MUL_SHAPE",
+ "DIV_SHAPE",
+ "COS",
+ "SIN",
nullptr
};
return names;
}
inline const char *EnumNameOp(Op e) {
- if (flatbuffers::IsOutRange(e, Op_UNKNOWN, Op_WHILE_LOOP)) return "";
+ if (::flatbuffers::IsOutRange(e, Op_UNKNOWN, Op_SIN)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesOp()[index];
}
-enum Attribute {
+enum Attribute : uint8_t {
Attribute_NONE = 0,
Attribute_PoolAttribute = 1,
Attribute_ConvAttribute = 2,
Attribute_TransposeConvAttribute = 3,
Attribute_PadAttribute = 4,
Attribute_AxisAttribute = 5,
- Attribute_ReshapeAttribute = 6,
- Attribute_SliceAttribute = 7,
- Attribute_TileAttribute = 8,
- Attribute_ResizeAttribute = 9,
- Attribute_ClampAttribute = 10,
- Attribute_RescaleAttribute = 11,
- Attribute_MulAttribute = 12,
- Attribute_ArithmeticRightShiftAttribute = 13,
- Attribute_CondIfAttribute = 14,
- Attribute_WhileLoopAttribute = 15,
- Attribute_TransposeAttribute = 16,
- Attribute_TableAttribute = 17,
- Attribute_MatMulAttribute = 18,
- Attribute_FullyConnectedAttribute = 19,
- Attribute_NegateAttribute = 20,
+ Attribute_ResizeAttribute = 6,
+ Attribute_ClampAttribute = 7,
+ Attribute_RescaleAttribute = 8,
+ Attribute_MulAttribute = 9,
+ Attribute_ArithmeticRightShiftAttribute = 10,
+ Attribute_CondIfAttribute = 11,
+ Attribute_WhileLoopAttribute = 12,
+ Attribute_TransposeAttribute = 13,
+ Attribute_TableAttribute = 14,
+ Attribute_MatMulAttribute = 15,
+ Attribute_FullyConnectedAttribute = 16,
+ Attribute_NegateAttribute = 17,
+ Attribute_CustomAttribute = 18,
+ Attribute_FFTAttribute = 19,
+ Attribute_RFFTAttribute = 20,
Attribute_MIN = Attribute_NONE,
- Attribute_MAX = Attribute_NegateAttribute
+ Attribute_MAX = Attribute_RFFTAttribute
};
inline const Attribute (&EnumValuesAttribute())[21] {
@@ -435,9 +496,6 @@ inline const Attribute (&EnumValuesAttribute())[21] {
Attribute_TransposeConvAttribute,
Attribute_PadAttribute,
Attribute_AxisAttribute,
- Attribute_ReshapeAttribute,
- Attribute_SliceAttribute,
- Attribute_TileAttribute,
Attribute_ResizeAttribute,
Attribute_ClampAttribute,
Attribute_RescaleAttribute,
@@ -449,7 +507,10 @@ inline const Attribute (&EnumValuesAttribute())[21] {
Attribute_TableAttribute,
Attribute_MatMulAttribute,
Attribute_FullyConnectedAttribute,
- Attribute_NegateAttribute
+ Attribute_NegateAttribute,
+ Attribute_CustomAttribute,
+ Attribute_FFTAttribute,
+ Attribute_RFFTAttribute
};
return values;
}
@@ -462,9 +523,6 @@ inline const char * const *EnumNamesAttribute() {
"TransposeConvAttribute",
"PadAttribute",
"AxisAttribute",
- "ReshapeAttribute",
- "SliceAttribute",
- "TileAttribute",
"ResizeAttribute",
"ClampAttribute",
"RescaleAttribute",
@@ -477,13 +535,16 @@ inline const char * const *EnumNamesAttribute() {
"MatMulAttribute",
"FullyConnectedAttribute",
"NegateAttribute",
+ "CustomAttribute",
+ "FFTAttribute",
+ "RFFTAttribute",
nullptr
};
return names;
}
inline const char *EnumNameAttribute(Attribute e) {
- if (flatbuffers::IsOutRange(e, Attribute_NONE, Attribute_NegateAttribute)) return "";
+ if (::flatbuffers::IsOutRange(e, Attribute_NONE, Attribute_RFFTAttribute)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesAttribute()[index];
}
@@ -512,18 +573,6 @@ template<> struct AttributeTraits<tosa::AxisAttribute> {
static const Attribute enum_value = Attribute_AxisAttribute;
};
-template<> struct AttributeTraits<tosa::ReshapeAttribute> {
- static const Attribute enum_value = Attribute_ReshapeAttribute;
-};
-
-template<> struct AttributeTraits<tosa::SliceAttribute> {
- static const Attribute enum_value = Attribute_SliceAttribute;
-};
-
-template<> struct AttributeTraits<tosa::TileAttribute> {
- static const Attribute enum_value = Attribute_TileAttribute;
-};
-
template<> struct AttributeTraits<tosa::ResizeAttribute> {
static const Attribute enum_value = Attribute_ResizeAttribute;
};
@@ -572,26 +621,39 @@ template<> struct AttributeTraits<tosa::NegateAttribute> {
static const Attribute enum_value = Attribute_NegateAttribute;
};
-bool VerifyAttribute(flatbuffers::Verifier &verifier, const void *obj, Attribute type);
-bool VerifyAttributeVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);
+template<> struct AttributeTraits<tosa::CustomAttribute> {
+ static const Attribute enum_value = Attribute_CustomAttribute;
+};
+
+template<> struct AttributeTraits<tosa::FFTAttribute> {
+ static const Attribute enum_value = Attribute_FFTAttribute;
+};
+
+template<> struct AttributeTraits<tosa::RFFTAttribute> {
+ static const Attribute enum_value = Attribute_RFFTAttribute;
+};
+
+bool VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, Attribute type);
+bool VerifyAttributeVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types);
-struct PoolAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct PoolAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef PoolAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_PAD = 4,
VT_KERNEL = 6,
VT_STRIDE = 8,
VT_INPUT_ZP = 10,
- VT_OUTPUT_ZP = 12
+ VT_OUTPUT_ZP = 12,
+ VT_ACC_TYPE = 14
};
- const flatbuffers::Vector<int32_t> *pad() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_PAD);
+ const ::flatbuffers::Vector<int32_t> *pad() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PAD);
}
- const flatbuffers::Vector<int32_t> *kernel() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_KERNEL);
+ const ::flatbuffers::Vector<int32_t> *kernel() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_KERNEL);
}
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
+ const ::flatbuffers::Vector<int32_t> *stride() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
}
int32_t input_zp() const {
return GetField<int32_t>(VT_INPUT_ZP, 0);
@@ -599,7 +661,10 @@ struct PoolAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t output_zp() const {
return GetField<int32_t>(VT_OUTPUT_ZP, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ tosa::DType acc_type() const {
+ return static_cast<tosa::DType>(GetField<uint32_t>(VT_ACC_TYPE, 0));
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_PAD) &&
verifier.VerifyVector(pad()) &&
@@ -607,23 +672,24 @@ struct PoolAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyVector(kernel()) &&
VerifyOffset(verifier, VT_STRIDE) &&
verifier.VerifyVector(stride()) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_OUTPUT_ZP) &&
+ VerifyField<int32_t>(verifier, VT_INPUT_ZP, 4) &&
+ VerifyField<int32_t>(verifier, VT_OUTPUT_ZP, 4) &&
+ VerifyField<uint32_t>(verifier, VT_ACC_TYPE, 4) &&
verifier.EndTable();
}
};
struct PoolAttributeBuilder {
typedef PoolAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_pad(flatbuffers::Offset<flatbuffers::Vector<int32_t>> pad) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_pad(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> pad) {
fbb_.AddOffset(PoolAttribute::VT_PAD, pad);
}
- void add_kernel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> kernel) {
+ void add_kernel(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel) {
fbb_.AddOffset(PoolAttribute::VT_KERNEL, kernel);
}
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
+ void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
fbb_.AddOffset(PoolAttribute::VT_STRIDE, stride);
}
void add_input_zp(int32_t input_zp) {
@@ -632,26 +698,30 @@ struct PoolAttributeBuilder {
void add_output_zp(int32_t output_zp) {
fbb_.AddElement<int32_t>(PoolAttribute::VT_OUTPUT_ZP, output_zp, 0);
}
- explicit PoolAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ void add_acc_type(tosa::DType acc_type) {
+ fbb_.AddElement<uint32_t>(PoolAttribute::VT_ACC_TYPE, static_cast<uint32_t>(acc_type), 0);
+ }
+ explicit PoolAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- PoolAttributeBuilder &operator=(const PoolAttributeBuilder &);
- flatbuffers::Offset<PoolAttribute> Finish() {
+ ::flatbuffers::Offset<PoolAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PoolAttribute>(end);
+ auto o = ::flatbuffers::Offset<PoolAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<PoolAttribute> CreatePoolAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> pad = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> kernel = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0,
+inline ::flatbuffers::Offset<PoolAttribute> CreatePoolAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> pad = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> kernel = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
int32_t input_zp = 0,
- int32_t output_zp = 0) {
+ int32_t output_zp = 0,
+ tosa::DType acc_type = tosa::DType_UNKNOWN) {
PoolAttributeBuilder builder_(_fbb);
+ builder_.add_acc_type(acc_type);
builder_.add_output_zp(output_zp);
builder_.add_input_zp(input_zp);
builder_.add_stride(stride);
@@ -660,13 +730,14 @@ inline flatbuffers::Offset<PoolAttribute> CreatePoolAttribute(
return builder_.Finish();
}
-inline flatbuffers::Offset<PoolAttribute> CreatePoolAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<PoolAttribute> CreatePoolAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<int32_t> *pad = nullptr,
const std::vector<int32_t> *kernel = nullptr,
const std::vector<int32_t> *stride = nullptr,
int32_t input_zp = 0,
- int32_t output_zp = 0) {
+ int32_t output_zp = 0,
+ tosa::DType acc_type = tosa::DType_UNKNOWN) {
auto pad__ = pad ? _fbb.CreateVector<int32_t>(*pad) : 0;
auto kernel__ = kernel ? _fbb.CreateVector<int32_t>(*kernel) : 0;
auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
@@ -676,26 +747,29 @@ inline flatbuffers::Offset<PoolAttribute> CreatePoolAttributeDirect(
kernel__,
stride__,
input_zp,
- output_zp);
+ output_zp,
+ acc_type);
}
-struct ConvAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ConvAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef ConvAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_PAD = 4,
VT_STRIDE = 6,
VT_DILATION = 8,
VT_INPUT_ZP = 10,
- VT_WEIGHT_ZP = 12
+ VT_WEIGHT_ZP = 12,
+ VT_LOCAL_BOUND = 14,
+ VT_ACC_TYPE = 16
};
- const flatbuffers::Vector<int32_t> *pad() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_PAD);
+ const ::flatbuffers::Vector<int32_t> *pad() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PAD);
}
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
+ const ::flatbuffers::Vector<int32_t> *stride() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
}
- const flatbuffers::Vector<int32_t> *dilation() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_DILATION);
+ const ::flatbuffers::Vector<int32_t> *dilation() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_DILATION);
}
int32_t input_zp() const {
return GetField<int32_t>(VT_INPUT_ZP, 0);
@@ -703,7 +777,13 @@ struct ConvAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t weight_zp() const {
return GetField<int32_t>(VT_WEIGHT_ZP, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool local_bound() const {
+ return GetField<uint8_t>(VT_LOCAL_BOUND, 0) != 0;
+ }
+ tosa::DType acc_type() const {
+ return static_cast<tosa::DType>(GetField<uint32_t>(VT_ACC_TYPE, 0));
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_PAD) &&
verifier.VerifyVector(pad()) &&
@@ -711,23 +791,25 @@ struct ConvAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
verifier.VerifyVector(stride()) &&
VerifyOffset(verifier, VT_DILATION) &&
verifier.VerifyVector(dilation()) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_WEIGHT_ZP) &&
+ VerifyField<int32_t>(verifier, VT_INPUT_ZP, 4) &&
+ VerifyField<int32_t>(verifier, VT_WEIGHT_ZP, 4) &&
+ VerifyField<uint8_t>(verifier, VT_LOCAL_BOUND, 1) &&
+ VerifyField<uint32_t>(verifier, VT_ACC_TYPE, 4) &&
verifier.EndTable();
}
};
struct ConvAttributeBuilder {
typedef ConvAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_pad(flatbuffers::Offset<flatbuffers::Vector<int32_t>> pad) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_pad(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> pad) {
fbb_.AddOffset(ConvAttribute::VT_PAD, pad);
}
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
+ void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
fbb_.AddOffset(ConvAttribute::VT_STRIDE, stride);
}
- void add_dilation(flatbuffers::Offset<flatbuffers::Vector<int32_t>> dilation) {
+ void add_dilation(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation) {
fbb_.AddOffset(ConvAttribute::VT_DILATION, dilation);
}
void add_input_zp(int32_t input_zp) {
@@ -736,41 +818,52 @@ struct ConvAttributeBuilder {
void add_weight_zp(int32_t weight_zp) {
fbb_.AddElement<int32_t>(ConvAttribute::VT_WEIGHT_ZP, weight_zp, 0);
}
- explicit ConvAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ void add_local_bound(bool local_bound) {
+ fbb_.AddElement<uint8_t>(ConvAttribute::VT_LOCAL_BOUND, static_cast<uint8_t>(local_bound), 0);
+ }
+ void add_acc_type(tosa::DType acc_type) {
+ fbb_.AddElement<uint32_t>(ConvAttribute::VT_ACC_TYPE, static_cast<uint32_t>(acc_type), 0);
+ }
+ explicit ConvAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- ConvAttributeBuilder &operator=(const ConvAttributeBuilder &);
- flatbuffers::Offset<ConvAttribute> Finish() {
+ ::flatbuffers::Offset<ConvAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ConvAttribute>(end);
+ auto o = ::flatbuffers::Offset<ConvAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<ConvAttribute> CreateConvAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> pad = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> dilation = 0,
+inline ::flatbuffers::Offset<ConvAttribute> CreateConvAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> pad = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> dilation = 0,
int32_t input_zp = 0,
- int32_t weight_zp = 0) {
+ int32_t weight_zp = 0,
+ bool local_bound = false,
+ tosa::DType acc_type = tosa::DType_UNKNOWN) {
ConvAttributeBuilder builder_(_fbb);
+ builder_.add_acc_type(acc_type);
builder_.add_weight_zp(weight_zp);
builder_.add_input_zp(input_zp);
builder_.add_dilation(dilation);
builder_.add_stride(stride);
builder_.add_pad(pad);
+ builder_.add_local_bound(local_bound);
return builder_.Finish();
}
-inline flatbuffers::Offset<ConvAttribute> CreateConvAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ConvAttribute> CreateConvAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<int32_t> *pad = nullptr,
const std::vector<int32_t> *stride = nullptr,
const std::vector<int32_t> *dilation = nullptr,
int32_t input_zp = 0,
- int32_t weight_zp = 0) {
+ int32_t weight_zp = 0,
+ bool local_bound = false,
+ tosa::DType acc_type = tosa::DType_UNKNOWN) {
auto pad__ = pad ? _fbb.CreateVector<int32_t>(*pad) : 0;
auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
auto dilation__ = dilation ? _fbb.CreateVector<int32_t>(*dilation) : 0;
@@ -780,26 +873,30 @@ inline flatbuffers::Offset<ConvAttribute> CreateConvAttributeDirect(
stride__,
dilation__,
input_zp,
- weight_zp);
+ weight_zp,
+ local_bound,
+ acc_type);
}
-struct TransposeConvAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TransposeConvAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef TransposeConvAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_OUT_PAD = 4,
VT_STRIDE = 6,
VT_OUTPUT_SHAPE = 8,
VT_INPUT_ZP = 10,
- VT_WEIGHT_ZP = 12
+ VT_WEIGHT_ZP = 12,
+ VT_LOCAL_BOUND = 14,
+ VT_ACC_TYPE = 16
};
- const flatbuffers::Vector<int32_t> *out_pad() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUT_PAD);
+ const ::flatbuffers::Vector<int32_t> *out_pad() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_OUT_PAD);
}
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
+ const ::flatbuffers::Vector<int32_t> *stride() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_STRIDE);
}
- const flatbuffers::Vector<int32_t> *output_shape() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_SHAPE);
+ const ::flatbuffers::Vector<int32_t> *output_shape() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_OUTPUT_SHAPE);
}
int32_t input_zp() const {
return GetField<int32_t>(VT_INPUT_ZP, 0);
@@ -807,7 +904,13 @@ struct TransposeConvAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Tab
int32_t weight_zp() const {
return GetField<int32_t>(VT_WEIGHT_ZP, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool local_bound() const {
+ return GetField<uint8_t>(VT_LOCAL_BOUND, 0) != 0;
+ }
+ tosa::DType acc_type() const {
+ return static_cast<tosa::DType>(GetField<uint32_t>(VT_ACC_TYPE, 0));
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_OUT_PAD) &&
verifier.VerifyVector(out_pad()) &&
@@ -815,23 +918,25 @@ struct TransposeConvAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Tab
verifier.VerifyVector(stride()) &&
VerifyOffset(verifier, VT_OUTPUT_SHAPE) &&
verifier.VerifyVector(output_shape()) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_WEIGHT_ZP) &&
+ VerifyField<int32_t>(verifier, VT_INPUT_ZP, 4) &&
+ VerifyField<int32_t>(verifier, VT_WEIGHT_ZP, 4) &&
+ VerifyField<uint8_t>(verifier, VT_LOCAL_BOUND, 1) &&
+ VerifyField<uint32_t>(verifier, VT_ACC_TYPE, 4) &&
verifier.EndTable();
}
};
struct TransposeConvAttributeBuilder {
typedef TransposeConvAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_out_pad(flatbuffers::Offset<flatbuffers::Vector<int32_t>> out_pad) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_out_pad(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> out_pad) {
fbb_.AddOffset(TransposeConvAttribute::VT_OUT_PAD, out_pad);
}
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
+ void add_stride(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride) {
fbb_.AddOffset(TransposeConvAttribute::VT_STRIDE, stride);
}
- void add_output_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_shape) {
+ void add_output_shape(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> output_shape) {
fbb_.AddOffset(TransposeConvAttribute::VT_OUTPUT_SHAPE, output_shape);
}
void add_input_zp(int32_t input_zp) {
@@ -840,41 +945,52 @@ struct TransposeConvAttributeBuilder {
void add_weight_zp(int32_t weight_zp) {
fbb_.AddElement<int32_t>(TransposeConvAttribute::VT_WEIGHT_ZP, weight_zp, 0);
}
- explicit TransposeConvAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ void add_local_bound(bool local_bound) {
+ fbb_.AddElement<uint8_t>(TransposeConvAttribute::VT_LOCAL_BOUND, static_cast<uint8_t>(local_bound), 0);
+ }
+ void add_acc_type(tosa::DType acc_type) {
+ fbb_.AddElement<uint32_t>(TransposeConvAttribute::VT_ACC_TYPE, static_cast<uint32_t>(acc_type), 0);
+ }
+ explicit TransposeConvAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- TransposeConvAttributeBuilder &operator=(const TransposeConvAttributeBuilder &);
- flatbuffers::Offset<TransposeConvAttribute> Finish() {
+ ::flatbuffers::Offset<TransposeConvAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeConvAttribute>(end);
+ auto o = ::flatbuffers::Offset<TransposeConvAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<TransposeConvAttribute> CreateTransposeConvAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> out_pad = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_shape = 0,
+inline ::flatbuffers::Offset<TransposeConvAttribute> CreateTransposeConvAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> out_pad = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> stride = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> output_shape = 0,
int32_t input_zp = 0,
- int32_t weight_zp = 0) {
+ int32_t weight_zp = 0,
+ bool local_bound = false,
+ tosa::DType acc_type = tosa::DType_UNKNOWN) {
TransposeConvAttributeBuilder builder_(_fbb);
+ builder_.add_acc_type(acc_type);
builder_.add_weight_zp(weight_zp);
builder_.add_input_zp(input_zp);
builder_.add_output_shape(output_shape);
builder_.add_stride(stride);
builder_.add_out_pad(out_pad);
+ builder_.add_local_bound(local_bound);
return builder_.Finish();
}
-inline flatbuffers::Offset<TransposeConvAttribute> CreateTransposeConvAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TransposeConvAttribute> CreateTransposeConvAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<int32_t> *out_pad = nullptr,
const std::vector<int32_t> *stride = nullptr,
const std::vector<int32_t> *output_shape = nullptr,
int32_t input_zp = 0,
- int32_t weight_zp = 0) {
+ int32_t weight_zp = 0,
+ bool local_bound = false,
+ tosa::DType acc_type = tosa::DType_UNKNOWN) {
auto out_pad__ = out_pad ? _fbb.CreateVector<int32_t>(*out_pad) : 0;
auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
auto output_shape__ = output_shape ? _fbb.CreateVector<int32_t>(*output_shape) : 0;
@@ -884,86 +1000,64 @@ inline flatbuffers::Offset<TransposeConvAttribute> CreateTransposeConvAttributeD
stride__,
output_shape__,
input_zp,
- weight_zp);
+ weight_zp,
+ local_bound,
+ acc_type);
}
-struct PadAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct PadAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef PadAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_PADDING = 4,
- VT_PAD_CONST_INT = 6,
- VT_PAD_CONST_FP = 8
+ VT_PAD_CONST = 4
};
- const flatbuffers::Vector<int32_t> *padding() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_PADDING);
+ const ::flatbuffers::Vector<uint8_t> *pad_const() const {
+ return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_PAD_CONST);
}
- int32_t pad_const_int() const {
- return GetField<int32_t>(VT_PAD_CONST_INT, 0);
- }
- float pad_const_fp() const {
- return GetField<float>(VT_PAD_CONST_FP, 0.0f);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_PADDING) &&
- verifier.VerifyVector(padding()) &&
- VerifyField<int32_t>(verifier, VT_PAD_CONST_INT) &&
- VerifyField<float>(verifier, VT_PAD_CONST_FP) &&
+ VerifyOffset(verifier, VT_PAD_CONST) &&
+ verifier.VerifyVector(pad_const()) &&
verifier.EndTable();
}
};
struct PadAttributeBuilder {
typedef PadAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_padding(flatbuffers::Offset<flatbuffers::Vector<int32_t>> padding) {
- fbb_.AddOffset(PadAttribute::VT_PADDING, padding);
- }
- void add_pad_const_int(int32_t pad_const_int) {
- fbb_.AddElement<int32_t>(PadAttribute::VT_PAD_CONST_INT, pad_const_int, 0);
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_pad_const(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> pad_const) {
+ fbb_.AddOffset(PadAttribute::VT_PAD_CONST, pad_const);
}
- void add_pad_const_fp(float pad_const_fp) {
- fbb_.AddElement<float>(PadAttribute::VT_PAD_CONST_FP, pad_const_fp, 0.0f);
- }
- explicit PadAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit PadAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- PadAttributeBuilder &operator=(const PadAttributeBuilder &);
- flatbuffers::Offset<PadAttribute> Finish() {
+ ::flatbuffers::Offset<PadAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<PadAttribute>(end);
+ auto o = ::flatbuffers::Offset<PadAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<PadAttribute> CreatePadAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> padding = 0,
- int32_t pad_const_int = 0,
- float pad_const_fp = 0.0f) {
+inline ::flatbuffers::Offset<PadAttribute> CreatePadAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> pad_const = 0) {
PadAttributeBuilder builder_(_fbb);
- builder_.add_pad_const_fp(pad_const_fp);
- builder_.add_pad_const_int(pad_const_int);
- builder_.add_padding(padding);
+ builder_.add_pad_const(pad_const);
return builder_.Finish();
}
-inline flatbuffers::Offset<PadAttribute> CreatePadAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *padding = nullptr,
- int32_t pad_const_int = 0,
- float pad_const_fp = 0.0f) {
- auto padding__ = padding ? _fbb.CreateVector<int32_t>(*padding) : 0;
+inline ::flatbuffers::Offset<PadAttribute> CreatePadAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *pad_const = nullptr) {
+ if (pad_const) { _fbb.ForceVectorAlignment(pad_const->size(), sizeof(uint8_t), 8); }
+ auto pad_const__ = pad_const ? _fbb.CreateVector<uint8_t>(*pad_const) : 0;
return tosa::CreatePadAttribute(
_fbb,
- padding__,
- pad_const_int,
- pad_const_fp);
+ pad_const__);
}
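
// --- Editorial sketch (not generated code, not part of the patch): PadAttribute
// now carries the pad constant as a raw byte vector instead of separate
// pad_const_int / pad_const_fp fields. A minimal example, assuming the expected
// encoding is simply the little-endian bytes of the element value (the
// serialization handler is the authoritative source for the encoding rule).
// Requires <cstring> and <vector>; BuildExamplePadAttribute is a hypothetical name.
inline ::flatbuffers::Offset<tosa::PadAttribute> BuildExamplePadAttribute(
    ::flatbuffers::FlatBufferBuilder &fbb, int32_t pad_value) {
  std::vector<uint8_t> pad_const(sizeof(pad_value));
  std::memcpy(pad_const.data(), &pad_value, sizeof(pad_value));  // serialize the constant
  return tosa::CreatePadAttributeDirect(fbb, &pad_const);
}
// --- End editorial sketch.
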
-struct AxisAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct AxisAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef AxisAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_AXIS = 4
@@ -971,424 +1065,207 @@ struct AxisAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t axis() const {
return GetField<int32_t>(VT_AXIS, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_AXIS) &&
+ VerifyField<int32_t>(verifier, VT_AXIS, 4) &&
verifier.EndTable();
}
};
struct AxisAttributeBuilder {
typedef AxisAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_axis(int32_t axis) {
fbb_.AddElement<int32_t>(AxisAttribute::VT_AXIS, axis, 0);
}
- explicit AxisAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit AxisAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- AxisAttributeBuilder &operator=(const AxisAttributeBuilder &);
- flatbuffers::Offset<AxisAttribute> Finish() {
+ ::flatbuffers::Offset<AxisAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<AxisAttribute>(end);
+ auto o = ::flatbuffers::Offset<AxisAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<AxisAttribute> CreateAxisAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<AxisAttribute> CreateAxisAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
int32_t axis = 0) {
AxisAttributeBuilder builder_(_fbb);
builder_.add_axis(axis);
return builder_.Finish();
}
-struct ReshapeAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- typedef ReshapeAttributeBuilder Builder;
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_NEW_SHAPE = 4
- };
- const flatbuffers::Vector<int32_t> *new_shape() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NEW_SHAPE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_NEW_SHAPE) &&
- verifier.VerifyVector(new_shape()) &&
- verifier.EndTable();
- }
-};
-
-struct ReshapeAttributeBuilder {
- typedef ReshapeAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_new_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape) {
- fbb_.AddOffset(ReshapeAttribute::VT_NEW_SHAPE, new_shape);
- }
- explicit ReshapeAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- ReshapeAttributeBuilder &operator=(const ReshapeAttributeBuilder &);
- flatbuffers::Offset<ReshapeAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ReshapeAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<ReshapeAttribute> CreateReshapeAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> new_shape = 0) {
- ReshapeAttributeBuilder builder_(_fbb);
- builder_.add_new_shape(new_shape);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<ReshapeAttribute> CreateReshapeAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *new_shape = nullptr) {
- auto new_shape__ = new_shape ? _fbb.CreateVector<int32_t>(*new_shape) : 0;
- return tosa::CreateReshapeAttribute(
- _fbb,
- new_shape__);
-}
-
-struct SliceAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- typedef SliceAttributeBuilder Builder;
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_START = 4,
- VT_SIZE = 6
- };
- const flatbuffers::Vector<int32_t> *start() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_START);
- }
- const flatbuffers::Vector<int32_t> *size() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SIZE);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_START) &&
- verifier.VerifyVector(start()) &&
- VerifyOffset(verifier, VT_SIZE) &&
- verifier.VerifyVector(size()) &&
- verifier.EndTable();
- }
-};
-
-struct SliceAttributeBuilder {
- typedef SliceAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_start(flatbuffers::Offset<flatbuffers::Vector<int32_t>> start) {
- fbb_.AddOffset(SliceAttribute::VT_START, start);
- }
- void add_size(flatbuffers::Offset<flatbuffers::Vector<int32_t>> size) {
- fbb_.AddOffset(SliceAttribute::VT_SIZE, size);
- }
- explicit SliceAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- SliceAttributeBuilder &operator=(const SliceAttributeBuilder &);
- flatbuffers::Offset<SliceAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<SliceAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<SliceAttribute> CreateSliceAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> start = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> size = 0) {
- SliceAttributeBuilder builder_(_fbb);
- builder_.add_size(size);
- builder_.add_start(start);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<SliceAttribute> CreateSliceAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *start = nullptr,
- const std::vector<int32_t> *size = nullptr) {
- auto start__ = start ? _fbb.CreateVector<int32_t>(*start) : 0;
- auto size__ = size ? _fbb.CreateVector<int32_t>(*size) : 0;
- return tosa::CreateSliceAttribute(
- _fbb,
- start__,
- size__);
-}
-
-struct TileAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
- typedef TileAttributeBuilder Builder;
- enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_MULTIPLES = 4
- };
- const flatbuffers::Vector<int32_t> *multiples() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_MULTIPLES);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
- return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_MULTIPLES) &&
- verifier.VerifyVector(multiples()) &&
- verifier.EndTable();
- }
-};
-
-struct TileAttributeBuilder {
- typedef TileAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_multiples(flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiples) {
- fbb_.AddOffset(TileAttribute::VT_MULTIPLES, multiples);
- }
- explicit TileAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
- : fbb_(_fbb) {
- start_ = fbb_.StartTable();
- }
- TileAttributeBuilder &operator=(const TileAttributeBuilder &);
- flatbuffers::Offset<TileAttribute> Finish() {
- const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TileAttribute>(end);
- return o;
- }
-};
-
-inline flatbuffers::Offset<TileAttribute> CreateTileAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiples = 0) {
- TileAttributeBuilder builder_(_fbb);
- builder_.add_multiples(multiples);
- return builder_.Finish();
-}
-
-inline flatbuffers::Offset<TileAttribute> CreateTileAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *multiples = nullptr) {
- auto multiples__ = multiples ? _fbb.CreateVector<int32_t>(*multiples) : 0;
- return tosa::CreateTileAttribute(
- _fbb,
- multiples__);
-}
-
-struct ResizeAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ResizeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef ResizeAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_OUTPUT_SIZE = 4,
- VT_STRIDE = 6,
- VT_OFFSET = 8,
- VT_SHIFT = 10,
- VT_STRIDE_FP = 12,
- VT_OFFSET_FP = 14,
- VT_MODE = 16
+ VT_SCALE = 4,
+ VT_OFFSET = 6,
+ VT_BORDER = 8,
+ VT_MODE = 10
};
- const flatbuffers::Vector<int32_t> *output_size() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_SIZE);
+ const ::flatbuffers::Vector<int16_t> *scale() const {
+ return GetPointer<const ::flatbuffers::Vector<int16_t> *>(VT_SCALE);
}
- const flatbuffers::Vector<int32_t> *stride() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_STRIDE);
+ const ::flatbuffers::Vector<int16_t> *offset() const {
+ return GetPointer<const ::flatbuffers::Vector<int16_t> *>(VT_OFFSET);
}
- const flatbuffers::Vector<int32_t> *offset() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OFFSET);
- }
- int32_t shift() const {
- return GetField<int32_t>(VT_SHIFT, 0);
- }
- const flatbuffers::Vector<float> *stride_fp() const {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_STRIDE_FP);
- }
- const flatbuffers::Vector<float> *offset_fp() const {
- return GetPointer<const flatbuffers::Vector<float> *>(VT_OFFSET_FP);
+ const ::flatbuffers::Vector<int16_t> *border() const {
+ return GetPointer<const ::flatbuffers::Vector<int16_t> *>(VT_BORDER);
}
tosa::ResizeMode mode() const {
return static_cast<tosa::ResizeMode>(GetField<uint32_t>(VT_MODE, 0));
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_OUTPUT_SIZE) &&
- verifier.VerifyVector(output_size()) &&
- VerifyOffset(verifier, VT_STRIDE) &&
- verifier.VerifyVector(stride()) &&
+ VerifyOffset(verifier, VT_SCALE) &&
+ verifier.VerifyVector(scale()) &&
VerifyOffset(verifier, VT_OFFSET) &&
verifier.VerifyVector(offset()) &&
- VerifyField<int32_t>(verifier, VT_SHIFT) &&
- VerifyOffset(verifier, VT_STRIDE_FP) &&
- verifier.VerifyVector(stride_fp()) &&
- VerifyOffset(verifier, VT_OFFSET_FP) &&
- verifier.VerifyVector(offset_fp()) &&
- VerifyField<uint32_t>(verifier, VT_MODE) &&
+ VerifyOffset(verifier, VT_BORDER) &&
+ verifier.VerifyVector(border()) &&
+ VerifyField<uint32_t>(verifier, VT_MODE, 4) &&
verifier.EndTable();
}
};
struct ResizeAttributeBuilder {
typedef ResizeAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_output_size(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_size) {
- fbb_.AddOffset(ResizeAttribute::VT_OUTPUT_SIZE, output_size);
- }
- void add_stride(flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride) {
- fbb_.AddOffset(ResizeAttribute::VT_STRIDE, stride);
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_scale(::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> scale) {
+ fbb_.AddOffset(ResizeAttribute::VT_SCALE, scale);
}
- void add_offset(flatbuffers::Offset<flatbuffers::Vector<int32_t>> offset) {
+ void add_offset(::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> offset) {
fbb_.AddOffset(ResizeAttribute::VT_OFFSET, offset);
}
- void add_shift(int32_t shift) {
- fbb_.AddElement<int32_t>(ResizeAttribute::VT_SHIFT, shift, 0);
- }
- void add_stride_fp(flatbuffers::Offset<flatbuffers::Vector<float>> stride_fp) {
- fbb_.AddOffset(ResizeAttribute::VT_STRIDE_FP, stride_fp);
- }
- void add_offset_fp(flatbuffers::Offset<flatbuffers::Vector<float>> offset_fp) {
- fbb_.AddOffset(ResizeAttribute::VT_OFFSET_FP, offset_fp);
+ void add_border(::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> border) {
+ fbb_.AddOffset(ResizeAttribute::VT_BORDER, border);
}
void add_mode(tosa::ResizeMode mode) {
fbb_.AddElement<uint32_t>(ResizeAttribute::VT_MODE, static_cast<uint32_t>(mode), 0);
}
- explicit ResizeAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit ResizeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- ResizeAttributeBuilder &operator=(const ResizeAttributeBuilder &);
- flatbuffers::Offset<ResizeAttribute> Finish() {
+ ::flatbuffers::Offset<ResizeAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ResizeAttribute>(end);
+ auto o = ::flatbuffers::Offset<ResizeAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<ResizeAttribute> CreateResizeAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_size = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> stride = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> offset = 0,
- int32_t shift = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> stride_fp = 0,
- flatbuffers::Offset<flatbuffers::Vector<float>> offset_fp = 0,
+inline ::flatbuffers::Offset<ResizeAttribute> CreateResizeAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> scale = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> offset = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> border = 0,
tosa::ResizeMode mode = tosa::ResizeMode_UNKNOWN) {
ResizeAttributeBuilder builder_(_fbb);
builder_.add_mode(mode);
- builder_.add_offset_fp(offset_fp);
- builder_.add_stride_fp(stride_fp);
- builder_.add_shift(shift);
+ builder_.add_border(border);
builder_.add_offset(offset);
- builder_.add_stride(stride);
- builder_.add_output_size(output_size);
+ builder_.add_scale(scale);
return builder_.Finish();
}
-inline flatbuffers::Offset<ResizeAttribute> CreateResizeAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const std::vector<int32_t> *output_size = nullptr,
- const std::vector<int32_t> *stride = nullptr,
- const std::vector<int32_t> *offset = nullptr,
- int32_t shift = 0,
- const std::vector<float> *stride_fp = nullptr,
- const std::vector<float> *offset_fp = nullptr,
+inline ::flatbuffers::Offset<ResizeAttribute> CreateResizeAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<int16_t> *scale = nullptr,
+ const std::vector<int16_t> *offset = nullptr,
+ const std::vector<int16_t> *border = nullptr,
tosa::ResizeMode mode = tosa::ResizeMode_UNKNOWN) {
- auto output_size__ = output_size ? _fbb.CreateVector<int32_t>(*output_size) : 0;
- auto stride__ = stride ? _fbb.CreateVector<int32_t>(*stride) : 0;
- auto offset__ = offset ? _fbb.CreateVector<int32_t>(*offset) : 0;
- auto stride_fp__ = stride_fp ? _fbb.CreateVector<float>(*stride_fp) : 0;
- auto offset_fp__ = offset_fp ? _fbb.CreateVector<float>(*offset_fp) : 0;
+ auto scale__ = scale ? _fbb.CreateVector<int16_t>(*scale) : 0;
+ auto offset__ = offset ? _fbb.CreateVector<int16_t>(*offset) : 0;
+ auto border__ = border ? _fbb.CreateVector<int16_t>(*border) : 0;
return tosa::CreateResizeAttribute(
_fbb,
- output_size__,
- stride__,
+ scale__,
offset__,
- shift,
- stride_fp__,
- offset_fp__,
+ border__,
mode);
}
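
// --- Editorial sketch (not generated code, not part of the patch): RESIZE now
// takes int16 scale/offset/border vectors in place of output_size, stride,
// shift and the *_fp variants. The layout below ([y_n, y_d, x_n, x_d] for
// scale, [y, x] for offset and border) follows the TOSA specification and is an
// assumption here, as is the tosa::ResizeMode_BILINEAR enumerator.
// BuildExampleResizeAttribute is a hypothetical helper name.
inline ::flatbuffers::Offset<tosa::ResizeAttribute> BuildExampleResizeAttribute(
    ::flatbuffers::FlatBufferBuilder &fbb) {
  std::vector<int16_t> scale{4, 2, 4, 2};  // 2x upscale on both axes (numerator/denominator pairs)
  std::vector<int16_t> offset{0, 0};
  std::vector<int16_t> border{0, 0};
  return tosa::CreateResizeAttributeDirect(
      fbb, &scale, &offset, &border, tosa::ResizeMode_BILINEAR);
}
// --- End editorial sketch.
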
-struct ClampAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ClampAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef ClampAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_MIN_INT = 4,
- VT_MAX_INT = 6,
- VT_MIN_FP = 8,
- VT_MAX_FP = 10
+ VT_MIN_VAL = 4,
+ VT_MAX_VAL = 6
};
- int32_t min_int() const {
- return GetField<int32_t>(VT_MIN_INT, 0);
+ const ::flatbuffers::Vector<uint8_t> *min_val() const {
+ return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_MIN_VAL);
}
- int32_t max_int() const {
- return GetField<int32_t>(VT_MAX_INT, 0);
+ const ::flatbuffers::Vector<uint8_t> *max_val() const {
+ return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_MAX_VAL);
}
- float min_fp() const {
- return GetField<float>(VT_MIN_FP, 0.0f);
- }
- float max_fp() const {
- return GetField<float>(VT_MAX_FP, 0.0f);
- }
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_MIN_INT) &&
- VerifyField<int32_t>(verifier, VT_MAX_INT) &&
- VerifyField<float>(verifier, VT_MIN_FP) &&
- VerifyField<float>(verifier, VT_MAX_FP) &&
+ VerifyOffset(verifier, VT_MIN_VAL) &&
+ verifier.VerifyVector(min_val()) &&
+ VerifyOffset(verifier, VT_MAX_VAL) &&
+ verifier.VerifyVector(max_val()) &&
verifier.EndTable();
}
};
struct ClampAttributeBuilder {
typedef ClampAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_min_int(int32_t min_int) {
- fbb_.AddElement<int32_t>(ClampAttribute::VT_MIN_INT, min_int, 0);
- }
- void add_max_int(int32_t max_int) {
- fbb_.AddElement<int32_t>(ClampAttribute::VT_MAX_INT, max_int, 0);
- }
- void add_min_fp(float min_fp) {
- fbb_.AddElement<float>(ClampAttribute::VT_MIN_FP, min_fp, 0.0f);
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_min_val(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> min_val) {
+ fbb_.AddOffset(ClampAttribute::VT_MIN_VAL, min_val);
}
- void add_max_fp(float max_fp) {
- fbb_.AddElement<float>(ClampAttribute::VT_MAX_FP, max_fp, 0.0f);
+ void add_max_val(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> max_val) {
+ fbb_.AddOffset(ClampAttribute::VT_MAX_VAL, max_val);
}
- explicit ClampAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit ClampAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- ClampAttributeBuilder &operator=(const ClampAttributeBuilder &);
- flatbuffers::Offset<ClampAttribute> Finish() {
+ ::flatbuffers::Offset<ClampAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ClampAttribute>(end);
+ auto o = ::flatbuffers::Offset<ClampAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<ClampAttribute> CreateClampAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t min_int = 0,
- int32_t max_int = 0,
- float min_fp = 0.0f,
- float max_fp = 0.0f) {
+inline ::flatbuffers::Offset<ClampAttribute> CreateClampAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> min_val = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> max_val = 0) {
ClampAttributeBuilder builder_(_fbb);
- builder_.add_max_fp(max_fp);
- builder_.add_min_fp(min_fp);
- builder_.add_max_int(max_int);
- builder_.add_min_int(min_int);
+ builder_.add_max_val(max_val);
+ builder_.add_min_val(min_val);
return builder_.Finish();
}
-struct RescaleAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+inline ::flatbuffers::Offset<ClampAttribute> CreateClampAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ const std::vector<uint8_t> *min_val = nullptr,
+ const std::vector<uint8_t> *max_val = nullptr) {
+ if (min_val) { _fbb.ForceVectorAlignment(min_val->size(), sizeof(uint8_t), 8); }
+ auto min_val__ = min_val ? _fbb.CreateVector<uint8_t>(*min_val) : 0;
+ if (max_val) { _fbb.ForceVectorAlignment(max_val->size(), sizeof(uint8_t), 8); }
+ auto max_val__ = max_val ? _fbb.CreateVector<uint8_t>(*max_val) : 0;
+ return tosa::CreateClampAttribute(
+ _fbb,
+ min_val__,
+ max_val__);
+}
+
+struct RescaleAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef RescaleAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_INPUT_ZP = 4,
VT_OUTPUT_ZP = 6,
- VT_MULTIPLIER = 8,
- VT_SHIFT = 10,
- VT_SCALE32 = 12,
- VT_DOUBLE_ROUND = 14,
- VT_PER_CHANNEL = 16
+ VT_SCALE32 = 8,
+ VT_DOUBLE_ROUND = 10,
+ VT_PER_CHANNEL = 12,
+ VT_INPUT_UNSIGNED = 14,
+ VT_OUTPUT_UNSIGNED = 16
};
int32_t input_zp() const {
return GetField<int32_t>(VT_INPUT_ZP, 0);
@@ -1396,12 +1273,6 @@ struct RescaleAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t output_zp() const {
return GetField<int32_t>(VT_OUTPUT_ZP, 0);
}
- const flatbuffers::Vector<int32_t> *multiplier() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_MULTIPLIER);
- }
- const flatbuffers::Vector<int32_t> *shift() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHIFT);
- }
bool scale32() const {
return GetField<uint8_t>(VT_SCALE32, 0) != 0;
}
@@ -1411,37 +1282,35 @@ struct RescaleAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
bool per_channel() const {
return GetField<uint8_t>(VT_PER_CHANNEL, 0) != 0;
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool input_unsigned() const {
+ return GetField<uint8_t>(VT_INPUT_UNSIGNED, 0) != 0;
+ }
+ bool output_unsigned() const {
+ return GetField<uint8_t>(VT_OUTPUT_UNSIGNED, 0) != 0;
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_OUTPUT_ZP) &&
- VerifyOffset(verifier, VT_MULTIPLIER) &&
- verifier.VerifyVector(multiplier()) &&
- VerifyOffset(verifier, VT_SHIFT) &&
- verifier.VerifyVector(shift()) &&
- VerifyField<uint8_t>(verifier, VT_SCALE32) &&
- VerifyField<uint8_t>(verifier, VT_DOUBLE_ROUND) &&
- VerifyField<uint8_t>(verifier, VT_PER_CHANNEL) &&
+ VerifyField<int32_t>(verifier, VT_INPUT_ZP, 4) &&
+ VerifyField<int32_t>(verifier, VT_OUTPUT_ZP, 4) &&
+ VerifyField<uint8_t>(verifier, VT_SCALE32, 1) &&
+ VerifyField<uint8_t>(verifier, VT_DOUBLE_ROUND, 1) &&
+ VerifyField<uint8_t>(verifier, VT_PER_CHANNEL, 1) &&
+ VerifyField<uint8_t>(verifier, VT_INPUT_UNSIGNED, 1) &&
+ VerifyField<uint8_t>(verifier, VT_OUTPUT_UNSIGNED, 1) &&
verifier.EndTable();
}
};
struct RescaleAttributeBuilder {
typedef RescaleAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_input_zp(int32_t input_zp) {
fbb_.AddElement<int32_t>(RescaleAttribute::VT_INPUT_ZP, input_zp, 0);
}
void add_output_zp(int32_t output_zp) {
fbb_.AddElement<int32_t>(RescaleAttribute::VT_OUTPUT_ZP, output_zp, 0);
}
- void add_multiplier(flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiplier) {
- fbb_.AddOffset(RescaleAttribute::VT_MULTIPLIER, multiplier);
- }
- void add_shift(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shift) {
- fbb_.AddOffset(RescaleAttribute::VT_SHIFT, shift);
- }
void add_scale32(bool scale32) {
fbb_.AddElement<uint8_t>(RescaleAttribute::VT_SCALE32, static_cast<uint8_t>(scale32), 0);
}
@@ -1451,61 +1320,44 @@ struct RescaleAttributeBuilder {
void add_per_channel(bool per_channel) {
fbb_.AddElement<uint8_t>(RescaleAttribute::VT_PER_CHANNEL, static_cast<uint8_t>(per_channel), 0);
}
- explicit RescaleAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ void add_input_unsigned(bool input_unsigned) {
+ fbb_.AddElement<uint8_t>(RescaleAttribute::VT_INPUT_UNSIGNED, static_cast<uint8_t>(input_unsigned), 0);
+ }
+ void add_output_unsigned(bool output_unsigned) {
+ fbb_.AddElement<uint8_t>(RescaleAttribute::VT_OUTPUT_UNSIGNED, static_cast<uint8_t>(output_unsigned), 0);
+ }
+ explicit RescaleAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- RescaleAttributeBuilder &operator=(const RescaleAttributeBuilder &);
- flatbuffers::Offset<RescaleAttribute> Finish() {
+ ::flatbuffers::Offset<RescaleAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<RescaleAttribute>(end);
+ auto o = ::flatbuffers::Offset<RescaleAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<RescaleAttribute> CreateRescaleAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<RescaleAttribute> CreateRescaleAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
int32_t input_zp = 0,
int32_t output_zp = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> multiplier = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shift = 0,
bool scale32 = false,
bool double_round = false,
- bool per_channel = false) {
+ bool per_channel = false,
+ bool input_unsigned = false,
+ bool output_unsigned = false) {
RescaleAttributeBuilder builder_(_fbb);
- builder_.add_shift(shift);
- builder_.add_multiplier(multiplier);
builder_.add_output_zp(output_zp);
builder_.add_input_zp(input_zp);
+ builder_.add_output_unsigned(output_unsigned);
+ builder_.add_input_unsigned(input_unsigned);
builder_.add_per_channel(per_channel);
builder_.add_double_round(double_round);
builder_.add_scale32(scale32);
return builder_.Finish();
}
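
// --- Editorial sketch (not generated code, not part of the patch): the
// multiplier and shift vectors have been removed from RescaleAttribute
// (presumably they are now supplied outside the attribute; that is an inference
// from this diff alone), and input_unsigned / output_unsigned flags were added.
// A call against the new signature; BuildExampleRescaleAttribute is a
// hypothetical helper name and the zero points are illustrative values only.
inline ::flatbuffers::Offset<tosa::RescaleAttribute> BuildExampleRescaleAttribute(
    ::flatbuffers::FlatBufferBuilder &fbb) {
  return tosa::CreateRescaleAttribute(
      fbb,
      /*input_zp=*/-128, /*output_zp=*/0,
      /*scale32=*/true, /*double_round=*/true, /*per_channel=*/false,
      /*input_unsigned=*/false, /*output_unsigned=*/false);
}
// --- End editorial sketch.
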
-inline flatbuffers::Offset<RescaleAttribute> CreateRescaleAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t input_zp = 0,
- int32_t output_zp = 0,
- const std::vector<int32_t> *multiplier = nullptr,
- const std::vector<int32_t> *shift = nullptr,
- bool scale32 = false,
- bool double_round = false,
- bool per_channel = false) {
- auto multiplier__ = multiplier ? _fbb.CreateVector<int32_t>(*multiplier) : 0;
- auto shift__ = shift ? _fbb.CreateVector<int32_t>(*shift) : 0;
- return tosa::CreateRescaleAttribute(
- _fbb,
- input_zp,
- output_zp,
- multiplier__,
- shift__,
- scale32,
- double_round,
- per_channel);
-}
-
-struct MulAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct MulAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef MulAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_SHIFT = 4
@@ -1513,41 +1365,40 @@ struct MulAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t shift() const {
return GetField<int32_t>(VT_SHIFT, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_SHIFT) &&
+ VerifyField<int32_t>(verifier, VT_SHIFT, 4) &&
verifier.EndTable();
}
};
struct MulAttributeBuilder {
typedef MulAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_shift(int32_t shift) {
fbb_.AddElement<int32_t>(MulAttribute::VT_SHIFT, shift, 0);
}
- explicit MulAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit MulAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- MulAttributeBuilder &operator=(const MulAttributeBuilder &);
- flatbuffers::Offset<MulAttribute> Finish() {
+ ::flatbuffers::Offset<MulAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MulAttribute>(end);
+ auto o = ::flatbuffers::Offset<MulAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<MulAttribute> CreateMulAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<MulAttribute> CreateMulAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
int32_t shift = 0) {
MulAttributeBuilder builder_(_fbb);
builder_.add_shift(shift);
return builder_.Finish();
}
-struct ArithmeticRightShiftAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct ArithmeticRightShiftAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef ArithmeticRightShiftAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_ROUND = 4
@@ -1555,181 +1406,178 @@ struct ArithmeticRightShiftAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffe
bool round() const {
return GetField<uint8_t>(VT_ROUND, 0) != 0;
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<uint8_t>(verifier, VT_ROUND) &&
+ VerifyField<uint8_t>(verifier, VT_ROUND, 1) &&
verifier.EndTable();
}
};
struct ArithmeticRightShiftAttributeBuilder {
typedef ArithmeticRightShiftAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_round(bool round) {
fbb_.AddElement<uint8_t>(ArithmeticRightShiftAttribute::VT_ROUND, static_cast<uint8_t>(round), 0);
}
- explicit ArithmeticRightShiftAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit ArithmeticRightShiftAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- ArithmeticRightShiftAttributeBuilder &operator=(const ArithmeticRightShiftAttributeBuilder &);
- flatbuffers::Offset<ArithmeticRightShiftAttribute> Finish() {
+ ::flatbuffers::Offset<ArithmeticRightShiftAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<ArithmeticRightShiftAttribute>(end);
+ auto o = ::flatbuffers::Offset<ArithmeticRightShiftAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<ArithmeticRightShiftAttribute> CreateArithmeticRightShiftAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<ArithmeticRightShiftAttribute> CreateArithmeticRightShiftAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
bool round = false) {
ArithmeticRightShiftAttributeBuilder builder_(_fbb);
builder_.add_round(round);
return builder_.Finish();
}
-struct CondIfAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CondIfAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef CondIfAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_THEN_BRANCH = 4,
- VT_ELSE_BRANCH = 6
+ VT_THEN_GRAPH = 4,
+ VT_ELSE_GRAPH = 6
};
- const flatbuffers::String *then_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_THEN_BRANCH);
+ const ::flatbuffers::String *then_graph() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_THEN_GRAPH);
}
- const flatbuffers::String *else_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_ELSE_BRANCH);
+ const ::flatbuffers::String *else_graph() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_ELSE_GRAPH);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_THEN_BRANCH) &&
- verifier.VerifyString(then_branch()) &&
- VerifyOffset(verifier, VT_ELSE_BRANCH) &&
- verifier.VerifyString(else_branch()) &&
+ VerifyOffset(verifier, VT_THEN_GRAPH) &&
+ verifier.VerifyString(then_graph()) &&
+ VerifyOffset(verifier, VT_ELSE_GRAPH) &&
+ verifier.VerifyString(else_graph()) &&
verifier.EndTable();
}
};
struct CondIfAttributeBuilder {
typedef CondIfAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_then_branch(flatbuffers::Offset<flatbuffers::String> then_branch) {
- fbb_.AddOffset(CondIfAttribute::VT_THEN_BRANCH, then_branch);
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_then_graph(::flatbuffers::Offset<::flatbuffers::String> then_graph) {
+ fbb_.AddOffset(CondIfAttribute::VT_THEN_GRAPH, then_graph);
}
- void add_else_branch(flatbuffers::Offset<flatbuffers::String> else_branch) {
- fbb_.AddOffset(CondIfAttribute::VT_ELSE_BRANCH, else_branch);
+ void add_else_graph(::flatbuffers::Offset<::flatbuffers::String> else_graph) {
+ fbb_.AddOffset(CondIfAttribute::VT_ELSE_GRAPH, else_graph);
}
- explicit CondIfAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit CondIfAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- CondIfAttributeBuilder &operator=(const CondIfAttributeBuilder &);
- flatbuffers::Offset<CondIfAttribute> Finish() {
+ ::flatbuffers::Offset<CondIfAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<CondIfAttribute>(end);
+ auto o = ::flatbuffers::Offset<CondIfAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<CondIfAttribute> CreateCondIfAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> then_branch = 0,
- flatbuffers::Offset<flatbuffers::String> else_branch = 0) {
+inline ::flatbuffers::Offset<CondIfAttribute> CreateCondIfAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::String> then_graph = 0,
+ ::flatbuffers::Offset<::flatbuffers::String> else_graph = 0) {
CondIfAttributeBuilder builder_(_fbb);
- builder_.add_else_branch(else_branch);
- builder_.add_then_branch(then_branch);
+ builder_.add_else_graph(else_graph);
+ builder_.add_then_graph(then_graph);
return builder_.Finish();
}
-inline flatbuffers::Offset<CondIfAttribute> CreateCondIfAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const char *then_branch = nullptr,
- const char *else_branch = nullptr) {
- auto then_branch__ = then_branch ? _fbb.CreateString(then_branch) : 0;
- auto else_branch__ = else_branch ? _fbb.CreateString(else_branch) : 0;
+inline ::flatbuffers::Offset<CondIfAttribute> CreateCondIfAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ const char *then_graph = nullptr,
+ const char *else_graph = nullptr) {
+ auto then_graph__ = then_graph ? _fbb.CreateString(then_graph) : 0;
+ auto else_graph__ = else_graph ? _fbb.CreateString(else_graph) : 0;
return tosa::CreateCondIfAttribute(
_fbb,
- then_branch__,
- else_branch__);
+ then_graph__,
+ else_graph__);
}
-struct WhileLoopAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct WhileLoopAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef WhileLoopAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
- VT_COND_BRANCH = 4,
- VT_BODY_BRANCH = 6
+ VT_COND_GRAPH = 4,
+ VT_BODY_GRAPH = 6
};
- const flatbuffers::String *cond_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_COND_BRANCH);
+ const ::flatbuffers::String *cond_graph() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_COND_GRAPH);
}
- const flatbuffers::String *body_branch() const {
- return GetPointer<const flatbuffers::String *>(VT_BODY_BRANCH);
+ const ::flatbuffers::String *body_graph() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_BODY_GRAPH);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_COND_BRANCH) &&
- verifier.VerifyString(cond_branch()) &&
- VerifyOffset(verifier, VT_BODY_BRANCH) &&
- verifier.VerifyString(body_branch()) &&
+ VerifyOffset(verifier, VT_COND_GRAPH) &&
+ verifier.VerifyString(cond_graph()) &&
+ VerifyOffset(verifier, VT_BODY_GRAPH) &&
+ verifier.VerifyString(body_graph()) &&
verifier.EndTable();
}
};
struct WhileLoopAttributeBuilder {
typedef WhileLoopAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_cond_branch(flatbuffers::Offset<flatbuffers::String> cond_branch) {
- fbb_.AddOffset(WhileLoopAttribute::VT_COND_BRANCH, cond_branch);
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_cond_graph(::flatbuffers::Offset<::flatbuffers::String> cond_graph) {
+ fbb_.AddOffset(WhileLoopAttribute::VT_COND_GRAPH, cond_graph);
}
- void add_body_branch(flatbuffers::Offset<flatbuffers::String> body_branch) {
- fbb_.AddOffset(WhileLoopAttribute::VT_BODY_BRANCH, body_branch);
+ void add_body_graph(::flatbuffers::Offset<::flatbuffers::String> body_graph) {
+ fbb_.AddOffset(WhileLoopAttribute::VT_BODY_GRAPH, body_graph);
}
- explicit WhileLoopAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit WhileLoopAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- WhileLoopAttributeBuilder &operator=(const WhileLoopAttributeBuilder &);
- flatbuffers::Offset<WhileLoopAttribute> Finish() {
+ ::flatbuffers::Offset<WhileLoopAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<WhileLoopAttribute>(end);
+ auto o = ::flatbuffers::Offset<WhileLoopAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<WhileLoopAttribute> CreateWhileLoopAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> cond_branch = 0,
- flatbuffers::Offset<flatbuffers::String> body_branch = 0) {
+inline ::flatbuffers::Offset<WhileLoopAttribute> CreateWhileLoopAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::String> cond_graph = 0,
+ ::flatbuffers::Offset<::flatbuffers::String> body_graph = 0) {
WhileLoopAttributeBuilder builder_(_fbb);
- builder_.add_body_branch(body_branch);
- builder_.add_cond_branch(cond_branch);
+ builder_.add_body_graph(body_graph);
+ builder_.add_cond_graph(cond_graph);
return builder_.Finish();
}
-inline flatbuffers::Offset<WhileLoopAttribute> CreateWhileLoopAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- const char *cond_branch = nullptr,
- const char *body_branch = nullptr) {
- auto cond_branch__ = cond_branch ? _fbb.CreateString(cond_branch) : 0;
- auto body_branch__ = body_branch ? _fbb.CreateString(body_branch) : 0;
+inline ::flatbuffers::Offset<WhileLoopAttribute> CreateWhileLoopAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ const char *cond_graph = nullptr,
+ const char *body_graph = nullptr) {
+ auto cond_graph__ = cond_graph ? _fbb.CreateString(cond_graph) : 0;
+ auto body_graph__ = body_graph ? _fbb.CreateString(body_graph) : 0;
return tosa::CreateWhileLoopAttribute(
_fbb,
- cond_branch__,
- body_branch__);
+ cond_graph__,
+ body_graph__);
}
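
// --- Editorial sketch (not generated code, not part of the patch): the
// control-flow attributes rename their fields from *_branch to *_graph, so the
// accessors and factory arguments change name but not type. Updated call sites
// would look like the following; BuildExampleControlFlowAttrs and the graph
// name strings are purely illustrative.
inline void BuildExampleControlFlowAttrs(::flatbuffers::FlatBufferBuilder &fbb) {
  auto cond_if = tosa::CreateCondIfAttributeDirect(fbb, "then_graph", "else_graph");
  auto while_loop = tosa::CreateWhileLoopAttributeDirect(fbb, "cond_graph", "body_graph");
  (void)cond_if;     // offsets would normally be attached to a TosaOperator
  (void)while_loop;
}
// --- End editorial sketch.
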
-struct TransposeAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TransposeAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef TransposeAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_PERMS = 4
};
- const flatbuffers::Vector<int32_t> *perms() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_PERMS);
+ const ::flatbuffers::Vector<int32_t> *perms() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_PERMS);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_PERMS) &&
verifier.VerifyVector(perms()) &&
@@ -1739,33 +1587,32 @@ struct TransposeAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
struct TransposeAttributeBuilder {
typedef TransposeAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_perms(flatbuffers::Offset<flatbuffers::Vector<int32_t>> perms) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_perms(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> perms) {
fbb_.AddOffset(TransposeAttribute::VT_PERMS, perms);
}
- explicit TransposeAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit TransposeAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- TransposeAttributeBuilder &operator=(const TransposeAttributeBuilder &);
- flatbuffers::Offset<TransposeAttribute> Finish() {
+ ::flatbuffers::Offset<TransposeAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TransposeAttribute>(end);
+ auto o = ::flatbuffers::Offset<TransposeAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<TransposeAttribute> CreateTransposeAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> perms = 0) {
+inline ::flatbuffers::Offset<TransposeAttribute> CreateTransposeAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> perms = 0) {
TransposeAttributeBuilder builder_(_fbb);
builder_.add_perms(perms);
return builder_.Finish();
}
-inline flatbuffers::Offset<TransposeAttribute> CreateTransposeAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TransposeAttribute> CreateTransposeAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<int32_t> *perms = nullptr) {
auto perms__ = perms ? _fbb.CreateVector<int32_t>(*perms) : 0;
return tosa::CreateTransposeAttribute(
@@ -1773,15 +1620,15 @@ inline flatbuffers::Offset<TransposeAttribute> CreateTransposeAttributeDirect(
perms__);
}
-struct TableAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TableAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef TableAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_TABLE = 4
};
- const flatbuffers::Vector<int16_t> *table() const {
- return GetPointer<const flatbuffers::Vector<int16_t> *>(VT_TABLE);
+ const ::flatbuffers::Vector<int16_t> *table() const {
+ return GetPointer<const ::flatbuffers::Vector<int16_t> *>(VT_TABLE);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_TABLE) &&
verifier.VerifyVector(table()) &&
@@ -1791,33 +1638,32 @@ struct TableAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
struct TableAttributeBuilder {
typedef TableAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_table(flatbuffers::Offset<flatbuffers::Vector<int16_t>> table) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_table(::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> table) {
fbb_.AddOffset(TableAttribute::VT_TABLE, table);
}
- explicit TableAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit TableAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- TableAttributeBuilder &operator=(const TableAttributeBuilder &);
- flatbuffers::Offset<TableAttribute> Finish() {
+ ::flatbuffers::Offset<TableAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TableAttribute>(end);
+ auto o = ::flatbuffers::Offset<TableAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<TableAttribute> CreateTableAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::Vector<int16_t>> table = 0) {
+inline ::flatbuffers::Offset<TableAttribute> CreateTableAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int16_t>> table = 0) {
TableAttributeBuilder builder_(_fbb);
builder_.add_table(table);
return builder_.Finish();
}
-inline flatbuffers::Offset<TableAttribute> CreateTableAttributeDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TableAttribute> CreateTableAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
const std::vector<int16_t> *table = nullptr) {
auto table__ = table ? _fbb.CreateVector<int16_t>(*table) : 0;
return tosa::CreateTableAttribute(
@@ -1825,7 +1671,7 @@ inline flatbuffers::Offset<TableAttribute> CreateTableAttributeDirect(
table__);
}
-struct MatMulAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct MatMulAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef MatMulAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_A_ZP = 4,
@@ -1837,38 +1683,37 @@ struct MatMulAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t b_zp() const {
return GetField<int32_t>(VT_B_ZP, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_A_ZP) &&
- VerifyField<int32_t>(verifier, VT_B_ZP) &&
+ VerifyField<int32_t>(verifier, VT_A_ZP, 4) &&
+ VerifyField<int32_t>(verifier, VT_B_ZP, 4) &&
verifier.EndTable();
}
};
struct MatMulAttributeBuilder {
typedef MatMulAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_a_zp(int32_t a_zp) {
fbb_.AddElement<int32_t>(MatMulAttribute::VT_A_ZP, a_zp, 0);
}
void add_b_zp(int32_t b_zp) {
fbb_.AddElement<int32_t>(MatMulAttribute::VT_B_ZP, b_zp, 0);
}
- explicit MatMulAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit MatMulAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- MatMulAttributeBuilder &operator=(const MatMulAttributeBuilder &);
- flatbuffers::Offset<MatMulAttribute> Finish() {
+ ::flatbuffers::Offset<MatMulAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<MatMulAttribute>(end);
+ auto o = ::flatbuffers::Offset<MatMulAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<MatMulAttribute> CreateMatMulAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<MatMulAttribute> CreateMatMulAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
int32_t a_zp = 0,
int32_t b_zp = 0) {
MatMulAttributeBuilder builder_(_fbb);
@@ -1877,7 +1722,7 @@ inline flatbuffers::Offset<MatMulAttribute> CreateMatMulAttribute(
return builder_.Finish();
}
-struct FullyConnectedAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct FullyConnectedAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef FullyConnectedAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_INPUT_ZP = 4,
@@ -1889,38 +1734,37 @@ struct FullyConnectedAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Ta
int32_t weight_zp() const {
return GetField<int32_t>(VT_WEIGHT_ZP, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_INPUT_ZP) &&
- VerifyField<int32_t>(verifier, VT_WEIGHT_ZP) &&
+ VerifyField<int32_t>(verifier, VT_INPUT_ZP, 4) &&
+ VerifyField<int32_t>(verifier, VT_WEIGHT_ZP, 4) &&
verifier.EndTable();
}
};
struct FullyConnectedAttributeBuilder {
typedef FullyConnectedAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_input_zp(int32_t input_zp) {
fbb_.AddElement<int32_t>(FullyConnectedAttribute::VT_INPUT_ZP, input_zp, 0);
}
void add_weight_zp(int32_t weight_zp) {
fbb_.AddElement<int32_t>(FullyConnectedAttribute::VT_WEIGHT_ZP, weight_zp, 0);
}
- explicit FullyConnectedAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit FullyConnectedAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- FullyConnectedAttributeBuilder &operator=(const FullyConnectedAttributeBuilder &);
- flatbuffers::Offset<FullyConnectedAttribute> Finish() {
+ ::flatbuffers::Offset<FullyConnectedAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<FullyConnectedAttribute>(end);
+ auto o = ::flatbuffers::Offset<FullyConnectedAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<FullyConnectedAttribute> CreateFullyConnectedAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<FullyConnectedAttribute> CreateFullyConnectedAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
int32_t input_zp = 0,
int32_t weight_zp = 0) {
FullyConnectedAttributeBuilder builder_(_fbb);
@@ -1929,7 +1773,7 @@ inline flatbuffers::Offset<FullyConnectedAttribute> CreateFullyConnectedAttribut
return builder_.Finish();
}
-struct NegateAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct NegateAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef NegateAttributeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_INPUT1_ZP = 4,
@@ -1941,38 +1785,37 @@ struct NegateAttribute FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t output_zp() const {
return GetField<int32_t>(VT_OUTPUT_ZP, 0);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT_INPUT1_ZP) &&
- VerifyField<int32_t>(verifier, VT_OUTPUT_ZP) &&
+ VerifyField<int32_t>(verifier, VT_INPUT1_ZP, 4) &&
+ VerifyField<int32_t>(verifier, VT_OUTPUT_ZP, 4) &&
verifier.EndTable();
}
};
struct NegateAttributeBuilder {
typedef NegateAttribute Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_input1_zp(int32_t input1_zp) {
fbb_.AddElement<int32_t>(NegateAttribute::VT_INPUT1_ZP, input1_zp, 0);
}
void add_output_zp(int32_t output_zp) {
fbb_.AddElement<int32_t>(NegateAttribute::VT_OUTPUT_ZP, output_zp, 0);
}
- explicit NegateAttributeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit NegateAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- NegateAttributeBuilder &operator=(const NegateAttributeBuilder &);
- flatbuffers::Offset<NegateAttribute> Finish() {
+ ::flatbuffers::Offset<NegateAttribute> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<NegateAttribute>(end);
+ auto o = ::flatbuffers::Offset<NegateAttribute>(end);
return o;
}
};
-inline flatbuffers::Offset<NegateAttribute> CreateNegateAttribute(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<NegateAttribute> CreateNegateAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
int32_t input1_zp = 0,
int32_t output_zp = 0) {
NegateAttributeBuilder builder_(_fbb);
@@ -1981,7 +1824,178 @@ inline flatbuffers::Offset<NegateAttribute> CreateNegateAttribute(
return builder_.Finish();
}
-struct Version FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct CustomAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+ typedef CustomAttributeBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_OPERATOR_NAME = 4,
+ VT_DOMAIN_NAME = 6,
+ VT_IMPLEMENTATION_ATTRS = 8
+ };
+ const ::flatbuffers::String *operator_name() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_OPERATOR_NAME);
+ }
+ const ::flatbuffers::String *domain_name() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_DOMAIN_NAME);
+ }
+ const ::flatbuffers::Vector<uint8_t> *implementation_attrs() const {
+ return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_IMPLEMENTATION_ATTRS);
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_OPERATOR_NAME) &&
+ verifier.VerifyString(operator_name()) &&
+ VerifyOffset(verifier, VT_DOMAIN_NAME) &&
+ verifier.VerifyString(domain_name()) &&
+ VerifyOffset(verifier, VT_IMPLEMENTATION_ATTRS) &&
+ verifier.VerifyVector(implementation_attrs()) &&
+ verifier.EndTable();
+ }
+};
+
+struct CustomAttributeBuilder {
+ typedef CustomAttribute Table;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_operator_name(::flatbuffers::Offset<::flatbuffers::String> operator_name) {
+ fbb_.AddOffset(CustomAttribute::VT_OPERATOR_NAME, operator_name);
+ }
+ void add_domain_name(::flatbuffers::Offset<::flatbuffers::String> domain_name) {
+ fbb_.AddOffset(CustomAttribute::VT_DOMAIN_NAME, domain_name);
+ }
+ void add_implementation_attrs(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> implementation_attrs) {
+ fbb_.AddOffset(CustomAttribute::VT_IMPLEMENTATION_ATTRS, implementation_attrs);
+ }
+ explicit CustomAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ::flatbuffers::Offset<CustomAttribute> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = ::flatbuffers::Offset<CustomAttribute>(end);
+ return o;
+ }
+};
+
+inline ::flatbuffers::Offset<CustomAttribute> CreateCustomAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::String> operator_name = 0,
+ ::flatbuffers::Offset<::flatbuffers::String> domain_name = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> implementation_attrs = 0) {
+ CustomAttributeBuilder builder_(_fbb);
+ builder_.add_implementation_attrs(implementation_attrs);
+ builder_.add_domain_name(domain_name);
+ builder_.add_operator_name(operator_name);
+ return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<CustomAttribute> CreateCustomAttributeDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ const char *operator_name = nullptr,
+ const char *domain_name = nullptr,
+ const std::vector<uint8_t> *implementation_attrs = nullptr) {
+ auto operator_name__ = operator_name ? _fbb.CreateString(operator_name) : 0;
+ auto domain_name__ = domain_name ? _fbb.CreateString(domain_name) : 0;
+ auto implementation_attrs__ = implementation_attrs ? _fbb.CreateVector<uint8_t>(*implementation_attrs) : 0;
+ return tosa::CreateCustomAttribute(
+ _fbb,
+ operator_name__,
+ domain_name__,
+ implementation_attrs__);
+}
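
The new CustomAttribute table carries an operator name, a domain name and an opaque byte payload. A minimal usage sketch of the Direct helper above (the names and payload are illustrative, not taken from this change):

    #include "tosa_generated.h"
    #include <vector>

    ::flatbuffers::Offset<tosa::CustomAttribute> BuildExampleCustomAttribute(
        ::flatbuffers::FlatBufferBuilder &fbb) {
      // Hypothetical operator/domain names; implementation_attrs is an opaque blob.
      std::vector<uint8_t> attrs = {0x01, 0x02, 0x03};
      return tosa::CreateCustomAttributeDirect(
          fbb, "my_custom_op", "example.domain", &attrs);
    }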
+
+struct FFTAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+ typedef FFTAttributeBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_INVERSE = 4,
+ VT_LOCAL_BOUND = 6
+ };
+ bool inverse() const {
+ return GetField<uint8_t>(VT_INVERSE, 0) != 0;
+ }
+ bool local_bound() const {
+ return GetField<uint8_t>(VT_LOCAL_BOUND, 0) != 0;
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint8_t>(verifier, VT_INVERSE, 1) &&
+ VerifyField<uint8_t>(verifier, VT_LOCAL_BOUND, 1) &&
+ verifier.EndTable();
+ }
+};
+
+struct FFTAttributeBuilder {
+ typedef FFTAttribute Table;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_inverse(bool inverse) {
+ fbb_.AddElement<uint8_t>(FFTAttribute::VT_INVERSE, static_cast<uint8_t>(inverse), 0);
+ }
+ void add_local_bound(bool local_bound) {
+ fbb_.AddElement<uint8_t>(FFTAttribute::VT_LOCAL_BOUND, static_cast<uint8_t>(local_bound), 0);
+ }
+ explicit FFTAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ::flatbuffers::Offset<FFTAttribute> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = ::flatbuffers::Offset<FFTAttribute>(end);
+ return o;
+ }
+};
+
+inline ::flatbuffers::Offset<FFTAttribute> CreateFFTAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ bool inverse = false,
+ bool local_bound = false) {
+ FFTAttributeBuilder builder_(_fbb);
+ builder_.add_local_bound(local_bound);
+ builder_.add_inverse(inverse);
+ return builder_.Finish();
+}
+
+struct RFFTAttribute FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+ typedef RFFTAttributeBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_LOCAL_BOUND = 4
+ };
+ bool local_bound() const {
+ return GetField<uint8_t>(VT_LOCAL_BOUND, 0) != 0;
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<uint8_t>(verifier, VT_LOCAL_BOUND, 1) &&
+ verifier.EndTable();
+ }
+};
+
+struct RFFTAttributeBuilder {
+ typedef RFFTAttribute Table;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_local_bound(bool local_bound) {
+ fbb_.AddElement<uint8_t>(RFFTAttribute::VT_LOCAL_BOUND, static_cast<uint8_t>(local_bound), 0);
+ }
+ explicit RFFTAttributeBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ::flatbuffers::Offset<RFFTAttribute> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = ::flatbuffers::Offset<RFFTAttribute>(end);
+ return o;
+ }
+};
+
+inline ::flatbuffers::Offset<RFFTAttribute> CreateRFFTAttribute(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ bool local_bound = false) {
+ RFFTAttributeBuilder builder_(_fbb);
+ builder_.add_local_bound(local_bound);
+ return builder_.Finish();
+}
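
FFTAttribute and RFFTAttribute are new; the former carries inverse and local_bound flags, the latter only local_bound. A sketch of building both with the helpers above (flag values chosen purely for illustration):

    #include "tosa_generated.h"

    void BuildFftAttributes(::flatbuffers::FlatBufferBuilder &fbb) {
      // Forward FFT with local_bound set; RFFT with the default behaviour.
      auto fft  = tosa::CreateFFTAttribute(fbb, /*inverse=*/false, /*local_bound=*/true);
      auto rfft = tosa::CreateRFFTAttribute(fbb, /*local_bound=*/false);
      (void)fft;
      (void)rfft;
    }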
+
+struct Version FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef VersionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT__MAJOR = 4,
@@ -1990,60 +2004,59 @@ struct Version FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT__DRAFT = 10
};
int32_t _major() const {
- return GetField<int32_t>(VT__MAJOR, 0);
+ return GetField<int32_t>(VT__MAJOR, -1);
}
int32_t _minor() const {
- return GetField<int32_t>(VT__MINOR, 31);
+ return GetField<int32_t>(VT__MINOR, -1);
}
int32_t _patch() const {
- return GetField<int32_t>(VT__PATCH, 0);
+ return GetField<int32_t>(VT__PATCH, -1);
}
bool _draft() const {
return GetField<uint8_t>(VT__DRAFT, 1) != 0;
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<int32_t>(verifier, VT__MAJOR) &&
- VerifyField<int32_t>(verifier, VT__MINOR) &&
- VerifyField<int32_t>(verifier, VT__PATCH) &&
- VerifyField<uint8_t>(verifier, VT__DRAFT) &&
+ VerifyField<int32_t>(verifier, VT__MAJOR, 4) &&
+ VerifyField<int32_t>(verifier, VT__MINOR, 4) &&
+ VerifyField<int32_t>(verifier, VT__PATCH, 4) &&
+ VerifyField<uint8_t>(verifier, VT__DRAFT, 1) &&
verifier.EndTable();
}
};
struct VersionBuilder {
typedef Version Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add__major(int32_t _major) {
- fbb_.AddElement<int32_t>(Version::VT__MAJOR, _major, 0);
+ fbb_.AddElement<int32_t>(Version::VT__MAJOR, _major, -1);
}
void add__minor(int32_t _minor) {
- fbb_.AddElement<int32_t>(Version::VT__MINOR, _minor, 31);
+ fbb_.AddElement<int32_t>(Version::VT__MINOR, _minor, -1);
}
void add__patch(int32_t _patch) {
- fbb_.AddElement<int32_t>(Version::VT__PATCH, _patch, 0);
+ fbb_.AddElement<int32_t>(Version::VT__PATCH, _patch, -1);
}
void add__draft(bool _draft) {
fbb_.AddElement<uint8_t>(Version::VT__DRAFT, static_cast<uint8_t>(_draft), 1);
}
- explicit VersionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit VersionBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- VersionBuilder &operator=(const VersionBuilder &);
- flatbuffers::Offset<Version> Finish() {
+ ::flatbuffers::Offset<Version> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<Version>(end);
+ auto o = ::flatbuffers::Offset<Version>(end);
return o;
}
};
-inline flatbuffers::Offset<Version> CreateVersion(
- flatbuffers::FlatBufferBuilder &_fbb,
- int32_t _major = 0,
- int32_t _minor = 31,
- int32_t _patch = 0,
+inline ::flatbuffers::Offset<Version> CreateVersion(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t _major = -1,
+ int32_t _minor = -1,
+ int32_t _patch = -1,
bool _draft = true) {
VersionBuilder builder_(_fbb);
builder_.add__patch(_patch);
@@ -2053,100 +2066,137 @@ inline flatbuffers::Offset<Version> CreateVersion(
return builder_.Finish();
}
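
The Version defaults change from 0.31.0 to -1/-1/-1, so a field that was never written now reads back as -1 instead of masquerading as a real version number. A sketch of writing the current serializer version explicitly (0.100.0 draft, matching the TOSA_VERSION_* macros updated later in this change):

    #include "tosa_generated.h"

    ::flatbuffers::Offset<tosa::Version> BuildCurrentVersion(
        ::flatbuffers::FlatBufferBuilder &fbb) {
      // 0.100.0, draft = true; see TOSA_VERSION_MAJOR/MINOR/PATCH/DRAFT below.
      return tosa::CreateVersion(fbb, 0, 100, 0, true);
    }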
-struct TosaTensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TosaTensor FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef TosaTensorBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NAME = 4,
VT_SHAPE = 6,
VT_TYPE = 8,
- VT_DATA = 10
+ VT_DATA = 10,
+ VT_VARIABLE = 12,
+ VT_IS_UNRANKED = 14,
+ VT_VARIABLE_NAME = 16
};
- const flatbuffers::String *name() const {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
+ const ::flatbuffers::String *name() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_NAME);
}
- const flatbuffers::Vector<int32_t> *shape() const {
- return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_SHAPE);
+ const ::flatbuffers::Vector<int32_t> *shape() const {
+ return GetPointer<const ::flatbuffers::Vector<int32_t> *>(VT_SHAPE);
}
tosa::DType type() const {
return static_cast<tosa::DType>(GetField<uint32_t>(VT_TYPE, 0));
}
- const flatbuffers::Vector<uint8_t> *data() const {
- return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DATA);
+ const ::flatbuffers::Vector<uint8_t> *data() const {
+ return GetPointer<const ::flatbuffers::Vector<uint8_t> *>(VT_DATA);
+ }
+ bool variable() const {
+ return GetField<uint8_t>(VT_VARIABLE, 0) != 0;
+ }
+ bool is_unranked() const {
+ return GetField<uint8_t>(VT_IS_UNRANKED, 0) != 0;
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ const ::flatbuffers::String *variable_name() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_VARIABLE_NAME);
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
VerifyOffset(verifier, VT_SHAPE) &&
verifier.VerifyVector(shape()) &&
- VerifyField<uint32_t>(verifier, VT_TYPE) &&
+ VerifyField<uint32_t>(verifier, VT_TYPE, 4) &&
VerifyOffset(verifier, VT_DATA) &&
verifier.VerifyVector(data()) &&
+ VerifyField<uint8_t>(verifier, VT_VARIABLE, 1) &&
+ VerifyField<uint8_t>(verifier, VT_IS_UNRANKED, 1) &&
+ VerifyOffset(verifier, VT_VARIABLE_NAME) &&
+ verifier.VerifyString(variable_name()) &&
verifier.EndTable();
}
};
struct TosaTensorBuilder {
typedef TosaTensor Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
fbb_.AddOffset(TosaTensor::VT_NAME, name);
}
- void add_shape(flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape) {
+ void add_shape(::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape) {
fbb_.AddOffset(TosaTensor::VT_SHAPE, shape);
}
void add_type(tosa::DType type) {
fbb_.AddElement<uint32_t>(TosaTensor::VT_TYPE, static_cast<uint32_t>(type), 0);
}
- void add_data(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data) {
+ void add_data(::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> data) {
fbb_.AddOffset(TosaTensor::VT_DATA, data);
}
- explicit TosaTensorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ void add_variable(bool variable) {
+ fbb_.AddElement<uint8_t>(TosaTensor::VT_VARIABLE, static_cast<uint8_t>(variable), 0);
+ }
+ void add_is_unranked(bool is_unranked) {
+ fbb_.AddElement<uint8_t>(TosaTensor::VT_IS_UNRANKED, static_cast<uint8_t>(is_unranked), 0);
+ }
+ void add_variable_name(::flatbuffers::Offset<::flatbuffers::String> variable_name) {
+ fbb_.AddOffset(TosaTensor::VT_VARIABLE_NAME, variable_name);
+ }
+ explicit TosaTensorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- TosaTensorBuilder &operator=(const TosaTensorBuilder &);
- flatbuffers::Offset<TosaTensor> Finish() {
+ ::flatbuffers::Offset<TosaTensor> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaTensor>(end);
+ auto o = ::flatbuffers::Offset<TosaTensor>(end);
return o;
}
};
-inline flatbuffers::Offset<TosaTensor> CreateTosaTensor(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape = 0,
+inline ::flatbuffers::Offset<TosaTensor> CreateTosaTensor(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::String> name = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<int32_t>> shape = 0,
tosa::DType type = tosa::DType_UNKNOWN,
- flatbuffers::Offset<flatbuffers::Vector<uint8_t>> data = 0) {
+ ::flatbuffers::Offset<::flatbuffers::Vector<uint8_t>> data = 0,
+ bool variable = false,
+ bool is_unranked = false,
+ ::flatbuffers::Offset<::flatbuffers::String> variable_name = 0) {
TosaTensorBuilder builder_(_fbb);
+ builder_.add_variable_name(variable_name);
builder_.add_data(data);
builder_.add_type(type);
builder_.add_shape(shape);
builder_.add_name(name);
+ builder_.add_is_unranked(is_unranked);
+ builder_.add_variable(variable);
return builder_.Finish();
}
-inline flatbuffers::Offset<TosaTensor> CreateTosaTensorDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TosaTensor> CreateTosaTensorDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
const char *name = nullptr,
const std::vector<int32_t> *shape = nullptr,
tosa::DType type = tosa::DType_UNKNOWN,
- const std::vector<uint8_t> *data = nullptr) {
+ const std::vector<uint8_t> *data = nullptr,
+ bool variable = false,
+ bool is_unranked = false,
+ const char *variable_name = nullptr) {
auto name__ = name ? _fbb.CreateString(name) : 0;
auto shape__ = shape ? _fbb.CreateVector<int32_t>(*shape) : 0;
if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 8); }
auto data__ = data ? _fbb.CreateVector<uint8_t>(*data) : 0;
+ auto variable_name__ = variable_name ? _fbb.CreateString(variable_name) : 0;
return tosa::CreateTosaTensor(
_fbb,
name__,
shape__,
type,
- data__);
+ data__,
+ variable,
+ is_unranked,
+ variable_name__);
}
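
TosaTensor grows three fields: variable, is_unranked and variable_name. A sketch of the extended Direct helper (tensor name, shape, dtype and variable name are placeholders):

    #include "tosa_generated.h"
    #include <vector>

    ::flatbuffers::Offset<tosa::TosaTensor> BuildExampleVariableTensor(
        ::flatbuffers::FlatBufferBuilder &fbb) {
      std::vector<int32_t> shape = {1, 8, 8, 16};  // placeholder shape
      return tosa::CreateTosaTensorDirect(
          fbb, "state0", &shape, tosa::DType_UNKNOWN, /*data=*/nullptr,
          /*variable=*/true, /*is_unranked=*/false, /*variable_name=*/"state0_var");
    }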
-struct TosaOperator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TosaOperator FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef TosaOperatorBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_OP = 4,
@@ -2180,15 +2230,6 @@ struct TosaOperator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const tosa::AxisAttribute *attribute_as_AxisAttribute() const {
return attribute_type() == tosa::Attribute_AxisAttribute ? static_cast<const tosa::AxisAttribute *>(attribute()) : nullptr;
}
- const tosa::ReshapeAttribute *attribute_as_ReshapeAttribute() const {
- return attribute_type() == tosa::Attribute_ReshapeAttribute ? static_cast<const tosa::ReshapeAttribute *>(attribute()) : nullptr;
- }
- const tosa::SliceAttribute *attribute_as_SliceAttribute() const {
- return attribute_type() == tosa::Attribute_SliceAttribute ? static_cast<const tosa::SliceAttribute *>(attribute()) : nullptr;
- }
- const tosa::TileAttribute *attribute_as_TileAttribute() const {
- return attribute_type() == tosa::Attribute_TileAttribute ? static_cast<const tosa::TileAttribute *>(attribute()) : nullptr;
- }
const tosa::ResizeAttribute *attribute_as_ResizeAttribute() const {
return attribute_type() == tosa::Attribute_ResizeAttribute ? static_cast<const tosa::ResizeAttribute *>(attribute()) : nullptr;
}
@@ -2225,16 +2266,25 @@ struct TosaOperator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const tosa::NegateAttribute *attribute_as_NegateAttribute() const {
return attribute_type() == tosa::Attribute_NegateAttribute ? static_cast<const tosa::NegateAttribute *>(attribute()) : nullptr;
}
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *inputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_INPUTS);
+ const tosa::CustomAttribute *attribute_as_CustomAttribute() const {
+ return attribute_type() == tosa::Attribute_CustomAttribute ? static_cast<const tosa::CustomAttribute *>(attribute()) : nullptr;
+ }
+ const tosa::FFTAttribute *attribute_as_FFTAttribute() const {
+ return attribute_type() == tosa::Attribute_FFTAttribute ? static_cast<const tosa::FFTAttribute *>(attribute()) : nullptr;
+ }
+ const tosa::RFFTAttribute *attribute_as_RFFTAttribute() const {
+ return attribute_type() == tosa::Attribute_RFFTAttribute ? static_cast<const tosa::RFFTAttribute *>(attribute()) : nullptr;
}
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *outputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_OUTPUTS);
+ const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *inputs() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_INPUTS);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_OUTPUTS);
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyField<uint32_t>(verifier, VT_OP) &&
- VerifyField<uint8_t>(verifier, VT_ATTRIBUTE_TYPE) &&
+ VerifyField<uint32_t>(verifier, VT_OP, 4) &&
+ VerifyField<uint8_t>(verifier, VT_ATTRIBUTE_TYPE, 1) &&
VerifyOffset(verifier, VT_ATTRIBUTE) &&
VerifyAttribute(verifier, attribute(), attribute_type()) &&
VerifyOffset(verifier, VT_INPUTS) &&
@@ -2267,18 +2317,6 @@ template<> inline const tosa::AxisAttribute *TosaOperator::attribute_as<tosa::Ax
return attribute_as_AxisAttribute();
}
-template<> inline const tosa::ReshapeAttribute *TosaOperator::attribute_as<tosa::ReshapeAttribute>() const {
- return attribute_as_ReshapeAttribute();
-}
-
-template<> inline const tosa::SliceAttribute *TosaOperator::attribute_as<tosa::SliceAttribute>() const {
- return attribute_as_SliceAttribute();
-}
-
-template<> inline const tosa::TileAttribute *TosaOperator::attribute_as<tosa::TileAttribute>() const {
- return attribute_as_TileAttribute();
-}
-
template<> inline const tosa::ResizeAttribute *TosaOperator::attribute_as<tosa::ResizeAttribute>() const {
return attribute_as_ResizeAttribute();
}
@@ -2327,44 +2365,55 @@ template<> inline const tosa::NegateAttribute *TosaOperator::attribute_as<tosa::
return attribute_as_NegateAttribute();
}
+template<> inline const tosa::CustomAttribute *TosaOperator::attribute_as<tosa::CustomAttribute>() const {
+ return attribute_as_CustomAttribute();
+}
+
+template<> inline const tosa::FFTAttribute *TosaOperator::attribute_as<tosa::FFTAttribute>() const {
+ return attribute_as_FFTAttribute();
+}
+
+template<> inline const tosa::RFFTAttribute *TosaOperator::attribute_as<tosa::RFFTAttribute>() const {
+ return attribute_as_RFFTAttribute();
+}
+
struct TosaOperatorBuilder {
typedef TosaOperator Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
void add_op(tosa::Op op) {
fbb_.AddElement<uint32_t>(TosaOperator::VT_OP, static_cast<uint32_t>(op), 0);
}
void add_attribute_type(tosa::Attribute attribute_type) {
fbb_.AddElement<uint8_t>(TosaOperator::VT_ATTRIBUTE_TYPE, static_cast<uint8_t>(attribute_type), 0);
}
- void add_attribute(flatbuffers::Offset<void> attribute) {
+ void add_attribute(::flatbuffers::Offset<void> attribute) {
fbb_.AddOffset(TosaOperator::VT_ATTRIBUTE, attribute);
}
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs) {
+ void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> inputs) {
fbb_.AddOffset(TosaOperator::VT_INPUTS, inputs);
}
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs) {
+ void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs) {
fbb_.AddOffset(TosaOperator::VT_OUTPUTS, outputs);
}
- explicit TosaOperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit TosaOperatorBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- TosaOperatorBuilder &operator=(const TosaOperatorBuilder &);
- flatbuffers::Offset<TosaOperator> Finish() {
+ ::flatbuffers::Offset<TosaOperator> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaOperator>(end);
+ auto o = ::flatbuffers::Offset<TosaOperator>(end);
return o;
}
};
-inline flatbuffers::Offset<TosaOperator> CreateTosaOperator(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TosaOperator> CreateTosaOperator(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
tosa::Op op = tosa::Op_UNKNOWN,
tosa::Attribute attribute_type = tosa::Attribute_NONE,
- flatbuffers::Offset<void> attribute = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs = 0) {
+ ::flatbuffers::Offset<void> attribute = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> inputs = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs = 0) {
TosaOperatorBuilder builder_(_fbb);
builder_.add_outputs(outputs);
builder_.add_inputs(inputs);
@@ -2374,15 +2423,15 @@ inline flatbuffers::Offset<TosaOperator> CreateTosaOperator(
return builder_.Finish();
}
-inline flatbuffers::Offset<TosaOperator> CreateTosaOperatorDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TosaOperator> CreateTosaOperatorDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
tosa::Op op = tosa::Op_UNKNOWN,
tosa::Attribute attribute_type = tosa::Attribute_NONE,
- flatbuffers::Offset<void> attribute = 0,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *inputs = nullptr,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *outputs = nullptr) {
- auto inputs__ = inputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*inputs) : 0;
- auto outputs__ = outputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*outputs) : 0;
+ ::flatbuffers::Offset<void> attribute = 0,
+ const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *inputs = nullptr,
+ const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs = nullptr) {
+ auto inputs__ = inputs ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*inputs) : 0;
+ auto outputs__ = outputs ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*outputs) : 0;
return tosa::CreateTosaOperator(
_fbb,
op,
@@ -2392,7 +2441,7 @@ inline flatbuffers::Offset<TosaOperator> CreateTosaOperatorDirect(
outputs__);
}
-struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef TosaBasicBlockBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NAME = 4,
@@ -2401,22 +2450,22 @@ struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
VT_INPUTS = 10,
VT_OUTPUTS = 12
};
- const flatbuffers::String *name() const {
- return GetPointer<const flatbuffers::String *>(VT_NAME);
+ const ::flatbuffers::String *name() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_NAME);
}
- const flatbuffers::Vector<flatbuffers::Offset<tosa::TosaOperator>> *operators() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tosa::TosaOperator>> *>(VT_OPERATORS);
+ const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaOperator>> *operators() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaOperator>> *>(VT_OPERATORS);
}
- const flatbuffers::Vector<flatbuffers::Offset<tosa::TosaTensor>> *tensors() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tosa::TosaTensor>> *>(VT_TENSORS);
+ const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaTensor>> *tensors() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaTensor>> *>(VT_TENSORS);
}
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *inputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_INPUTS);
+ const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *inputs() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_INPUTS);
}
- const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *outputs() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_OUTPUTS);
+ const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>> *>(VT_OUTPUTS);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
@@ -2438,42 +2487,41 @@ struct TosaBasicBlock FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
struct TosaBasicBlockBuilder {
typedef TosaBasicBlock Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
fbb_.AddOffset(TosaBasicBlock::VT_NAME, name);
}
- void add_operators(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tosa::TosaOperator>>> operators) {
+ void add_operators(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaOperator>>> operators) {
fbb_.AddOffset(TosaBasicBlock::VT_OPERATORS, operators);
}
- void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tosa::TosaTensor>>> tensors) {
+ void add_tensors(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaTensor>>> tensors) {
fbb_.AddOffset(TosaBasicBlock::VT_TENSORS, tensors);
}
- void add_inputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs) {
+ void add_inputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> inputs) {
fbb_.AddOffset(TosaBasicBlock::VT_INPUTS, inputs);
}
- void add_outputs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs) {
+ void add_outputs(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs) {
fbb_.AddOffset(TosaBasicBlock::VT_OUTPUTS, outputs);
}
- explicit TosaBasicBlockBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit TosaBasicBlockBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- TosaBasicBlockBuilder &operator=(const TosaBasicBlockBuilder &);
- flatbuffers::Offset<TosaBasicBlock> Finish() {
+ ::flatbuffers::Offset<TosaBasicBlock> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaBasicBlock>(end);
+ auto o = ::flatbuffers::Offset<TosaBasicBlock>(end);
return o;
}
};
-inline flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlock(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<flatbuffers::String> name = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tosa::TosaOperator>>> operators = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tosa::TosaTensor>>> tensors = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> inputs = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> outputs = 0) {
+inline ::flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlock(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::String> name = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaOperator>>> operators = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaTensor>>> tensors = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> inputs = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<::flatbuffers::String>>> outputs = 0) {
TosaBasicBlockBuilder builder_(_fbb);
builder_.add_outputs(outputs);
builder_.add_inputs(inputs);
@@ -2483,18 +2531,18 @@ inline flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlock(
return builder_.Finish();
}
-inline flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlockDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
+inline ::flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlockDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
const char *name = nullptr,
- const std::vector<flatbuffers::Offset<tosa::TosaOperator>> *operators = nullptr,
- const std::vector<flatbuffers::Offset<tosa::TosaTensor>> *tensors = nullptr,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *inputs = nullptr,
- const std::vector<flatbuffers::Offset<flatbuffers::String>> *outputs = nullptr) {
+ const std::vector<::flatbuffers::Offset<tosa::TosaOperator>> *operators = nullptr,
+ const std::vector<::flatbuffers::Offset<tosa::TosaTensor>> *tensors = nullptr,
+ const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *inputs = nullptr,
+ const std::vector<::flatbuffers::Offset<::flatbuffers::String>> *outputs = nullptr) {
auto name__ = name ? _fbb.CreateString(name) : 0;
- auto operators__ = operators ? _fbb.CreateVector<flatbuffers::Offset<tosa::TosaOperator>>(*operators) : 0;
- auto tensors__ = tensors ? _fbb.CreateVector<flatbuffers::Offset<tosa::TosaTensor>>(*tensors) : 0;
- auto inputs__ = inputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*inputs) : 0;
- auto outputs__ = outputs ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*outputs) : 0;
+ auto operators__ = operators ? _fbb.CreateVector<::flatbuffers::Offset<tosa::TosaOperator>>(*operators) : 0;
+ auto tensors__ = tensors ? _fbb.CreateVector<::flatbuffers::Offset<tosa::TosaTensor>>(*tensors) : 0;
+ auto inputs__ = inputs ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*inputs) : 0;
+ auto outputs__ = outputs ? _fbb.CreateVector<::flatbuffers::Offset<::flatbuffers::String>>(*outputs) : 0;
return tosa::CreateTosaBasicBlock(
_fbb,
name__,
@@ -2504,73 +2552,139 @@ inline flatbuffers::Offset<TosaBasicBlock> CreateTosaBasicBlockDirect(
outputs__);
}
-struct TosaGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+struct TosaRegion FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
+ typedef TosaRegionBuilder Builder;
+ enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+ VT_NAME = 4,
+ VT_BLOCKS = 6
+ };
+ const ::flatbuffers::String *name() const {
+ return GetPointer<const ::flatbuffers::String *>(VT_NAME);
+ }
+ const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaBasicBlock>> *blocks() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaBasicBlock>> *>(VT_BLOCKS);
+ }
+ bool Verify(::flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyOffset(verifier, VT_NAME) &&
+ verifier.VerifyString(name()) &&
+ VerifyOffset(verifier, VT_BLOCKS) &&
+ verifier.VerifyVector(blocks()) &&
+ verifier.VerifyVectorOfTables(blocks()) &&
+ verifier.EndTable();
+ }
+};
+
+struct TosaRegionBuilder {
+ typedef TosaRegion Table;
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_name(::flatbuffers::Offset<::flatbuffers::String> name) {
+ fbb_.AddOffset(TosaRegion::VT_NAME, name);
+ }
+ void add_blocks(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaBasicBlock>>> blocks) {
+ fbb_.AddOffset(TosaRegion::VT_BLOCKS, blocks);
+ }
+ explicit TosaRegionBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ ::flatbuffers::Offset<TosaRegion> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = ::flatbuffers::Offset<TosaRegion>(end);
+ return o;
+ }
+};
+
+inline ::flatbuffers::Offset<TosaRegion> CreateTosaRegion(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<::flatbuffers::String> name = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaBasicBlock>>> blocks = 0) {
+ TosaRegionBuilder builder_(_fbb);
+ builder_.add_blocks(blocks);
+ builder_.add_name(name);
+ return builder_.Finish();
+}
+
+inline ::flatbuffers::Offset<TosaRegion> CreateTosaRegionDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ const char *name = nullptr,
+ const std::vector<::flatbuffers::Offset<tosa::TosaBasicBlock>> *blocks = nullptr) {
+ auto name__ = name ? _fbb.CreateString(name) : 0;
+ auto blocks__ = blocks ? _fbb.CreateVector<::flatbuffers::Offset<tosa::TosaBasicBlock>>(*blocks) : 0;
+ return tosa::CreateTosaRegion(
+ _fbb,
+ name__,
+ blocks__);
+}
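
TosaRegion is a new container sitting between the graph and its basic blocks. A sketch of assembling one region from block offsets already created with CreateTosaBasicBlock(Direct):

    #include "tosa_generated.h"
    #include <vector>

    ::flatbuffers::Offset<tosa::TosaRegion> BuildMainRegion(
        ::flatbuffers::FlatBufferBuilder &fbb,
        const std::vector<::flatbuffers::Offset<tosa::TosaBasicBlock>> &blocks) {
      // "main" is only a conventional name; any string is accepted.
      return tosa::CreateTosaRegionDirect(fbb, "main", &blocks);
    }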
+
+struct TosaGraph FLATBUFFERS_FINAL_CLASS : private ::flatbuffers::Table {
typedef TosaGraphBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_VERSION = 4,
- VT_BLOCKS = 6
+ VT_REGIONS = 6
};
const tosa::Version *version() const {
return GetPointer<const tosa::Version *>(VT_VERSION);
}
- const flatbuffers::Vector<flatbuffers::Offset<tosa::TosaBasicBlock>> *blocks() const {
- return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tosa::TosaBasicBlock>> *>(VT_BLOCKS);
+ const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaRegion>> *regions() const {
+ return GetPointer<const ::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaRegion>> *>(VT_REGIONS);
}
- bool Verify(flatbuffers::Verifier &verifier) const {
+ bool Verify(::flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
- VerifyOffset(verifier, VT_VERSION) &&
+ VerifyOffsetRequired(verifier, VT_VERSION) &&
verifier.VerifyTable(version()) &&
- VerifyOffset(verifier, VT_BLOCKS) &&
- verifier.VerifyVector(blocks()) &&
- verifier.VerifyVectorOfTables(blocks()) &&
+ VerifyOffset(verifier, VT_REGIONS) &&
+ verifier.VerifyVector(regions()) &&
+ verifier.VerifyVectorOfTables(regions()) &&
verifier.EndTable();
}
};
struct TosaGraphBuilder {
typedef TosaGraph Table;
- flatbuffers::FlatBufferBuilder &fbb_;
- flatbuffers::uoffset_t start_;
- void add_version(flatbuffers::Offset<tosa::Version> version) {
+ ::flatbuffers::FlatBufferBuilder &fbb_;
+ ::flatbuffers::uoffset_t start_;
+ void add_version(::flatbuffers::Offset<tosa::Version> version) {
fbb_.AddOffset(TosaGraph::VT_VERSION, version);
}
- void add_blocks(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tosa::TosaBasicBlock>>> blocks) {
- fbb_.AddOffset(TosaGraph::VT_BLOCKS, blocks);
+ void add_regions(::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaRegion>>> regions) {
+ fbb_.AddOffset(TosaGraph::VT_REGIONS, regions);
}
- explicit TosaGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ explicit TosaGraphBuilder(::flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
- TosaGraphBuilder &operator=(const TosaGraphBuilder &);
- flatbuffers::Offset<TosaGraph> Finish() {
+ ::flatbuffers::Offset<TosaGraph> Finish() {
const auto end = fbb_.EndTable(start_);
- auto o = flatbuffers::Offset<TosaGraph>(end);
+ auto o = ::flatbuffers::Offset<TosaGraph>(end);
+ fbb_.Required(o, TosaGraph::VT_VERSION);
return o;
}
};
-inline flatbuffers::Offset<TosaGraph> CreateTosaGraph(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<tosa::Version> version = 0,
- flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tosa::TosaBasicBlock>>> blocks = 0) {
+inline ::flatbuffers::Offset<TosaGraph> CreateTosaGraph(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<tosa::Version> version = 0,
+ ::flatbuffers::Offset<::flatbuffers::Vector<::flatbuffers::Offset<tosa::TosaRegion>>> regions = 0) {
TosaGraphBuilder builder_(_fbb);
- builder_.add_blocks(blocks);
+ builder_.add_regions(regions);
builder_.add_version(version);
return builder_.Finish();
}
-inline flatbuffers::Offset<TosaGraph> CreateTosaGraphDirect(
- flatbuffers::FlatBufferBuilder &_fbb,
- flatbuffers::Offset<tosa::Version> version = 0,
- const std::vector<flatbuffers::Offset<tosa::TosaBasicBlock>> *blocks = nullptr) {
- auto blocks__ = blocks ? _fbb.CreateVector<flatbuffers::Offset<tosa::TosaBasicBlock>>(*blocks) : 0;
+inline ::flatbuffers::Offset<TosaGraph> CreateTosaGraphDirect(
+ ::flatbuffers::FlatBufferBuilder &_fbb,
+ ::flatbuffers::Offset<tosa::Version> version = 0,
+ const std::vector<::flatbuffers::Offset<tosa::TosaRegion>> *regions = nullptr) {
+ auto regions__ = regions ? _fbb.CreateVector<::flatbuffers::Offset<tosa::TosaRegion>>(*regions) : 0;
return tosa::CreateTosaGraph(
_fbb,
version,
- blocks__);
+ regions__);
}
-inline bool VerifyAttribute(flatbuffers::Verifier &verifier, const void *obj, Attribute type) {
+inline bool VerifyAttribute(::flatbuffers::Verifier &verifier, const void *obj, Attribute type) {
switch (type) {
case Attribute_NONE: {
return true;
@@ -2595,18 +2709,6 @@ inline bool VerifyAttribute(flatbuffers::Verifier &verifier, const void *obj, At
auto ptr = reinterpret_cast<const tosa::AxisAttribute *>(obj);
return verifier.VerifyTable(ptr);
}
- case Attribute_ReshapeAttribute: {
- auto ptr = reinterpret_cast<const tosa::ReshapeAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_SliceAttribute: {
- auto ptr = reinterpret_cast<const tosa::SliceAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
- case Attribute_TileAttribute: {
- auto ptr = reinterpret_cast<const tosa::TileAttribute *>(obj);
- return verifier.VerifyTable(ptr);
- }
case Attribute_ResizeAttribute: {
auto ptr = reinterpret_cast<const tosa::ResizeAttribute *>(obj);
return verifier.VerifyTable(ptr);
@@ -2655,14 +2757,26 @@ inline bool VerifyAttribute(flatbuffers::Verifier &verifier, const void *obj, At
auto ptr = reinterpret_cast<const tosa::NegateAttribute *>(obj);
return verifier.VerifyTable(ptr);
}
+ case Attribute_CustomAttribute: {
+ auto ptr = reinterpret_cast<const tosa::CustomAttribute *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case Attribute_FFTAttribute: {
+ auto ptr = reinterpret_cast<const tosa::FFTAttribute *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case Attribute_RFFTAttribute: {
+ auto ptr = reinterpret_cast<const tosa::RFFTAttribute *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return true;
}
}
-inline bool VerifyAttributeVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
+inline bool VerifyAttributeVector(::flatbuffers::Verifier &verifier, const ::flatbuffers::Vector<::flatbuffers::Offset<void>> *values, const ::flatbuffers::Vector<uint8_t> *types) {
if (!values || !types) return !values && !types;
if (values->size() != types->size()) return false;
- for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
+ for (::flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
if (!VerifyAttribute(
verifier, values->Get(i), types->GetEnum<Attribute>(i))) {
return false;
@@ -2672,11 +2786,11 @@ inline bool VerifyAttributeVector(flatbuffers::Verifier &verifier, const flatbuf
}
inline const tosa::TosaGraph *GetTosaGraph(const void *buf) {
- return flatbuffers::GetRoot<tosa::TosaGraph>(buf);
+ return ::flatbuffers::GetRoot<tosa::TosaGraph>(buf);
}
inline const tosa::TosaGraph *GetSizePrefixedTosaGraph(const void *buf) {
- return flatbuffers::GetSizePrefixedRoot<tosa::TosaGraph>(buf);
+ return ::flatbuffers::GetSizePrefixedRoot<tosa::TosaGraph>(buf);
}
inline const char *TosaGraphIdentifier() {
@@ -2684,17 +2798,22 @@ inline const char *TosaGraphIdentifier() {
}
inline bool TosaGraphBufferHasIdentifier(const void *buf) {
- return flatbuffers::BufferHasIdentifier(
+ return ::flatbuffers::BufferHasIdentifier(
buf, TosaGraphIdentifier());
}
+inline bool SizePrefixedTosaGraphBufferHasIdentifier(const void *buf) {
+ return ::flatbuffers::BufferHasIdentifier(
+ buf, TosaGraphIdentifier(), true);
+}
+
inline bool VerifyTosaGraphBuffer(
- flatbuffers::Verifier &verifier) {
+ ::flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<tosa::TosaGraph>(TosaGraphIdentifier());
}
inline bool VerifySizePrefixedTosaGraphBuffer(
- flatbuffers::Verifier &verifier) {
+ ::flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<tosa::TosaGraph>(TosaGraphIdentifier());
}
@@ -2703,14 +2822,14 @@ inline const char *TosaGraphExtension() {
}
inline void FinishTosaGraphBuffer(
- flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<tosa::TosaGraph> root) {
+ ::flatbuffers::FlatBufferBuilder &fbb,
+ ::flatbuffers::Offset<tosa::TosaGraph> root) {
fbb.Finish(root, TosaGraphIdentifier());
}
inline void FinishSizePrefixedTosaGraphBuffer(
- flatbuffers::FlatBufferBuilder &fbb,
- flatbuffers::Offset<tosa::TosaGraph> root) {
+ ::flatbuffers::FlatBufferBuilder &fbb,
+ ::flatbuffers::Offset<tosa::TosaGraph> root) {
fbb.FinishSizePrefixed(root, TosaGraphIdentifier());
}
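
Reading a finished buffer back goes through the identifier-aware helpers above. A sketch, assuming buf/size describe a buffer produced by FinishTosaGraphBuffer:

    #include "tosa_generated.h"

    const tosa::TosaGraph *LoadAndVerify(const uint8_t *buf, size_t size) {
      ::flatbuffers::Verifier verifier(buf, size);
      if (!tosa::VerifyTosaGraphBuffer(verifier)) {
        return nullptr;  // malformed buffer or wrong file identifier
      }
      return tosa::GetTosaGraph(buf);
    }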
diff --git a/include/tosa_serialization_handler.h b/include/tosa_serialization_handler.h
index 53dcf1a..f5f9e58 100644
--- a/include/tosa_serialization_handler.h
+++ b/include/tosa_serialization_handler.h
@@ -1,5 +1,5 @@
-// Copyright (c) 2020-2021, ARM Limited.
+// Copyright (c) 2020-2024, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@
#include "attribute.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
+#include "float_utils.h"
#include "numpy_utils.h"
#include "tosa_generated.h"
#include <cstdint>
@@ -27,7 +28,7 @@
// Keep version number in sync with the version default value with schema/tosa.fbs
#define TOSA_VERSION_MAJOR 0
-#define TOSA_VERSION_MINOR 31
+#define TOSA_VERSION_MINOR 100
#define TOSA_VERSION_PATCH 0
#define TOSA_VERSION_DRAFT true
#define TENSOR_BUFFER_FORCE_ALIGNMENT 8
@@ -57,7 +58,7 @@ struct TosaVersion
enum class compat_t
{
COMPLETELY_COMPATIBLE,
- PARTIALLY_COMPATIBLE,
+ BACKWARD_COMPATIBLE,
NOT_COMPATIBLE
};
@@ -86,17 +87,53 @@ struct TosaVersion
return str;
}
- compat_t is_compatible(const TosaVersion& rhs) const
+ static bool less_than(const TosaVersion& version1, const TosaVersion& version2)
{
- if (rhs._major == _major && rhs._minor == _minor)
+ if (version1._major < version2._major)
{
- if (rhs._patch == _patch && rhs._draft == _draft)
+ return true;
+ }
+ else if (version1._major == version2._major)
+ {
+ if (version1._minor < version2._minor)
{
- return TosaVersion::compat_t::COMPLETELY_COMPATIBLE;
+ return true;
}
- else
+ else if (version1._minor == version2._minor)
{
- return TosaVersion::compat_t::PARTIALLY_COMPATIBLE;
+ if (version1._patch < version2._patch)
+ {
+ return true;
+ }
+ else if (version1._patch == version2._patch)
+ {
+ if (version1._draft == true && version2._draft == false)
+ {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ static TosaVersion::compat_t is_compatible(const TosaVersion& tosa_fb_version,
+ const TosaVersion& serializer_version)
+ {
+ bool major_match = (serializer_version._major == tosa_fb_version._major);
+ bool minor_match = (serializer_version._minor == tosa_fb_version._minor);
+ bool patch_match = (serializer_version._patch == tosa_fb_version._patch);
+ bool draft_match = (serializer_version._draft == tosa_fb_version._draft);
+
+ if (major_match && minor_match && patch_match && draft_match)
+ return TosaVersion::compat_t::COMPLETELY_COMPATIBLE;
+
+ // We currently support backward compatibility starting from 0.100.0
+ if ((tosa_fb_version._major == 0 && tosa_fb_version._minor >= 100) || (tosa_fb_version._major > 0))
+ {
+ if (less_than(tosa_fb_version, serializer_version))
+ {
+ return TosaVersion::compat_t::BACKWARD_COMPATIBLE;
}
}
return TosaVersion::compat_t::NOT_COMPATIBLE;
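
The compatibility rule changes from "same major and minor means at least partially compatible" to an ordered comparison: an exact match is COMPLETELY_COMPATIBLE, and a flatbuffer at or above 0.100.0 that is strictly older than the serializer is BACKWARD_COMPATIBLE. A sketch of how a caller might gate deserialization on the new result (a rough illustration, not code from this change):

    #include "tosa_serialization_handler.h"

    bool CanDeserialize(const tosa::TosaVersion &file_version,
                        const tosa::TosaVersion &serializer_version) {
      using compat_t = tosa::TosaVersion::compat_t;
      auto c = tosa::TosaVersion::is_compatible(file_version, serializer_version);
      // Accept exact matches and strictly-older (>= 0.100.0) flatbuffers.
      return c == compat_t::COMPLETELY_COMPATIBLE || c == compat_t::BACKWARD_COMPATIBLE;
    }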
@@ -112,11 +149,17 @@ public:
TosaSerializationTensor(const flatbuffers::String* name,
const flatbuffers::Vector<int32_t>* shape,
DType dtype,
- const flatbuffers::Vector<uint8_t>* data);
+ const flatbuffers::Vector<uint8_t>* data,
+ const bool variable = false,
+ const bool is_unranked = false,
+ const flatbuffers::String* variable_name = NULL);
TosaSerializationTensor(const std::string& name,
const std::vector<int32_t>& shape,
DType dtype,
- const std::vector<uint8_t>& data);
+ const std::vector<uint8_t>& data,
+ const bool variable = false,
+ const bool is_unranked = false,
+ const std::string& variable_name = "");
TosaSerializationTensor();
~TosaSerializationTensor();
@@ -129,14 +172,26 @@ public:
{
return _shape;
}
- DType GetDtype()
+ DType GetDtype() const
{
return _dtype;
}
+ bool GetVariable() const
+ {
+ return _variable;
+ }
const std::vector<uint8_t>& GetData() const
{
return _data;
}
+ bool GetIsUnranked() const
+ {
+ return _is_unranked;
+ }
+ const std::string GetVariableName() const
+ {
+ return _variable_name;
+ }
// modifier
void SetDtype(DType dtype)
@@ -155,12 +210,28 @@ public:
{
_data = std::move(data);
}
+ void SetIsUnranked(const bool value)
+ {
+ _is_unranked = value;
+ }
+ void SetDimSize(size_t dim, uint32_t new_size)
+ {
+ if (dim >= _shape.size())
+ {
+            printf("dim is out of bounds\n");
+ assert(0);
+ }
+ _shape[dim] = new_size;
+ }
private:
DType _dtype; /* data type enumeration, see tosa_isa_generated.h */
std::vector<int32_t> _shape; /* shape of the tensor */
std::string _name; /* name of the tensor, used for solving dependency */
+ bool _variable; /* is this a variable tensor */
std::vector<uint8_t> _data; /* data array */
+ bool _is_unranked; /* whether this is an unranked tensor */
+ std::string _variable_name; /* name for variable tensors */
};
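
The std::string constructor overload gains the three new parameters with defaults, so existing call sites keep compiling. A sketch of creating a variable tensor through it (DType_FP32 is assumed to exist in the generated DType enum; the names are placeholders):

    #include "tosa_serialization_handler.h"
    #include <vector>

    tosa::TosaSerializationTensor MakeVariableTensor() {
      std::vector<int32_t> shape = {1, 16};
      std::vector<uint8_t> data;  // no initial data
      return tosa::TosaSerializationTensor("acc", shape, tosa::DType_FP32, data,
                                           /*variable=*/true, /*is_unranked=*/false,
                                           /*variable_name=*/"acc_var");
    }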
class TosaSerializationOperator
@@ -216,11 +287,13 @@ class TosaSerializationBasicBlock
public:
// constructor and destructor
TosaSerializationBasicBlock(const std::string& name,
+ const std::string& region_name,
const std::vector<TosaSerializationOperator*>& operators,
const std::vector<TosaSerializationTensor*>& tensors,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs);
TosaSerializationBasicBlock(std::string&& name,
+ std::string&& region_name,
std::vector<TosaSerializationOperator*>&& operators,
std::vector<TosaSerializationTensor*>&& tensors,
std::vector<std::string>&& inputs,
@@ -232,10 +305,15 @@ public:
{
return _name;
}
+ std::string GetRegionName() const
+ {
+ return _region_name;
+ }
std::vector<TosaSerializationOperator*>& GetOperators()
{
return _operators;
}
+
std::vector<TosaSerializationTensor*>& GetTensors()
{
return _tensors;
@@ -259,19 +337,59 @@ public:
{
return _inputs;
}
+
std::vector<std::string>& GetOutputs()
{
return _outputs;
}
private:
- std::string _name; /* name of basic block */
+ std::string _name; /* name of basic block */
+ std::string _region_name;
std::vector<TosaSerializationOperator*> _operators; /* TosaSerializationOperator list */
std::vector<TosaSerializationTensor*> _tensors; /* TosaSerializationTensor list */
std::vector<std::string> _inputs; /* array of string to specify block inputs */
std::vector<std::string> _outputs; /* array of string to specify block outputs */
};
+class TosaSerializationRegion
+{
+public:
+    // constructor and destructor
+ TosaSerializationRegion(const std::string& name, const std::vector<TosaSerializationBasicBlock*>& blocks);
+ TosaSerializationRegion(const std::string&& name, const std::vector<TosaSerializationBasicBlock*>&& blocks);
+ ~TosaSerializationRegion();
+
+ // accessors
+ std::string GetName() const
+ {
+ return this->_name;
+ }
+
+ std::vector<TosaSerializationBasicBlock*>& GetBlocks()
+ {
+ return this->_blocks;
+ }
+
+ TosaSerializationBasicBlock* GetBlockByName(std::string name)
+ {
+ TosaSerializationBasicBlock* result = nullptr;
+ for (auto block : GetBlocks())
+ {
+ if (block->GetName() == name)
+ {
+ result = block;
+ break;
+ }
+ }
+ return result;
+ }
+
+private:
+    std::string _name;                                   /* name of region */
+ std::vector<TosaSerializationBasicBlock*> _blocks; /* TosaSerializationBasicBlock list */
+};
+
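+
TosaSerializationRegion owns the list of basic blocks and exposes GetBlockByName, which returns nullptr when no block matches. A small lookup sketch over an existing region:

    #include "tosa_serialization_handler.h"
    #include <string>

    // Returns the named block, or the first block as a fallback (nullptr if empty).
    tosa::TosaSerializationBasicBlock *FindBlock(tosa::TosaSerializationRegion *region,
                                                 const std::string &name) {
      if (auto *block = region->GetBlockByName(name))
        return block;
      auto &blocks = region->GetBlocks();
      return blocks.empty() ? nullptr : blocks.front();
    }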
/*
* this is a helper class for writing/reading Tosa ISA
* supported format: .tosa (flatbuffer), .json
@@ -294,7 +412,12 @@ public:
tosa_err_t LoadFileSchema(const char* schema_filename);
// data format conversion. little-endian.
+ static tosa_err_t ConvertBF16toU8(const std::vector<float>& in, std::vector<uint8_t>& out);
+ static tosa_err_t ConvertFP8E4M3toU8(const std::vector<float>& in, std::vector<uint8_t>& out);
+ static tosa_err_t ConvertFP8E5M2toU8(const std::vector<float>& in, std::vector<uint8_t>& out);
+ static tosa_err_t ConvertF16toU8(const std::vector<float>& in, std::vector<uint8_t>& out);
static tosa_err_t ConvertF32toU8(const std::vector<float>& in, std::vector<uint8_t>& out);
+ static tosa_err_t ConvertI64toU8(const std::vector<int64_t>& in, std::vector<uint8_t>& out);
static tosa_err_t ConvertI48toU8(const std::vector<int64_t>& in, std::vector<uint8_t>& out);
static tosa_err_t ConvertI32toU8(const std::vector<int32_t>& in, std::vector<uint8_t>& out);
static tosa_err_t ConvertI16toU8(const std::vector<int16_t>& in, std::vector<uint8_t>& out);
@@ -302,7 +425,13 @@ public:
static tosa_err_t ConvertI4toU8(const std::vector<int8_t>& in, std::vector<uint8_t>& out);
static tosa_err_t ConvertBooltoU8(const std::vector<bool>& in, std::vector<uint8_t>& out);
+ static tosa_err_t ConvertU8toBF16(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<float>& out);
+ static tosa_err_t ConvertU8toFP8E4M3(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<float>& out);
+ static tosa_err_t ConvertU8toFP8E5M2(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<float>& out);
+ static tosa_err_t
+ ConvertU8toF16(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<half_float::half>& out);
static tosa_err_t ConvertU8toF32(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<float>& out);
+ static tosa_err_t ConvertU8toI64(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<int64_t>& out);
static tosa_err_t ConvertU8toI48(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<int64_t>& out);
static tosa_err_t ConvertU8toI32(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<int32_t>& out);
static tosa_err_t ConvertU8toI16(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<int16_t>& out);
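
The converter set grows BF16, FP16, FP8E4M3/E5M2 and I64 variants in both directions, mirroring the existing pairs. A round-trip sketch with the F32 pair (TOSA_OK is assumed to be the success value of tosa_err_t; that enum is outside this hunk):

    #include "tosa_serialization_handler.h"
    #include <vector>

    bool RoundTripF32(const std::vector<float> &in, std::vector<float> &out) {
      std::vector<uint8_t> bytes;
      if (tosa::TosaSerializationHandler::ConvertF32toU8(in, bytes) != tosa::TOSA_OK)
        return false;  // assumed success enumerator, see note above
      return tosa::TosaSerializationHandler::ConvertU8toF32(
                 bytes, static_cast<uint32_t>(in.size()), out) == tosa::TOSA_OK;
    }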
@@ -310,6 +439,8 @@ public:
static tosa_err_t ConvertU8toI4(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<int8_t>& out);
static tosa_err_t ConvertU8toBool(const std::vector<uint8_t>& in, uint32_t out_size, std::vector<bool>& out);
+ static void ForceAlignTensorData(std::vector<uint8_t>& buf);
+
// version
const TosaVersion& GetVersion()
{
@@ -317,39 +448,29 @@ public:
}
// accessor
- std::vector<TosaSerializationBasicBlock*>& GetBlocks()
+ std::vector<TosaSerializationRegion*>& GetRegions()
{
- return _blocks;
+ return _regions;
}
- TosaSerializationBasicBlock* GetBlockByName(std::string name)
+ TosaSerializationRegion* GetMainRegion()
{
- TosaSerializationBasicBlock* result = nullptr;
- for (auto block : GetBlocks())
+ return _regions[0];
+ }
+
+ TosaSerializationRegion* GetRegionByName(std::string name)
+ {
+ TosaSerializationRegion* result = nullptr;
+ for (auto region : GetRegions())
{
- if (block->GetName() == name)
+ if (region->GetName() == name)
{
- result = block;
+ result = region;
break;
}
}
return result;
}
- TosaSerializationBasicBlock* GetMainBlock()
- {
- TosaSerializationBasicBlock* main_block = GetBlockByName(std::string("main"));
- assert(main_block);
- return main_block;
- }
-
- std::vector<std::string>& GetInputs()
- {
- return GetMainBlock()->GetInputs();
- }
- std::vector<std::string>& GetOutputs()
- {
- return GetMainBlock()->GetOutputs();
- }
bool GetSchemaLoaded() const
{
@@ -360,14 +481,13 @@ protected:
tosa_err_t Clear();
tosa_err_t Deserialize(const uint8_t* buf);
tosa_err_t Serialize();
- TosaVersion ParseTosaSchemaVersion(std::string schema);
private:
- TosaVersion _version; /* version struct */
- flatbuffers::FlatBufferBuilder _builder; /* flatbuffer builder */
- flatbuffers::Parser _parser; /* flatbuffer parser, used for json parsing */
- std::vector<TosaSerializationBasicBlock*> _blocks; /* array structure to store all TosaSerializationBasicBlock */
- bool _schemaLoaded; /* is the schema properly loaded? */
+ TosaVersion _version; /* version struct */
+ flatbuffers::FlatBufferBuilder _builder; /* flatbuffer builder */
+ flatbuffers::Parser _parser; /* flatbuffer parser, used for json parsing */
+ std::vector<TosaSerializationRegion*> _regions; /* array structure to store all TosaSerializationRegion */
+ bool _schemaLoaded; /* is the schema properly loaded? */
};
} // namespace tosa
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..6cfb8f6
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,43 @@
+# Copyright (c) 2023, ARM Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Description:
+# Packaging for the TOSA serialization lib
+#
+
+[build-system]
+requires = [
+ "setuptools>=42",
+ "wheel",
+ "setuptools_scm[toml]>=6.0"
+]
+build-backend = "setuptools.build_meta"
+
+[project]
+name="tosa_serialization_lib"
+description="TOSA serialization library"
+authors=[ { "name" = "Arm Ltd" } ]
+license={ "file" = "LICENSE.txt" }
+dynamic = ["version"]
+
+[tool.setuptools]
+packages = [ "tosa", "serializer" ]
+
+[tool.setuptools.package-dir]
+tosa = "python/tosa"
+serializer = "python/serializer"
+
+[tool.setuptools_scm]
+version_scheme = "no-guess-dev"
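Editor's note: a minimal sketch of consuming this packaging, assuming a wheel built from the configuration above has been installed (for example with "pip install ." from the repository root). The two package mappings make both top-level packages importable:

    import tosa                              # generated FlatBuffers bindings
    import serializer.tosa_serializer as ts  # serializer helpers

    print(ts.TOSA_VERSION)                   # [major, minor, patch, draft]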
diff --git a/python/serializer/tosa_serializer.py b/python/serializer/tosa_serializer.py
index ec1c12d..bbfb37e 100644
--- a/python/serializer/tosa_serializer.py
+++ b/python/serializer/tosa_serializer.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2022, ARM Limited.
+# Copyright (c) 2020-2024, ARM Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
# limitations under the License.
import os
+import serializer.tosa_serializer as ts
import json
import flatbuffers
import numpy as np
@@ -20,6 +21,7 @@ import struct
from enum import IntEnum, unique
from tosa import (
TosaGraph,
+ TosaRegion,
TosaBasicBlock,
TosaTensor,
TosaOperator,
@@ -30,7 +32,7 @@ import tosa.Op as TosaOp
# Keep version number in sync with the version default value with schema/tosa.fbs
TOSA_VERSION_MAJOR = 0
-TOSA_VERSION_MINOR = 31
+TOSA_VERSION_MINOR = 100
TOSA_VERSION_PATCH = 0
TOSA_VERSION_DRAFT = True
TOSA_VERSION = [
@@ -56,8 +58,13 @@ DTypeNames = [
"INT16",
"INT32",
"INT48",
- "FLOAT",
+ "FP32",
"UINT16",
+ "FP16",
+ "BF16",
+ "SHAPE",
+ "FP8E4M3",
+ "FP8E5M2",
]
ByteMask = np.uint64(0xFF)
@@ -90,6 +97,7 @@ class TosaSerializerUnion:
self.bools = []
self.floats = []
self.strings = []
+ self.int16vecs = []
self.intvecs = []
self.fpvecs = []
@@ -106,6 +114,9 @@ class TosaSerializerUnion:
for fcn, val in self.intvecs:
intVecList.append((fcn, TosaSerializer.serializeInt32Vec(builder, val)))
+ for fcn, val in self.int16vecs:
+ intVecList.append((fcn, TosaSerializer.serializeInt16Vec(builder, val)))
+
for fcn, val in self.fpvecs:
fpVecList.append((fcn, TosaSerializer.serializeFpVec(builder, val)))
@@ -141,7 +152,15 @@ class TosaSerializerAttribute(TosaSerializerUnion):
def __init__(self):
super().__init__()
- def PoolAttribute(self, kernel, stride, pad, input_zp, output_zp):
+ def PoolAttribute(
+ self,
+ kernel,
+ stride,
+ pad,
+ input_zp,
+ output_zp,
+ acc_type,
+ ):
from tosa import PoolAttribute as a, Attribute
self.utype = Attribute.Attribute().PoolAttribute
@@ -152,8 +171,11 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.intvecs.append((a.AddStride, stride))
self.ints.append((a.AddInputZp, input_zp))
self.ints.append((a.AddOutputZp, output_zp))
+ self.ints.append((a.AddAccType, acc_type))
- def ConvAttribute(self, pad, stride, dilation, input_zp, weight_zp):
+ def ConvAttribute(
+ self, pad, stride, dilation, input_zp, weight_zp, local_bound, acc_type
+ ):
from tosa import ConvAttribute as a, Attribute
self.utype = Attribute.Attribute().ConvAttribute
@@ -164,8 +186,12 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.intvecs.append((a.AddDilation, dilation))
self.ints.append((a.AddInputZp, input_zp))
self.ints.append((a.AddWeightZp, weight_zp))
+ self.bools.append((a.AddLocalBound, local_bound))
+ self.ints.append((a.AddAccType, acc_type))
- def TransposeConvAttribute(self, outpad, stride, output_shape, input_zp, weight_zp):
+ def TransposeConvAttribute(
+ self, outpad, stride, output_shape, input_zp, weight_zp, local_bound, acc_type
+ ):
from tosa import TransposeConvAttribute as a, Attribute
self.utype = Attribute.Attribute().TransposeConvAttribute
@@ -176,16 +202,21 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.intvecs.append((a.AddOutputShape, output_shape))
self.ints.append((a.AddInputZp, input_zp))
self.ints.append((a.AddWeightZp, weight_zp))
+ self.bools.append((a.AddLocalBound, local_bound))
+ self.ints.append((a.AddAccType, acc_type))
- def PadAttribute(self, padding, pad_const_int, pad_const_fp):
+ def PadAttribute(self, serializer_builder, pad_const_val_as_bytes):
from tosa import PadAttribute as a, Attribute
self.utype = Attribute.Attribute().PadAttribute
self.optFcns = (a.Start, a.End)
- self.intvecs.append((a.AddPadding, padding))
- self.ints.append((a.AddPadConstInt, pad_const_int))
- self.floats.append((a.AddPadConstFp, pad_const_fp))
+ # serialize pad_const_val_as_bytes as uint8 vector
+ serialized_pad_const_val = ts.TosaSerializer.serializeUint8Vec(
+ serializer_builder, pad_const_val_as_bytes
+ )
+
+ self.floats.append((a.AddPadConst, serialized_pad_const_val))
def AxisAttribute(self, axis):
from tosa import AxisAttribute as a, Attribute
@@ -195,61 +226,43 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.ints.append((a.AddAxis, axis))
- def ReshapeAttribute(self, new_shape):
- from tosa import ReshapeAttribute as a, Attribute
-
- self.utype = Attribute.Attribute().ReshapeAttribute
- self.optFcns = (a.Start, a.End)
-
- self.intvecs.append((a.AddNewShape, new_shape))
-
- def SliceAttribute(self, start, size):
- from tosa import SliceAttribute as a, Attribute
-
- self.utype = Attribute.Attribute().SliceAttribute
- self.optFcns = (a.Start, a.End)
-
- self.intvecs.append((a.AddStart, start))
- self.intvecs.append((a.AddSize, size))
-
- def TileAttribute(self, multiples):
- from tosa import TileAttribute as a, Attribute
-
- self.utype = Attribute.Attribute().TileAttribute
- self.optFcns = (a.Start, a.End)
-
- self.intvecs.append((a.AddMultiples, multiples))
-
- def ResizeAttribute(
- self, output_size, stride, offset, shift, stride_fp, offset_fp, mode
- ):
+ def ResizeAttribute(self, scale, offset, border, mode):
from tosa import ResizeAttribute as a, Attribute
self.utype = Attribute.Attribute().ResizeAttribute
self.optFcns = (a.Start, a.End)
- self.intvecs.append((a.AddOutputSize, output_size))
- self.intvecs.append((a.AddStride, stride))
- self.intvecs.append((a.AddOffset, offset))
- self.ints.append((a.AddShift, shift))
- self.fpvecs.append((a.AddStrideFp, stride_fp))
- self.fpvecs.append((a.AddOffsetFp, offset_fp))
+ self.int16vecs.append((a.AddScale, scale))
+ self.int16vecs.append((a.AddOffset, offset))
+ self.int16vecs.append((a.AddBorder, border))
self.ints.append((a.AddMode, mode))
- def ClampAttribute(self, minint, maxint, minfp, maxfp):
+ def ClampAttribute(self, serializer_builder, min_val_as_bytes, max_val_as_bytes):
from tosa import ClampAttribute as a, Attribute
self.utype = Attribute.Attribute().ClampAttribute
self.optFcns = (a.Start, a.End)
- self.ints.append((a.AddMinInt, minint))
- self.ints.append((a.AddMaxInt, maxint))
+ # min/max float attributes serialized as uint8 vectors
+ serialized_min_val = ts.TosaSerializer.serializeUint8Vec(
+ serializer_builder, min_val_as_bytes
+ )
+ serialized_max_val = ts.TosaSerializer.serializeUint8Vec(
+ serializer_builder, max_val_as_bytes
+ )
- self.ints.append((a.AddMinFp, minfp))
- self.ints.append((a.AddMaxFp, maxfp))
+ self.floats.append((a.AddMinVal, serialized_min_val))
+ self.floats.append((a.AddMaxVal, serialized_max_val))
def RescaleAttribute(
- self, input_zp, output_zp, multiplier, shift, scale32, double_round, per_channel
+ self,
+ input_zp,
+ output_zp,
+ scale32,
+ double_round,
+ per_channel,
+ input_unsigned,
+ output_unsigned,
):
from tosa import RescaleAttribute as a, Attribute
@@ -258,11 +271,11 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.ints.append((a.AddInputZp, input_zp))
self.ints.append((a.AddOutputZp, output_zp))
- self.intvecs.append((a.AddMultiplier, multiplier))
- self.intvecs.append((a.AddShift, shift))
self.bools.append((a.AddScale32, scale32))
self.bools.append((a.AddDoubleRound, double_round))
self.bools.append((a.AddPerChannel, per_channel))
+ self.bools.append((a.AddInputUnsigned, input_unsigned))
+ self.bools.append((a.AddOutputUnsigned, output_unsigned))
def MulAttribute(self, shift):
from tosa import MulAttribute as a, Attribute
@@ -283,23 +296,23 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.bools.append((a.AddRound, round))
- def CondIfAttribute(self, then_branch, else_branch):
+ def CondIfAttribute(self, then_graph, else_graph):
from tosa import CondIfAttribute as a, Attribute
self.utype = Attribute.Attribute().CondIfAttribute
self.optFcns = (a.Start, a.End)
- self.strings.append((a.AddThenBranch, then_branch))
- self.strings.append((a.AddElseBranch, else_branch))
+ self.strings.append((a.AddThenGraph, then_graph))
+ self.strings.append((a.AddElseGraph, else_graph))
- def WhileLoopAttribute(self, cond_branch, body_branch):
+ def WhileLoopAttribute(self, cond_graph, body_graph):
from tosa import WhileLoopAttribute as a, Attribute
self.utype = Attribute.Attribute().WhileLoopAttribute
self.optFcns = (a.Start, a.End)
- self.strings.append((a.AddCondBranch, cond_branch))
- self.strings.append((a.AddBodyBranch, body_branch))
+ self.strings.append((a.AddCondGraph, cond_graph))
+ self.strings.append((a.AddBodyGraph, body_graph))
def TransposeAttribute(self, perms):
from tosa import TransposeAttribute as a, Attribute
@@ -315,7 +328,7 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.utype = Attribute.Attribute().TableAttribute
self.optFcns = (a.Start, a.End)
- self.intvecs.append((a.AddTable, table))
+ self.int16vecs.append((a.AddTable, table))
def MatMulAttribute(self, A_zp, B_zp):
from tosa import MatMulAttribute as a, Attribute
@@ -344,6 +357,23 @@ class TosaSerializerAttribute(TosaSerializerUnion):
self.ints.append((a.AddInput1Zp, input1_zp))
self.ints.append((a.AddOutputZp, output_zp))
+ def FFTAttribute(self, inverse, local_bound):
+ from tosa import FFTAttribute as a, Attribute
+
+ self.utype = Attribute.Attribute().FFTAttribute
+ self.optFcns = (a.Start, a.End)
+
+ self.bools.append((a.AddInverse, inverse))
+ self.bools.append((a.AddLocalBound, local_bound))
+
+ def RFFTAttribute(self, local_bound):
+ from tosa import RFFTAttribute as a, Attribute
+
+ self.utype = Attribute.Attribute().RFFTAttribute
+ self.optFcns = (a.Start, a.End)
+
+ self.bools.append((a.AddLocalBound, local_bound))
+
class TosaSerializerTensor:
def __init__(
@@ -363,15 +393,30 @@ class TosaSerializerTensor:
self.shape = shape
self.dtype = dtype
+ if (
+ dtype == DType.FP32
+ or dtype == DType.BF16
+ or dtype == DType.FP8E4M3
+ or dtype == DType.FP8E5M2
+ ):
+ fntype = np.float32
+ elif dtype == DType.FP16:
+ fntype = np.float16
+ else:
+ fntype = int
+
if isinstance(data, np.ndarray):
- data = data.flatten().astype(int).tolist()
- data = list(map(int, data))
- self.data = data
+ data = data.flatten().astype(fntype).tolist()
+ data = list(map(fntype, data))
elif isinstance(data, list):
- data = list(map(int, data))
- self.data = data
+ data = list(map(fntype, data))
+ elif data is not None:
+ # Assume data is a rank-0 scalar of this dtype
+ data = list(map(fntype, [data]))
else:
- self.data = None
+ data = None
+
+ self.data = data
# Filename for placeholder tensors. These get generated by the test generation
# process and are written to disk, but are considered input tensors by the
@@ -381,12 +426,12 @@ class TosaSerializerTensor:
self.placeholderFilename = placeholderFilename
def __str__(self):
- str = "TosaSerializerTensor name: {} shape: {} dtype: {}".format(
+ concatString = "TosaSerializerTensor name: {} shape: {} dtype: {}".format(
self.name,
self.shape,
DTypeNames[self.dtype],
)
- return str
+ return concatString
def setDtype(self, dtype):
self.dtype = dtype
@@ -395,60 +440,7 @@ class TosaSerializerTensor:
fb_name = builder.CreateString(self.name)
fb_shapes = TosaSerializer.serializeInt32Vec(builder, self.shape)
if self.data:
- u8_data = list()
- # little endianess
- if self.dtype == DType.BOOL:
- for val in self.data:
- val_u8 = np.uint8(val)
- u8_data.append(val_u8)
- elif self.dtype == DType.INT4:
- in_size = len(self.data)
- out_size = (in_size + 1) // 2
- for i in range(out_size):
- val_0 = self.data[2 * i]
- if (2 * i + 1) < in_size:
- val_1 = self.data[2 * i + 1]
- else:
- val_1 = 0
- val_i8 = (val_0 & 0xF) | ((val_1 & 0xF) << 4)
- val_u8 = np.uint8(val_i8)
- u8_data.append(val_u8)
- elif self.dtype == DType.INT8:
- for val in self.data:
- val_u8 = np.uint8(val)
- u8_data.append(val_u8)
- elif self.dtype == DType.INT16:
- for val in self.data:
- val_u16 = np.uint16(val)
- b0 = val_u16 & ByteMask
- b1 = (val_u16 >> np.uint16(8)) & ByteMask
- u8_data.extend([b0, b1])
- elif self.dtype == DType.INT32:
- for val in self.data:
- val_u32 = np.uint32(val)
- b0 = val_u32 & ByteMask
- b1 = (val_u32 >> np.uint32(8)) & ByteMask
- b2 = (val_u32 >> np.uint32(16)) & ByteMask
- b3 = (val_u32 >> np.uint32(24)) & ByteMask
- u8_data.extend([b0, b1, b2, b3])
- elif self.dtype == DType.INT48:
- for val in self.data:
- val_u64 = np.uint64(val)
- b0 = val_u64 & ByteMask
- b1 = (val_u64 >> np.uint64(8)) & ByteMask
- b2 = (val_u64 >> np.uint64(16)) & ByteMask
- b3 = (val_u64 >> np.uint64(24)) & ByteMask
- b4 = (val_u64 >> np.uint64(32)) & ByteMask
- b5 = (val_u64 >> np.uint64(40)) & ByteMask
- u8_data.extend([b0, b1, b2, b3, b4, b5])
- elif self.dtype == DType.FLOAT:
- for val in self.data:
- b = struct.pack("!f", val)
- u8_data.extend([b[3], b[2], b[1], b[0]])
- else:
- raise Exception(
- "unsupported data type {}".format(DTypeNames[self.dtype])
- )
+ u8_data = TosaSerializer.convertDataToUint8Vec(self.dtype, self.data)
fb_data = TosaSerializer.serializeUint8Vec(builder, u8_data)
TosaTensor.Start(builder)
@@ -469,14 +461,14 @@ class TosaSerializerOperator:
self.outputs = TosaSerializer.toList(outputs)
def __str__(self):
- str = "Op {}\n----\n".format(self.op)
+ concatString = "Op {}\n----\n".format(self.op)
for i in self.inputs:
- str = str + " Input: {}\n".format(i)
+ concatString = concatString + " Input: {}\n".format(i)
for o in self.outputs:
- str = str + " Output: {}\n".format(o)
+ concatString = concatString + " Output: {}\n".format(o)
- return str
+ return concatString
def serialize(self, builder):
fb_inputs = TosaSerializer.serializeStrVec(
@@ -561,41 +553,39 @@ class TosaSerializerBasicBlock:
return TosaBasicBlock.End(builder)
+# How CONSTs are treated in the flatbuffer
@unique
-class TensorDir(IntEnum):
- PLACEHOLDER = 0
- CONST = 1
- INTERMEDIATE = 2
- RESULT = 3
+class ConstMode(IntEnum):
+ EMBED = 0
+ EMBED_DUMP = 1
+ INPUTS = 2
-class TosaSerializer:
- def __init__(self, pathPrefix):
- self.add_compat_methods()
- # Get the global TOSA version if not already defined
-
- self.builder = flatbuffers.Builder(0)
-
+class TosaSerializerRegion:
+ def __init__(self, name, pathPrefix, constMode=ConstMode.EMBED):
+ self.name = name
self.basicBlocks = []
- self.startBasicBlock("main")
- self.pathPrefix = pathPrefix
-
- # Indicies used for adding/naming tensors
self.currInputIdx = 0
self.currConstIdx = 0
self.currLayerIdx = 1
self.currResultIdx = 0
+ self.pathPrefix = pathPrefix
+ self.constMode = constMode
- # Is this an illegal test that is expected to fail?
- self.expectedReturnCode = 0
- self.expectedFailure = False
- self.expectedFailureDesc = ""
+ def addBasicBlock(self, name):
+ self.currBasicBlock = TosaSerializerBasicBlock(name)
+ self.basicBlocks.append(self.currBasicBlock)
- def __str__(self):
- str = ""
- for bb in self.basicBlocks:
- str = str + bb.__str__()
- return str
+ def serialize(self, builder):
+ fb_name = builder.CreateString(self.name)
+ fbv_basicBlocks = TosaSerializer.serializeObjVec(
+ builder, self.basicBlocks, TosaRegion.StartBlocksVector
+ )
+
+ TosaRegion.Start(builder)
+ TosaRegion.AddName(builder, fb_name)
+ TosaRegion.AddBlocks(builder, fbv_basicBlocks)
+ return TosaRegion.End(builder)
def addPlaceholder(self, shape, dtype, vals):
if not self.currBasicBlock:
@@ -614,21 +604,42 @@ class TosaSerializer:
return tens
- def addConst(self, shape, dtype, vals):
+ def addConst(self, shape, dtype, vals, name=None):
if not self.currBasicBlock:
raise Exception("addTensor called without valid basic block")
- name = "const-{}".format(self.currInputIdx)
- self.currInputIdx = self.currInputIdx + 1
+ if name is None:
+ name = "const-{}".format(self.currInputIdx)
+ self.currInputIdx = self.currInputIdx + 1
- tens = self.currBasicBlock.addTensor(name, shape, dtype, vals)
+ if self.constMode == ConstMode.INPUTS:
+ # Save const as input file
+ filename = "{}.npy".format(name)
+ tensor_vals = None
+ self.currBasicBlock.addInput(name)
+ else:
+ # Embed const in flatbuffer
+ filename = None
+ tensor_vals = vals
+
+ tens = self.currBasicBlock.addTensor(name, shape, dtype, tensor_vals, filename)
# Add the operator now
- self.currBasicBlock.addOperator(TosaOp.Op().CONST, [], name)
+ if dtype == DType.SHAPE:
+ self.currBasicBlock.addOperator(TosaOp.Op().CONST_SHAPE, [], name)
+ else:
+ self.currBasicBlock.addOperator(TosaOp.Op().CONST, [], name)
+
+ # Save the const data to file for debug or as input files
+ if vals is not None and self.constMode in [
+ ConstMode.EMBED_DUMP,
+ ConstMode.INPUTS,
+ ]:
+ filename = "{}.npy".format(name)
+ np.save(os.path.join(self.pathPrefix, filename), vals, False)
return tens
def addIntermediate(self, shape, dtype):
-
if not self.currBasicBlock:
raise Exception("addTensor called without valid basic block")
@@ -640,7 +651,13 @@ class TosaSerializer:
return tens
def addInputTensor(self, tensor):
- self.currBasicBlock.addTensor(tensor.name, tensor.shape, tensor.dtype)
+ self.currBasicBlock.addTensor(
+ tensor.name,
+ tensor.shape,
+ tensor.dtype,
+ tensor.data,
+ tensor.placeholderFilename,
+ )
self.currBasicBlock.addInput(tensor.name)
def addOutputTensor(self, tensor):
@@ -658,7 +675,6 @@ class TosaSerializer:
return tens
def addOperator(self, op, inputs, outputs, attributes=None):
-
if op == TosaOp.Op().CONST:
raise Exception("Use addConstTensor() to add CONST ops")
@@ -669,6 +685,62 @@ class TosaSerializer:
attributes,
)
+
+@unique
+class TensorDir(IntEnum):
+ PLACEHOLDER = 0
+ CONST = 1
+ INTERMEDIATE = 2
+ RESULT = 3
+
+
+class TosaSerializer:
+ def __init__(self, pathPrefix, constMode=ConstMode.EMBED):
+ self.builder = flatbuffers.Builder(0)
+
+ # Enables inspection of constant data outside of graph
+ self.constMode = constMode
+
+ self.regions = []
+ self.startRegion("main", pathPrefix)
+
+ self.currRegion.addBasicBlock("main")
+
+ # Is this an illegal test that is expected to fail?
+ self.expectedReturnCode = 0
+ self.expectedFailure = False
+ self.expectedFailureDesc = ""
+
+ def __str__(self):
+ concatString = ""
+ for region in self.regions:
+ concatString = concatString + str(region)
+ return concatString
+
+ def addPlaceholder(self, shape, dtype, vals):
+ return self.currRegion.addPlaceholder(shape, dtype, vals)
+
+ def addConst(self, shape, dtype, vals, name=None):
+ return self.currRegion.addConst(shape, dtype, vals, name)
+
+ def addIntermediate(self, shape, dtype):
+ return self.currRegion.addIntermediate(shape, dtype)
+
+ def addInputTensor(self, tensor):
+ self.currRegion.addInputTensor(tensor)
+
+ def addOutputTensor(self, tensor):
+ self.currRegion.addOutputTensor(tensor)
+
+ def addOutput(self, shape, dtype):
+ return self.currRegion.addOutput(shape, dtype)
+
+ def addOperator(self, op, inputs, outputs, attributes=None):
+ return self.currRegion.addOperator(op, inputs, outputs, attributes)
+
+ def addBasicBlock(self, name):
+ self.currRegion.addBasicBlock(name)
+
def setExpectedReturnCode(self, val, fail, desc=""):
self.expectedReturnCode = val
@@ -680,19 +752,19 @@ class TosaSerializer:
builder = self.builder
Version.Start(builder)
- Version.Add_major(builder, TOSA_VERSION[0])
- Version.Add_minor(builder, TOSA_VERSION[1])
- Version.Add_patch(builder, TOSA_VERSION[2])
- Version.Add_draft(builder, TOSA_VERSION[3])
+ Version.Add_Major(builder, TOSA_VERSION[0])
+ Version.Add_Minor(builder, TOSA_VERSION[1])
+ Version.Add_Patch(builder, TOSA_VERSION[2])
+ Version.Add_Draft(builder, TOSA_VERSION[3])
version = Version.End(builder)
- fbv_bb = TosaSerializer.serializeObjVec(
- builder, self.basicBlocks, TosaGraph.StartBlocksVector
+ fbv_region = TosaSerializer.serializeObjVec(
+ builder, self.regions, TosaGraph.StartRegionsVector
)
TosaGraph.Start(builder)
TosaGraph.AddVersion(builder, version)
- TosaGraph.AddBlocks(builder, fbv_bb)
+ TosaGraph.AddRegions(builder, fbv_region)
graph = TosaGraph.End(builder)
self.builder.Finish(graph, TOSA_GRAPH_IDENTIFIER)
@@ -709,16 +781,17 @@ class TosaSerializer:
ofm_name = []
ofm_file = []
- for b in self.basicBlocks:
- if b.name == "main":
- for i in b.inputs:
- ifm_name.append(i)
- ifm_file.append(b.tensors[i].placeholderFilename)
- for o in b.outputs:
- ofm_name.append(o)
- # Make up an OFM filename here. One isn't generated until the
- # reference tool is run, so any name is a good name
- ofm_file.append("ref-{}.npy".format(o))
+ for region in self.regions:
+ for block in region.basicBlocks:
+ if block and block.name == "main":
+ for i in block.inputs:
+ ifm_name.append(i)
+ ifm_file.append(block.tensors[i].placeholderFilename)
+ for o in block.outputs:
+ ofm_name.append(o)
+ # Make up an OFM filename here. One isn't generated until the
+ # reference tool is run, so any name is a good name
+ ofm_file.append("ref-{}.npy".format(o))
test_desc["ifm_name"] = ifm_name
test_desc["ifm_file"] = ifm_file
@@ -731,9 +804,9 @@ class TosaSerializer:
return json.dumps(test_desc, indent=" ")
- def startBasicBlock(self, name):
- self.currBasicBlock = TosaSerializerBasicBlock(name)
- self.basicBlocks.append(self.currBasicBlock)
+ def startRegion(self, name, pathPrefix):
+ self.currRegion = TosaSerializerRegion(name, pathPrefix, self.constMode)
+ self.regions.append(self.currRegion)
@staticmethod
def serializeStrVec(builder, vec, start_fcn):
@@ -757,6 +830,16 @@ class TosaSerializer:
return builder.EndVector(len(vec))
@staticmethod
+ def serializeInt16Vec(builder, vec):
+ builder.StartVector(2, len(vec), 4)
+ for v in vec[::-1]:
+ builder.PrependInt16(v)
+ try:
+ return builder.EndVector()
+ except TypeError:
+ return builder.EndVector(len(vec))
+
+ @staticmethod
def serializeInt32Vec(builder, vec):
builder.StartVector(4, len(vec), 4)
for v in vec[::-1]:
@@ -797,369 +880,104 @@ class TosaSerializer:
else:
return [val]
- # Remove when switching to flatbuffers 2.0
- # contains a mapping of the deprecated 1.12 method to the 2.0 version
-
- def add_compat_methods(self):
-
- from tosa import ArithmeticRightShiftAttribute
-
- if not hasattr(ArithmeticRightShiftAttribute, "Start"):
- ArithmeticRightShiftAttribute.Start = (
- ArithmeticRightShiftAttribute.ArithmeticRightShiftAttributeStart
- )
- ArithmeticRightShiftAttribute.AddRound = (
- ArithmeticRightShiftAttribute.ArithmeticRightShiftAttributeAddRound
- )
- ArithmeticRightShiftAttribute.End = (
- ArithmeticRightShiftAttribute.ArithmeticRightShiftAttributeEnd
- )
- from tosa import AxisAttribute
-
- if not hasattr(AxisAttribute, "Start"):
- AxisAttribute.Start = AxisAttribute.AxisAttributeStart
- AxisAttribute.AddAxis = AxisAttribute.AxisAttributeAddAxis
- AxisAttribute.End = AxisAttribute.AxisAttributeEnd
- from tosa import ClampAttribute
-
- if not hasattr(ClampAttribute, "Start"):
- ClampAttribute.Start = ClampAttribute.ClampAttributeStart
- ClampAttribute.AddMinInt = ClampAttribute.ClampAttributeAddMinInt
- ClampAttribute.AddMaxInt = ClampAttribute.ClampAttributeAddMaxInt
- ClampAttribute.AddMinFp = ClampAttribute.ClampAttributeAddMinFp
- ClampAttribute.AddMaxFp = ClampAttribute.ClampAttributeAddMaxFp
- ClampAttribute.End = ClampAttribute.ClampAttributeEnd
- from tosa import CondIfAttribute
-
- if not hasattr(CondIfAttribute, "Start"):
- CondIfAttribute.Start = CondIfAttribute.CondIfAttributeStart
- CondIfAttribute.AddThenBranch = CondIfAttribute.CondIfAttributeAddThenBranch
- CondIfAttribute.AddElseBranch = CondIfAttribute.CondIfAttributeAddElseBranch
- CondIfAttribute.End = CondIfAttribute.CondIfAttributeEnd
- from tosa import ConvAttribute
-
- if not hasattr(ConvAttribute, "Start"):
- ConvAttribute.Start = ConvAttribute.ConvAttributeStart
- ConvAttribute.AddPad = ConvAttribute.ConvAttributeAddPad
- ConvAttribute.StartPadVector = ConvAttribute.ConvAttributeStartPadVector
- ConvAttribute.AddStride = ConvAttribute.ConvAttributeAddStride
- ConvAttribute.StartStrideVector = (
- ConvAttribute.ConvAttributeStartStrideVector
- )
- ConvAttribute.AddDilation = ConvAttribute.ConvAttributeAddDilation
- ConvAttribute.StartDilationVector = (
- ConvAttribute.ConvAttributeStartDilationVector
- )
- ConvAttribute.AddInputZp = ConvAttribute.ConvAttributeAddInputZp
- ConvAttribute.AddWeightZp = ConvAttribute.ConvAttributeAddWeightZp
- ConvAttribute.End = ConvAttribute.ConvAttributeEnd
- from tosa import FullyConnectedAttribute
-
- if not hasattr(FullyConnectedAttribute, "Start"):
- FullyConnectedAttribute.Start = (
- FullyConnectedAttribute.FullyConnectedAttributeStart
- )
- FullyConnectedAttribute.AddInputZp = (
- FullyConnectedAttribute.FullyConnectedAttributeAddInputZp
- )
- FullyConnectedAttribute.AddWeightZp = (
- FullyConnectedAttribute.FullyConnectedAttributeAddWeightZp
- )
- FullyConnectedAttribute.End = (
- FullyConnectedAttribute.FullyConnectedAttributeEnd
- )
- from tosa import MatMulAttribute
-
- if not hasattr(MatMulAttribute, "Start"):
- MatMulAttribute.Start = MatMulAttribute.MatMulAttributeStart
- MatMulAttribute.AddAZp = MatMulAttribute.MatMulAttributeAddAZp
- MatMulAttribute.AddBZp = MatMulAttribute.MatMulAttributeAddBZp
- MatMulAttribute.End = MatMulAttribute.MatMulAttributeEnd
- from tosa import PoolAttribute
-
- if not hasattr(PoolAttribute, "Start"):
- PoolAttribute.Start = PoolAttribute.PoolAttributeStart
- PoolAttribute.AddPad = PoolAttribute.PoolAttributeAddPad
- PoolAttribute.StartPadVector = PoolAttribute.PoolAttributeStartPadVector
- PoolAttribute.AddKernel = PoolAttribute.PoolAttributeAddKernel
- PoolAttribute.StartKernelVector = (
- PoolAttribute.PoolAttributeStartKernelVector
- )
- PoolAttribute.AddStride = PoolAttribute.PoolAttributeAddStride
- PoolAttribute.StartStrideVector = (
- PoolAttribute.PoolAttributeStartStrideVector
- )
- PoolAttribute.AddInputZp = PoolAttribute.PoolAttributeAddInputZp
- PoolAttribute.AddOutputZp = PoolAttribute.PoolAttributeAddOutputZp
- PoolAttribute.End = PoolAttribute.PoolAttributeEnd
- from tosa import MulAttribute
-
- if not hasattr(MulAttribute, "Start"):
- MulAttribute.Start = MulAttribute.MulAttributeStart
- MulAttribute.AddShift = MulAttribute.MulAttributeAddShift
- MulAttribute.End = MulAttribute.MulAttributeEnd
- from tosa import PadAttribute
-
- if not hasattr(PadAttribute, "Start"):
- PadAttribute.Start = PadAttribute.PadAttributeStart
- PadAttribute.AddPadding = PadAttribute.PadAttributeAddPadding
- PadAttribute.StartPaddingVector = (
- PadAttribute.PadAttributeStartPaddingVector
- )
- PadAttribute.AddPadConstInt = PadAttribute.PadAttributeAddPadConstInt
- PadAttribute.AddPadConstFp = PadAttribute.PadAttributeAddPadConstFp
- PadAttribute.End = PadAttribute.PadAttributeEnd
- from tosa import PoolAttribute
-
- if not hasattr(PoolAttribute, "Start"):
- PoolAttribute.Start = PoolAttribute.PoolAttributeStart
- PoolAttribute.AddPad = PoolAttribute.PoolAttributeAddPad
- PoolAttribute.StartPadVector = PoolAttribute.PoolAttributeStartPadVector
- PoolAttribute.AddKernel = PoolAttribute.PoolAttributeAddKernel
- PoolAttribute.StartKernelVector = (
- PoolAttribute.PoolAttributeStartKernelVector
- )
- PoolAttribute.AddStride = PoolAttribute.PoolAttributeAddStride
- PoolAttribute.StartStrideVector = (
- PoolAttribute.PoolAttributeStartStrideVector
- )
- PoolAttribute.AddInputZp = PoolAttribute.PoolAttributeAddInputZp
- PoolAttribute.AddOutputZp = PoolAttribute.PoolAttributeAddOutputZp
- PoolAttribute.End = PoolAttribute.PoolAttributeEnd
- from tosa import RescaleAttribute
-
- if not hasattr(RescaleAttribute, "Start"):
- RescaleAttribute.Start = RescaleAttribute.RescaleAttributeStart
- RescaleAttribute.AddInputZp = RescaleAttribute.RescaleAttributeAddInputZp
- RescaleAttribute.AddOutputZp = RescaleAttribute.RescaleAttributeAddOutputZp
- RescaleAttribute.AddMultiplier = (
- RescaleAttribute.RescaleAttributeAddMultiplier
- )
- RescaleAttribute.StartMultiplierVector = (
- RescaleAttribute.RescaleAttributeStartMultiplierVector
- )
- RescaleAttribute.AddShift = RescaleAttribute.RescaleAttributeAddShift
- RescaleAttribute.StartShiftVector = (
- RescaleAttribute.RescaleAttributeStartShiftVector
- )
- RescaleAttribute.AddScale32 = RescaleAttribute.RescaleAttributeAddScale32
- RescaleAttribute.AddDoubleRound = (
- RescaleAttribute.RescaleAttributeAddDoubleRound
- )
- RescaleAttribute.AddPerChannel = (
- RescaleAttribute.RescaleAttributeAddPerChannel
- )
- RescaleAttribute.End = RescaleAttribute.RescaleAttributeEnd
- from tosa import ReshapeAttribute
-
- if not hasattr(ReshapeAttribute, "Start"):
- ReshapeAttribute.Start = ReshapeAttribute.ReshapeAttributeStart
- ReshapeAttribute.AddNewShape = ReshapeAttribute.ReshapeAttributeAddNewShape
- ReshapeAttribute.StartNewShapeVector = (
- ReshapeAttribute.ReshapeAttributeStartNewShapeVector
- )
- ReshapeAttribute.End = ReshapeAttribute.ReshapeAttributeEnd
- from tosa import ResizeAttribute
-
- if not hasattr(ResizeAttribute, "Start"):
- ResizeAttribute.Start = ResizeAttribute.ResizeAttributeStart
- ResizeAttribute.AddOutputSize = ResizeAttribute.ResizeAttributeAddOutputSize
- ResizeAttribute.StartOutputSizeVector = (
- ResizeAttribute.ResizeAttributeStartOutputSizeVector
- )
- ResizeAttribute.AddStride = ResizeAttribute.ResizeAttributeAddStride
- ResizeAttribute.StartStrideVector = (
- ResizeAttribute.ResizeAttributeStartStrideVector
- )
- ResizeAttribute.AddOffset = ResizeAttribute.ResizeAttributeAddOffset
- ResizeAttribute.StartOffsetVector = (
- ResizeAttribute.ResizeAttributeStartOffsetVector
- )
- ResizeAttribute.AddShift = ResizeAttribute.ResizeAttributeAddShift
- ResizeAttribute.AddStrideFp = ResizeAttribute.ResizeAttributeAddStrideFp
- ResizeAttribute.StartStrideFpVector = (
- ResizeAttribute.ResizeAttributeStartStrideFpVector
- )
- ResizeAttribute.AddOffsetFp = ResizeAttribute.ResizeAttributeAddOffsetFp
- ResizeAttribute.StartOffsetFpVector = (
- ResizeAttribute.ResizeAttributeStartOffsetFpVector
- )
- ResizeAttribute.AddMode = ResizeAttribute.ResizeAttributeAddMode
- ResizeAttribute.End = ResizeAttribute.ResizeAttributeEnd
- from tosa import SliceAttribute
-
- if not hasattr(SliceAttribute, "Start"):
- SliceAttribute.Start = SliceAttribute.SliceAttributeStart
- SliceAttribute.AddStart = SliceAttribute.SliceAttributeAddStart
- SliceAttribute.StartStartVector = (
- SliceAttribute.SliceAttributeStartStartVector
- )
- SliceAttribute.AddSize = SliceAttribute.SliceAttributeAddSize
- SliceAttribute.StartSizeVector = (
- SliceAttribute.SliceAttributeStartSizeVector
- )
- SliceAttribute.End = SliceAttribute.SliceAttributeEnd
- from tosa import TableAttribute
-
- if not hasattr(TableAttribute, "Start"):
- TableAttribute.Start = TableAttribute.TableAttributeStart
- TableAttribute.AddTable = TableAttribute.TableAttributeAddTable
- TableAttribute.StartTableVector = (
- TableAttribute.TableAttributeStartTableVector
- )
- TableAttribute.End = TableAttribute.TableAttributeEnd
- from tosa import TileAttribute
-
- if not hasattr(TileAttribute, "Start"):
- TileAttribute.Start = TileAttribute.TileAttributeStart
- TileAttribute.AddMultiples = TileAttribute.TileAttributeAddMultiples
- TileAttribute.StartMultiplesVector = (
- TileAttribute.TileAttributeStartMultiplesVector
- )
- TileAttribute.End = TileAttribute.TileAttributeEnd
- from tosa import TosaBasicBlock
-
- if not hasattr(TosaBasicBlock, "Start"):
- TosaBasicBlock.Start = TosaBasicBlock.TosaBasicBlockStart
- TosaBasicBlock.AddName = TosaBasicBlock.TosaBasicBlockAddName
- TosaBasicBlock.AddOperators = TosaBasicBlock.TosaBasicBlockAddOperators
- TosaBasicBlock.StartOperatorsVector = (
- TosaBasicBlock.TosaBasicBlockStartOperatorsVector
- )
- TosaBasicBlock.AddTensors = TosaBasicBlock.TosaBasicBlockAddTensors
- TosaBasicBlock.StartTensorsVector = (
- TosaBasicBlock.TosaBasicBlockStartTensorsVector
- )
- TosaBasicBlock.AddInputs = TosaBasicBlock.TosaBasicBlockAddInputs
- TosaBasicBlock.StartInputsVector = (
- TosaBasicBlock.TosaBasicBlockStartInputsVector
- )
- TosaBasicBlock.AddOutputs = TosaBasicBlock.TosaBasicBlockAddOutputs
- TosaBasicBlock.StartOutputsVector = (
- TosaBasicBlock.TosaBasicBlockStartOutputsVector
- )
- TosaBasicBlock.End = TosaBasicBlock.TosaBasicBlockEnd
- from tosa import TosaGraph
-
- if not hasattr(TosaGraph, "Start"):
- TosaGraph.Start = TosaGraph.TosaGraphStart
- TosaGraph.AddVersion = TosaGraph.TosaGraphAddVersion
- TosaGraph.AddBlocks = TosaGraph.TosaGraphAddBlocks
- TosaGraph.StartBlocksVector = TosaGraph.TosaGraphStartBlocksVector
- TosaGraph.End = TosaGraph.TosaGraphEnd
- from tosa import TosaOperator
-
- if not hasattr(TosaOperator, "Start"):
- TosaOperator.Start = TosaOperator.TosaOperatorStart
- TosaOperator.AddOp = TosaOperator.TosaOperatorAddOp
- TosaOperator.AddAttributeType = TosaOperator.TosaOperatorAddAttributeType
- TosaOperator.AddAttribute = TosaOperator.TosaOperatorAddAttribute
- TosaOperator.AddInputs = TosaOperator.TosaOperatorAddInputs
- TosaOperator.StartInputsVector = TosaOperator.TosaOperatorStartInputsVector
- TosaOperator.AddOutputs = TosaOperator.TosaOperatorAddOutputs
- TosaOperator.StartOutputsVector = (
- TosaOperator.TosaOperatorStartOutputsVector
- )
- TosaOperator.End = TosaOperator.TosaOperatorEnd
- from tosa import TosaTensor
-
- if not hasattr(TosaTensor, "Start"):
- TosaTensor.Start = TosaTensor.TosaTensorStart
- TosaTensor.AddName = TosaTensor.TosaTensorAddName
- TosaTensor.AddShape = TosaTensor.TosaTensorAddShape
- TosaTensor.StartShapeVector = TosaTensor.TosaTensorStartShapeVector
- TosaTensor.AddType = TosaTensor.TosaTensorAddType
- TosaTensor.AddData = TosaTensor.TosaTensorAddData
- TosaTensor.StartDataVector = TosaTensor.TosaTensorStartDataVector
- TosaTensor.End = TosaTensor.TosaTensorEnd
- from tosa import TransposeAttribute
-
- if not hasattr(TransposeAttribute, "Start"):
- TransposeAttribute.Start = TransposeAttribute.TransposeAttributeStart
- TransposeAttribute.AddPerms = TransposeAttribute.TransposeAttributeAddPerms
- TransposeAttribute.StartPermsVector = (
- TransposeAttribute.TransposeAttributeStartPermsVector
- )
- TransposeAttribute.End = TransposeAttribute.TransposeAttributeEnd
- from tosa import TransposeConvAttribute
-
- if not hasattr(TransposeConvAttribute, "Start"):
- TransposeConvAttribute.Start = (
- TransposeConvAttribute.TransposeConvAttributeStart
- )
- TransposeConvAttribute.AddOutPad = (
- TransposeConvAttribute.TransposeConvAttributeAddOutPad
- )
- TransposeConvAttribute.StartOutPadVector = (
- TransposeConvAttribute.TransposeConvAttributeStartOutPadVector
- )
- TransposeConvAttribute.AddStride = (
- TransposeConvAttribute.TransposeConvAttributeAddStride
- )
- TransposeConvAttribute.StartStrideVector = (
- TransposeConvAttribute.TransposeConvAttributeStartStrideVector
- )
- TransposeConvAttribute.AddOutputShape = (
- TransposeConvAttribute.TransposeConvAttributeAddOutputShape
- )
- TransposeConvAttribute.StartOutputShapeVector = (
- TransposeConvAttribute.TransposeConvAttributeStartOutputShapeVector
- )
- TransposeConvAttribute.AddInputZp = (
- TransposeConvAttribute.TransposeConvAttributeAddInputZp
- )
- TransposeConvAttribute.AddWeightZp = (
- TransposeConvAttribute.TransposeConvAttributeAddWeightZp
- )
- TransposeConvAttribute.End = (
- TransposeConvAttribute.TransposeConvAttributeEnd
- )
- from tosa import Version
-
- if not hasattr(Version, "Start"):
- Version.Start = Version.VersionStart
- Version.Add_major = Version.VersionAdd_major
- Version.Add_minor = Version.VersionAdd_minor
- Version.Add_patch = Version.VersionAdd_patch
- Version.Add_draft = Version.VersionAdd_draft
- Version.End = Version.VersionEnd
- from tosa import MatMulAttribute
-
- if not hasattr(MatMulAttribute, "Start"):
- MatMulAttribute.Start = MatMulAttribute.MatMulAttributeStart
- MatMulAttribute.AddAZp = MatMulAttribute.MatMulAttributeAddAZp
- MatMulAttribute.AddBZp = MatMulAttribute.MatMulAttributeAddBZp
- MatMulAttribute.End = MatMulAttribute.MatMulAttributeEnd
- from tosa import FullyConnectedAttribute
-
- if not hasattr(FullyConnectedAttribute, "Start"):
- FullyConnectedAttribute.Start = (
- FullyConnectedAttribute.FullyConnectedAttributeStart
- )
- FullyConnectedAttribute.AddInputZp = (
- FullyConnectedAttribute.FullyConnectedAttributeAddInputZp
- )
- FullyConnectedAttribute.AddWeightZp = (
- FullyConnectedAttribute.FullyConnectedAttributeAddWeightZp
- )
- FullyConnectedAttribute.End = (
- FullyConnectedAttribute.FullyConnectedAttributeEnd
- )
- from tosa import NegateAttribute
-
- if not hasattr(NegateAttribute, "Start"):
- NegateAttribute.Start = NegateAttribute.NegateAttributeStart
- NegateAttribute.AddInput1Zp = NegateAttribute.NegateAttributeAddInput1Zp
- NegateAttribute.AddOutputZp = NegateAttribute.NegateAttributeAddOutputZp
- NegateAttribute.End = NegateAttribute.NegateAttributeEnd
- from tosa import WhileLoopAttribute
-
- if not hasattr(WhileLoopAttribute, "Start"):
- WhileLoopAttribute.Start = WhileLoopAttribute.WhileLoopAttributeStart
- WhileLoopAttribute.AddCondBranch = (
- WhileLoopAttribute.WhileLoopAttributeAddCondBranch
- )
- WhileLoopAttribute.AddBodyBranch = (
- WhileLoopAttribute.WhileLoopAttributeAddBodyBranch
- )
- WhileLoopAttribute.End = WhileLoopAttribute.WhileLoopAttributeEnd
+ @staticmethod
+ def convertDataToUint8Vec(dtype, data):
+ u8_data = list()
+ # little endianness
+ if dtype == DType.BOOL:
+ for val in data:
+ val_u8 = np.uint8(val)
+ u8_data.append(val_u8)
+ elif dtype == DType.INT4:
+ in_size = len(data)
+ out_size = (in_size + 1) // 2
+ for i in range(out_size):
+ val_0 = data[2 * i]
+ if (2 * i + 1) < in_size:
+ val_1 = data[2 * i + 1]
+ else:
+ val_1 = 0
+ val_i8 = (val_0 & 0xF) | ((val_1 & 0xF) << 4)
+ val_u8 = np.uint8(val_i8)
+ u8_data.append(val_u8)
+ elif dtype == DType.INT8:
+ for val in data:
+ val_u8 = np.array(val).astype(dtype=np.uint8)
+ u8_data.append(val_u8)
+ elif dtype == DType.INT16:
+ for val in data:
+ val_u16 = np.array(val).astype(dtype=np.uint16)
+ b0 = val_u16 & ByteMask
+ b1 = (val_u16 >> np.uint16(8)) & ByteMask
+ u8_data.extend([b0, b1])
+ elif dtype == DType.INT32:
+ for val in data:
+ val_u32 = np.array(val).astype(dtype=np.uint32)
+ b0 = val_u32 & ByteMask
+ b1 = (val_u32 >> np.uint32(8)) & ByteMask
+ b2 = (val_u32 >> np.uint32(16)) & ByteMask
+ b3 = (val_u32 >> np.uint32(24)) & ByteMask
+ u8_data.extend([b0, b1, b2, b3])
+ elif dtype == DType.INT48:
+ for val in data:
+ val_u64 = np.uint64(val)
+ b0 = val_u64 & ByteMask
+ b1 = (val_u64 >> np.uint64(8)) & ByteMask
+ b2 = (val_u64 >> np.uint64(16)) & ByteMask
+ b3 = (val_u64 >> np.uint64(24)) & ByteMask
+ b4 = (val_u64 >> np.uint64(32)) & ByteMask
+ b5 = (val_u64 >> np.uint64(40)) & ByteMask
+ u8_data.extend([b0, b1, b2, b3, b4, b5])
+ elif dtype == DType.SHAPE:
+ for val in data:
+ val_u64 = np.uint64(val)
+ b0 = val_u64 & ByteMask
+ b1 = (val_u64 >> np.uint64(8)) & ByteMask
+ b2 = (val_u64 >> np.uint64(16)) & ByteMask
+ b3 = (val_u64 >> np.uint64(24)) & ByteMask
+ b4 = (val_u64 >> np.uint64(32)) & ByteMask
+ b5 = (val_u64 >> np.uint64(40)) & ByteMask
+ b6 = (val_u64 >> np.uint64(48)) & ByteMask
+ b7 = (val_u64 >> np.uint64(56)) & ByteMask
+ u8_data.extend([b0, b1, b2, b3, b4, b5, b6, b7])
+ elif dtype == DType.FP16:
+ np_arr = np.array(data, dtype=np.float16)
+ u8_data.extend(np_arr.view(np.uint8))
+ elif dtype == DType.FP32:
+ np_arr = np.array(data, dtype=np.float32)
+ u8_data.extend(np_arr.view(np.uint8))
+ elif dtype == DType.BF16:
+ for val in data:
+ # convert val to little endian byte arrays b
+ b = struct.pack("<f", val)
+ # val => [ b[3], b[2], b[1], b[0] ]
+ # keep only most significant 2 bytes for bf16
+ # in little endian ordering
+ u8_data.extend([b[2], b[3]])
+ elif dtype == DType.FP8E4M3:
+ for val in data:
+ # convert val to fp8_bits then to single byte
+ f32_as_int = struct.unpack(">L", struct.pack(">f", val))[0]
+ f32_bits = f"{f32_as_int:032b}"
+ fp8_bits = f32_bits[0] + f32_bits[1:5] + f32_bits[9:12]
+ fp8_bytes = int(fp8_bits, 2).to_bytes(1, byteorder="little")
+ u8_data.extend(fp8_bytes)
+ elif dtype == DType.FP8E5M2:
+ for val in data:
+ # convert val to fp8_bits then to single byte
+ f32_as_int = struct.unpack(">L", struct.pack(">f", val))[0]
+ f32_bits = f"{f32_as_int:032b}"
+ fp8_bits = f32_bits[0] + f32_bits[1:6] + f32_bits[9:11]
+ fp8_bytes = int(fp8_bits, 2).to_bytes(1, byteorder="little")
+ u8_data.extend(fp8_bytes)
+ elif dtype == TosaDType.DType:
+ # Serialize DType enum data (uint32 values) as little-endian uint8 bytes
+ np_arr = np.array(data, dtype=np.uint32)
+ u8_data.extend(np_arr.view(np.uint8))
+ else:
+ raise Exception("unsupported data type {}".format(DTypeNames[dtype]))
+ return u8_data
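Editor's note: convertDataToUint8Vec replaces the per-tensor byte packing that previously lived in TosaSerializerTensor.serialize and adds the FP16/BF16/FP8/SHAPE cases. A minimal sketch of calling it directly, assuming DType is importable from serializer.tosa_serializer as the module's own use of it implies:

    import serializer.tosa_serializer as ts

    vals = [1.0, -2.5]

    # FP16: two little-endian bytes per value (np.float16 viewed as uint8)
    u8_fp16 = ts.TosaSerializer.convertDataToUint8Vec(ts.DType.FP16, vals)
    assert len(u8_fp16) == 2 * len(vals)

    # BF16: also two bytes per value, the upper half of the float32 bit pattern
    u8_bf16 = ts.TosaSerializer.convertDataToUint8Vec(ts.DType.BF16, vals)
    assert len(u8_bf16) == 2 * len(vals)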
diff --git a/python/tosa/ArithmeticRightShiftAttribute.py b/python/tosa/ArithmeticRightShiftAttribute.py
index 226dc0e..f2aaeb5 100644
--- a/python/tosa/ArithmeticRightShiftAttribute.py
+++ b/python/tosa/ArithmeticRightShiftAttribute.py
@@ -10,13 +10,17 @@ class ArithmeticRightShiftAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsArithmeticRightShiftAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ArithmeticRightShiftAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsArithmeticRightShiftAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def ArithmeticRightShiftAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -31,6 +35,20 @@ class ArithmeticRightShiftAttribute(object):
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
-def ArithmeticRightShiftAttributeStart(builder): builder.StartObject(1)
-def ArithmeticRightShiftAttributeAddRound(builder, round): builder.PrependBoolSlot(0, round, 0)
-def ArithmeticRightShiftAttributeEnd(builder): return builder.EndObject()
+def ArithmeticRightShiftAttributeStart(builder):
+ builder.StartObject(1)
+
+def Start(builder):
+ ArithmeticRightShiftAttributeStart(builder)
+
+def ArithmeticRightShiftAttributeAddRound(builder, round):
+ builder.PrependBoolSlot(0, round, 0)
+
+def AddRound(builder, round):
+ ArithmeticRightShiftAttributeAddRound(builder, round)
+
+def ArithmeticRightShiftAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return ArithmeticRightShiftAttributeEnd(builder)
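Editor's note: the regenerated bindings keep the long-form functions and add short module-level wrappers, which is what lets the serializer drop its add_compat_methods shim. A minimal sketch of building this attribute with the new names:

    import flatbuffers
    from tosa import ArithmeticRightShiftAttribute as a

    builder = flatbuffers.Builder(0)
    a.Start(builder)
    a.AddRound(builder, True)
    attr = a.End(builder)   # offset of the finished attribute table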
diff --git a/python/tosa/Attribute.py b/python/tosa/Attribute.py
index 166de8e..6abdcfb 100644
--- a/python/tosa/Attribute.py
+++ b/python/tosa/Attribute.py
@@ -9,19 +9,18 @@ class Attribute(object):
TransposeConvAttribute = 3
PadAttribute = 4
AxisAttribute = 5
- ReshapeAttribute = 6
- SliceAttribute = 7
- TileAttribute = 8
- ResizeAttribute = 9
- ClampAttribute = 10
- RescaleAttribute = 11
- MulAttribute = 12
- ArithmeticRightShiftAttribute = 13
- CondIfAttribute = 14
- WhileLoopAttribute = 15
- TransposeAttribute = 16
- TableAttribute = 17
- MatMulAttribute = 18
- FullyConnectedAttribute = 19
- NegateAttribute = 20
-
+ ResizeAttribute = 6
+ ClampAttribute = 7
+ RescaleAttribute = 8
+ MulAttribute = 9
+ ArithmeticRightShiftAttribute = 10
+ CondIfAttribute = 11
+ WhileLoopAttribute = 12
+ TransposeAttribute = 13
+ TableAttribute = 14
+ MatMulAttribute = 15
+ FullyConnectedAttribute = 16
+ NegateAttribute = 17
+ CustomAttribute = 18
+ FFTAttribute = 19
+ RFFTAttribute = 20
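Editor's note: the union discriminants are renumbered because ReshapeAttribute, SliceAttribute and TileAttribute were removed, so buffers written with the old schema cannot be decoded against these values. A minimal check of the new numbering, read straight from the generated enum:

    from tosa.Attribute import Attribute

    assert Attribute.ClampAttribute == 7       # was 10 before the renumbering
    assert Attribute.RFFTAttribute == 20       # newly added
    assert not hasattr(Attribute, "ReshapeAttribute")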
diff --git a/python/tosa/AxisAttribute.py b/python/tosa/AxisAttribute.py
index 30876b7..7ce4a63 100644
--- a/python/tosa/AxisAttribute.py
+++ b/python/tosa/AxisAttribute.py
@@ -10,13 +10,17 @@ class AxisAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsAxisAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = AxisAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsAxisAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def AxisAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -31,6 +35,20 @@ class AxisAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def AxisAttributeStart(builder): builder.StartObject(1)
-def AxisAttributeAddAxis(builder, axis): builder.PrependInt32Slot(0, axis, 0)
-def AxisAttributeEnd(builder): return builder.EndObject()
+def AxisAttributeStart(builder):
+ builder.StartObject(1)
+
+def Start(builder):
+ AxisAttributeStart(builder)
+
+def AxisAttributeAddAxis(builder, axis):
+ builder.PrependInt32Slot(0, axis, 0)
+
+def AddAxis(builder, axis):
+ AxisAttributeAddAxis(builder, axis)
+
+def AxisAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return AxisAttributeEnd(builder)
diff --git a/python/tosa/ClampAttribute.py b/python/tosa/ClampAttribute.py
index 066dd4f..6a41498 100644
--- a/python/tosa/ClampAttribute.py
+++ b/python/tosa/ClampAttribute.py
@@ -10,13 +10,17 @@ class ClampAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsClampAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ClampAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsClampAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def ClampAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -25,36 +29,91 @@ class ClampAttribute(object):
self._tab = flatbuffers.table.Table(buf, pos)
# ClampAttribute
- def MinInt(self):
+ def MinVal(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# ClampAttribute
- def MaxInt(self):
+ def MinValAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+ return 0
+
+ # ClampAttribute
+ def MinValLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # ClampAttribute
+ def MinValIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ return o == 0
+
+ # ClampAttribute
+ def MaxVal(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# ClampAttribute
- def MinFp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ def MaxValAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
- return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
- return 0.0
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+ return 0
# ClampAttribute
- def MaxFp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+ def MaxValLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
- return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
- return 0.0
-
-def ClampAttributeStart(builder): builder.StartObject(4)
-def ClampAttributeAddMinInt(builder, minInt): builder.PrependInt32Slot(0, minInt, 0)
-def ClampAttributeAddMaxInt(builder, maxInt): builder.PrependInt32Slot(1, maxInt, 0)
-def ClampAttributeAddMinFp(builder, minFp): builder.PrependFloat32Slot(2, minFp, 0.0)
-def ClampAttributeAddMaxFp(builder, maxFp): builder.PrependFloat32Slot(3, maxFp, 0.0)
-def ClampAttributeEnd(builder): return builder.EndObject()
+ return self._tab.VectorLen(o)
+ return 0
+
+ # ClampAttribute
+ def MaxValIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ return o == 0
+
+def ClampAttributeStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ ClampAttributeStart(builder)
+
+def ClampAttributeAddMinVal(builder, minVal):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(minVal), 0)
+
+def AddMinVal(builder, minVal):
+ ClampAttributeAddMinVal(builder, minVal)
+
+def ClampAttributeStartMinValVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+def StartMinValVector(builder, numElems: int) -> int:
+ return ClampAttributeStartMinValVector(builder, numElems)
+
+def ClampAttributeAddMaxVal(builder, maxVal):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(maxVal), 0)
+
+def AddMaxVal(builder, maxVal):
+ ClampAttributeAddMaxVal(builder, maxVal)
+
+def ClampAttributeStartMaxValVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+def StartMaxValVector(builder, numElems: int) -> int:
+ return ClampAttributeStartMaxValVector(builder, numElems)
+
+def ClampAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return ClampAttributeEnd(builder)
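Editor's note: with min/max now stored as raw uint8 vectors instead of typed scalars, a reader must reinterpret the bytes according to the operator's data type. A minimal sketch for an FP32 clamp, assuming the bytes were packed little-endian as the Python serializer does on little-endian hosts:

    import numpy as np

    def clamp_bounds_fp32(clamp_attr):
        # clamp_attr: a deserialized tosa.ClampAttribute.ClampAttribute
        min_val = clamp_attr.MinValAsNumpy().view(np.float32)[0]
        max_val = clamp_attr.MaxValAsNumpy().view(np.float32)[0]
        return min_val, max_val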
diff --git a/python/tosa/CondIfAttribute.py b/python/tosa/CondIfAttribute.py
index 57e5cb7..8f2aa9b 100644
--- a/python/tosa/CondIfAttribute.py
+++ b/python/tosa/CondIfAttribute.py
@@ -10,13 +10,17 @@ class CondIfAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsCondIfAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = CondIfAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsCondIfAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def CondIfAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -25,20 +29,39 @@ class CondIfAttribute(object):
self._tab = flatbuffers.table.Table(buf, pos)
# CondIfAttribute
- def ThenBranch(self):
+ def ThenGraph(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# CondIfAttribute
- def ElseBranch(self):
+ def ElseGraph(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
-def CondIfAttributeStart(builder): builder.StartObject(2)
-def CondIfAttributeAddThenBranch(builder, thenBranch): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(thenBranch), 0)
-def CondIfAttributeAddElseBranch(builder, elseBranch): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(elseBranch), 0)
-def CondIfAttributeEnd(builder): return builder.EndObject()
+def CondIfAttributeStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ CondIfAttributeStart(builder)
+
+def CondIfAttributeAddThenGraph(builder, thenGraph):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(thenGraph), 0)
+
+def AddThenGraph(builder, thenGraph):
+ CondIfAttributeAddThenGraph(builder, thenGraph)
+
+def CondIfAttributeAddElseGraph(builder, elseGraph):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(elseGraph), 0)
+
+def AddElseGraph(builder, elseGraph):
+ CondIfAttributeAddElseGraph(builder, elseGraph)
+
+def CondIfAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return CondIfAttributeEnd(builder)
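Editor's note: the then/else rename is mirrored by the serializer-side CondIfAttribute helper shown earlier, which now takes graph names rather than branch names. A minimal sketch, assuming two basic blocks named "then_block" and "else_block" exist in the graph being built:

    import serializer.tosa_serializer as ts

    attr = ts.TosaSerializerAttribute()
    attr.CondIfAttribute("then_block", "else_block")   # then_graph, else_graph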
diff --git a/python/tosa/ConvAttribute.py b/python/tosa/ConvAttribute.py
index 8244ea5..dfa75dc 100644
--- a/python/tosa/ConvAttribute.py
+++ b/python/tosa/ConvAttribute.py
@@ -10,13 +10,17 @@ class ConvAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsConvAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ConvAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsConvAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def ConvAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -119,13 +123,88 @@ class ConvAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def ConvAttributeStart(builder): builder.StartObject(5)
-def ConvAttributeAddPad(builder, pad): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pad), 0)
-def ConvAttributeStartPadVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ConvAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def ConvAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ConvAttributeAddDilation(builder, dilation): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dilation), 0)
-def ConvAttributeStartDilationVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ConvAttributeAddInputZp(builder, inputZp): builder.PrependInt32Slot(3, inputZp, 0)
-def ConvAttributeAddWeightZp(builder, weightZp): builder.PrependInt32Slot(4, weightZp, 0)
-def ConvAttributeEnd(builder): return builder.EndObject()
+ # ConvAttribute
+ def LocalBound(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # ConvAttribute
+ def AccType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+def ConvAttributeStart(builder):
+ builder.StartObject(7)
+
+def Start(builder):
+ ConvAttributeStart(builder)
+
+def ConvAttributeAddPad(builder, pad):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pad), 0)
+
+def AddPad(builder, pad):
+ ConvAttributeAddPad(builder, pad)
+
+def ConvAttributeStartPadVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartPadVector(builder, numElems: int) -> int:
+ return ConvAttributeStartPadVector(builder, numElems)
+
+def ConvAttributeAddStride(builder, stride):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
+
+def AddStride(builder, stride):
+ ConvAttributeAddStride(builder, stride)
+
+def ConvAttributeStartStrideVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartStrideVector(builder, numElems: int) -> int:
+ return ConvAttributeStartStrideVector(builder, numElems)
+
+def ConvAttributeAddDilation(builder, dilation):
+ builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dilation), 0)
+
+def AddDilation(builder, dilation):
+ ConvAttributeAddDilation(builder, dilation)
+
+def ConvAttributeStartDilationVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartDilationVector(builder, numElems: int) -> int:
+ return ConvAttributeStartDilationVector(builder, numElems)
+
+def ConvAttributeAddInputZp(builder, inputZp):
+ builder.PrependInt32Slot(3, inputZp, 0)
+
+def AddInputZp(builder, inputZp):
+ ConvAttributeAddInputZp(builder, inputZp)
+
+def ConvAttributeAddWeightZp(builder, weightZp):
+ builder.PrependInt32Slot(4, weightZp, 0)
+
+def AddWeightZp(builder, weightZp):
+ ConvAttributeAddWeightZp(builder, weightZp)
+
+def ConvAttributeAddLocalBound(builder, localBound):
+ builder.PrependBoolSlot(5, localBound, 0)
+
+def AddLocalBound(builder, localBound):
+ ConvAttributeAddLocalBound(builder, localBound)
+
+def ConvAttributeAddAccType(builder, accType):
+ builder.PrependUint32Slot(6, accType, 0)
+
+def AddAccType(builder, accType):
+ ConvAttributeAddAccType(builder, accType)
+
+def ConvAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return ConvAttributeEnd(builder)
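Editor's note: the two new fields (local_bound at slot 5, acc_type at slot 6) line up with the extra arguments the serializer's ConvAttribute helper now requires. A minimal sketch, assuming acc_type takes a DType value, as the uint32 slot suggests:

    import serializer.tosa_serializer as ts

    attr = ts.TosaSerializerAttribute()
    attr.ConvAttribute(
        pad=[0, 0, 0, 0],
        stride=[1, 1],
        dilation=[1, 1],
        input_zp=0,
        weight_zp=0,
        local_bound=False,
        acc_type=ts.DType.FP32,   # assumption: accumulator type is a DType enum value
    )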
diff --git a/python/tosa/CustomAttribute.py b/python/tosa/CustomAttribute.py
new file mode 100644
index 0000000..db35dca
--- /dev/null
+++ b/python/tosa/CustomAttribute.py
@@ -0,0 +1,106 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tosa
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class CustomAttribute(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = CustomAttribute()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsCustomAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
+ def CustomAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
+
+ # CustomAttribute
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # CustomAttribute
+ def OperatorName(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+ # CustomAttribute
+ def DomainName(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+ # CustomAttribute
+ def ImplementationAttrs(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ a = self._tab.Vector(o)
+ return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
+ return 0
+
+ # CustomAttribute
+ def ImplementationAttrsAsNumpy(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
+ return 0
+
+ # CustomAttribute
+ def ImplementationAttrsLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # CustomAttribute
+ def ImplementationAttrsIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
+ return o == 0
+
+def CustomAttributeStart(builder):
+ builder.StartObject(3)
+
+def Start(builder):
+ CustomAttributeStart(builder)
+
+def CustomAttributeAddOperatorName(builder, operatorName):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(operatorName), 0)
+
+def AddOperatorName(builder, operatorName):
+ CustomAttributeAddOperatorName(builder, operatorName)
+
+def CustomAttributeAddDomainName(builder, domainName):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(domainName), 0)
+
+def AddDomainName(builder, domainName):
+ CustomAttributeAddDomainName(builder, domainName)
+
+def CustomAttributeAddImplementationAttrs(builder, implementationAttrs):
+ builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(implementationAttrs), 0)
+
+def AddImplementationAttrs(builder, implementationAttrs):
+ CustomAttributeAddImplementationAttrs(builder, implementationAttrs)
+
+def CustomAttributeStartImplementationAttrsVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+def StartImplementationAttrsVector(builder, numElems: int) -> int:
+ return CustomAttributeStartImplementationAttrsVector(builder, numElems)
+
+def CustomAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return CustomAttributeEnd(builder)
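A minimal round-trip sketch using the generated CustomAttribute helpers above (the operator name, domain name, and byte payload are made-up placeholders):

import flatbuffers
from tosa import CustomAttribute

builder = flatbuffers.Builder(0)
# Strings and vectors are created before Start().
operator_name = builder.CreateString("my_custom_op")      # hypothetical name
domain_name = builder.CreateString("com.example.domain")  # hypothetical name
impl_attrs = builder.CreateByteVector(b"\x01\x02\x03")    # opaque implementation payload

CustomAttribute.Start(builder)
CustomAttribute.AddOperatorName(builder, operator_name)
CustomAttribute.AddDomainName(builder, domain_name)
CustomAttribute.AddImplementationAttrs(builder, impl_attrs)
attr = CustomAttribute.End(builder)
builder.Finish(attr)

# Read it back with the accessor class defined in this file.
parsed = CustomAttribute.CustomAttribute.GetRootAs(builder.Output(), 0)
assert parsed.OperatorName() == b"my_custom_op"
assert parsed.ImplementationAttrsLength() == 3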
diff --git a/python/tosa/DType.py b/python/tosa/DType.py
index ad9048b..e585cb9 100644
--- a/python/tosa/DType.py
+++ b/python/tosa/DType.py
@@ -11,6 +11,10 @@ class DType(object):
INT16 = 5
INT32 = 6
INT48 = 7
- FLOAT = 8
+ FP32 = 8
UINT16 = 9
-
+ FP16 = 10
+ BF16 = 11
+ SHAPE = 12
+ FP8E4M3 = 13
+ FP8E5M2 = 14
diff --git a/python/tosa/FFTAttribute.py b/python/tosa/FFTAttribute.py
new file mode 100644
index 0000000..d1624c2
--- /dev/null
+++ b/python/tosa/FFTAttribute.py
@@ -0,0 +1,67 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tosa
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class FFTAttribute(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = FFTAttribute()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsFFTAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
+ def FFTAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
+
+ # FFTAttribute
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # FFTAttribute
+ def Inverse(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # FFTAttribute
+ def LocalBound(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+def FFTAttributeStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ FFTAttributeStart(builder)
+
+def FFTAttributeAddInverse(builder, inverse):
+ builder.PrependBoolSlot(0, inverse, 0)
+
+def AddInverse(builder, inverse):
+ FFTAttributeAddInverse(builder, inverse)
+
+def FFTAttributeAddLocalBound(builder, localBound):
+ builder.PrependBoolSlot(1, localBound, 0)
+
+def AddLocalBound(builder, localBound):
+ FFTAttributeAddLocalBound(builder, localBound)
+
+def FFTAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return FFTAttributeEnd(builder)
diff --git a/python/tosa/FullyConnectedAttribute.py b/python/tosa/FullyConnectedAttribute.py
index 62b480d..8854503 100644
--- a/python/tosa/FullyConnectedAttribute.py
+++ b/python/tosa/FullyConnectedAttribute.py
@@ -10,13 +10,17 @@ class FullyConnectedAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsFullyConnectedAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = FullyConnectedAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsFullyConnectedAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def FullyConnectedAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -38,7 +42,26 @@ class FullyConnectedAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def FullyConnectedAttributeStart(builder): builder.StartObject(2)
-def FullyConnectedAttributeAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0)
-def FullyConnectedAttributeAddWeightZp(builder, weightZp): builder.PrependInt32Slot(1, weightZp, 0)
-def FullyConnectedAttributeEnd(builder): return builder.EndObject()
+def FullyConnectedAttributeStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ FullyConnectedAttributeStart(builder)
+
+def FullyConnectedAttributeAddInputZp(builder, inputZp):
+ builder.PrependInt32Slot(0, inputZp, 0)
+
+def AddInputZp(builder, inputZp):
+ FullyConnectedAttributeAddInputZp(builder, inputZp)
+
+def FullyConnectedAttributeAddWeightZp(builder, weightZp):
+ builder.PrependInt32Slot(1, weightZp, 0)
+
+def AddWeightZp(builder, weightZp):
+ FullyConnectedAttributeAddWeightZp(builder, weightZp)
+
+def FullyConnectedAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return FullyConnectedAttributeEnd(builder)
diff --git a/python/tosa/MatMulAttribute.py b/python/tosa/MatMulAttribute.py
index 601f13f..325428a 100644
--- a/python/tosa/MatMulAttribute.py
+++ b/python/tosa/MatMulAttribute.py
@@ -10,13 +10,17 @@ class MatMulAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsMatMulAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = MatMulAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsMatMulAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def MatMulAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -38,7 +42,26 @@ class MatMulAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def MatMulAttributeStart(builder): builder.StartObject(2)
-def MatMulAttributeAddAZp(builder, aZp): builder.PrependInt32Slot(0, aZp, 0)
-def MatMulAttributeAddBZp(builder, bZp): builder.PrependInt32Slot(1, bZp, 0)
-def MatMulAttributeEnd(builder): return builder.EndObject()
+def MatMulAttributeStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ MatMulAttributeStart(builder)
+
+def MatMulAttributeAddAZp(builder, aZp):
+ builder.PrependInt32Slot(0, aZp, 0)
+
+def AddAZp(builder, aZp):
+ MatMulAttributeAddAZp(builder, aZp)
+
+def MatMulAttributeAddBZp(builder, bZp):
+ builder.PrependInt32Slot(1, bZp, 0)
+
+def AddBZp(builder, bZp):
+ MatMulAttributeAddBZp(builder, bZp)
+
+def MatMulAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return MatMulAttributeEnd(builder)
diff --git a/python/tosa/MulAttribute.py b/python/tosa/MulAttribute.py
index 79be4d3..236d3f5 100644
--- a/python/tosa/MulAttribute.py
+++ b/python/tosa/MulAttribute.py
@@ -10,13 +10,17 @@ class MulAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsMulAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = MulAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsMulAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def MulAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -31,6 +35,20 @@ class MulAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def MulAttributeStart(builder): builder.StartObject(1)
-def MulAttributeAddShift(builder, shift): builder.PrependInt32Slot(0, shift, 0)
-def MulAttributeEnd(builder): return builder.EndObject()
+def MulAttributeStart(builder):
+ builder.StartObject(1)
+
+def Start(builder):
+ MulAttributeStart(builder)
+
+def MulAttributeAddShift(builder, shift):
+ builder.PrependInt32Slot(0, shift, 0)
+
+def AddShift(builder, shift):
+ MulAttributeAddShift(builder, shift)
+
+def MulAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return MulAttributeEnd(builder)
diff --git a/python/tosa/NegateAttribute.py b/python/tosa/NegateAttribute.py
index 24a57dc..eae46f5 100644
--- a/python/tosa/NegateAttribute.py
+++ b/python/tosa/NegateAttribute.py
@@ -10,13 +10,17 @@ class NegateAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsNegateAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = NegateAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsNegateAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def NegateAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -38,7 +42,26 @@ class NegateAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def NegateAttributeStart(builder): builder.StartObject(2)
-def NegateAttributeAddInput1Zp(builder, input1Zp): builder.PrependInt32Slot(0, input1Zp, 0)
-def NegateAttributeAddOutputZp(builder, outputZp): builder.PrependInt32Slot(1, outputZp, 0)
-def NegateAttributeEnd(builder): return builder.EndObject()
+def NegateAttributeStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ NegateAttributeStart(builder)
+
+def NegateAttributeAddInput1Zp(builder, input1Zp):
+ builder.PrependInt32Slot(0, input1Zp, 0)
+
+def AddInput1Zp(builder, input1Zp):
+ NegateAttributeAddInput1Zp(builder, input1Zp)
+
+def NegateAttributeAddOutputZp(builder, outputZp):
+ builder.PrependInt32Slot(1, outputZp, 0)
+
+def AddOutputZp(builder, outputZp):
+ NegateAttributeAddOutputZp(builder, outputZp)
+
+def NegateAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return NegateAttributeEnd(builder)
diff --git a/python/tosa/Op.py b/python/tosa/Op.py
index 181e457..35b2b80 100644
--- a/python/tosa/Op.py
+++ b/python/tosa/Op.py
@@ -72,4 +72,15 @@ class Op(object):
CUSTOM = 66
COND_IF = 67
WHILE_LOOP = 68
-
+ FFT2D = 69
+ RFFT2D = 70
+ ERF = 71
+ DIM = 72
+ CONST_SHAPE = 73
+ CONCAT_SHAPE = 74
+ ADD_SHAPE = 75
+ SUB_SHAPE = 76
+ MUL_SHAPE = 77
+ DIV_SHAPE = 78
+ COS = 79
+ SIN = 80
diff --git a/python/tosa/PadAttribute.py b/python/tosa/PadAttribute.py
index 0875481..301bf17 100644
--- a/python/tosa/PadAttribute.py
+++ b/python/tosa/PadAttribute.py
@@ -10,13 +10,17 @@ class PadAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsPadAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PadAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsPadAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def PadAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -25,49 +29,52 @@ class PadAttribute(object):
self._tab = flatbuffers.table.Table(buf, pos)
# PadAttribute
- def Padding(self, j):
+ def PadConst(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return self._tab.Get(flatbuffers.number_types.Uint8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
return 0
# PadAttribute
- def PaddingAsNumpy(self):
+ def PadConstAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint8Flags, o)
return 0
# PadAttribute
- def PaddingLength(self):
+ def PadConstLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# PadAttribute
- def PaddingIsNone(self):
+ def PadConstIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
- # PadAttribute
- def PadConstInt(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
+def PadAttributeStart(builder):
+ builder.StartObject(1)
- # PadAttribute
- def PadConstFp(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
- return 0.0
-
-def PadAttributeStart(builder): builder.StartObject(3)
-def PadAttributeAddPadding(builder, padding): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(padding), 0)
-def PadAttributeStartPaddingVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def PadAttributeAddPadConstInt(builder, padConstInt): builder.PrependInt32Slot(1, padConstInt, 0)
-def PadAttributeAddPadConstFp(builder, padConstFp): builder.PrependFloat32Slot(2, padConstFp, 0.0)
-def PadAttributeEnd(builder): return builder.EndObject()
+def Start(builder):
+ PadAttributeStart(builder)
+
+def PadAttributeAddPadConst(builder, padConst):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(padConst), 0)
+
+def AddPadConst(builder, padConst):
+ PadAttributeAddPadConst(builder, padConst)
+
+def PadAttributeStartPadConstVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+def StartPadConstVector(builder, numElems: int) -> int:
+ return PadAttributeStartPadConstVector(builder, numElems)
+
+def PadAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return PadAttributeEnd(builder)
diff --git a/python/tosa/PoolAttribute.py b/python/tosa/PoolAttribute.py
index 8b6903e..c13e038 100644
--- a/python/tosa/PoolAttribute.py
+++ b/python/tosa/PoolAttribute.py
@@ -10,13 +10,17 @@ class PoolAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsPoolAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PoolAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsPoolAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def PoolAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -119,13 +123,75 @@ class PoolAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def PoolAttributeStart(builder): builder.StartObject(5)
-def PoolAttributeAddPad(builder, pad): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pad), 0)
-def PoolAttributeStartPadVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def PoolAttributeAddKernel(builder, kernel): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernel), 0)
-def PoolAttributeStartKernelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def PoolAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def PoolAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def PoolAttributeAddInputZp(builder, inputZp): builder.PrependInt32Slot(3, inputZp, 0)
-def PoolAttributeAddOutputZp(builder, outputZp): builder.PrependInt32Slot(4, outputZp, 0)
-def PoolAttributeEnd(builder): return builder.EndObject()
+ # PoolAttribute
+ def AccType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+def PoolAttributeStart(builder):
+ builder.StartObject(6)
+
+def Start(builder):
+ PoolAttributeStart(builder)
+
+def PoolAttributeAddPad(builder, pad):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pad), 0)
+
+def AddPad(builder, pad):
+ PoolAttributeAddPad(builder, pad)
+
+def PoolAttributeStartPadVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartPadVector(builder, numElems: int) -> int:
+ return PoolAttributeStartPadVector(builder, numElems)
+
+def PoolAttributeAddKernel(builder, kernel):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(kernel), 0)
+
+def AddKernel(builder, kernel):
+ PoolAttributeAddKernel(builder, kernel)
+
+def PoolAttributeStartKernelVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartKernelVector(builder, numElems: int) -> int:
+ return PoolAttributeStartKernelVector(builder, numElems)
+
+def PoolAttributeAddStride(builder, stride):
+ builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
+
+def AddStride(builder, stride):
+ PoolAttributeAddStride(builder, stride)
+
+def PoolAttributeStartStrideVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartStrideVector(builder, numElems: int) -> int:
+ return PoolAttributeStartStrideVector(builder, numElems)
+
+def PoolAttributeAddInputZp(builder, inputZp):
+ builder.PrependInt32Slot(3, inputZp, 0)
+
+def AddInputZp(builder, inputZp):
+ PoolAttributeAddInputZp(builder, inputZp)
+
+def PoolAttributeAddOutputZp(builder, outputZp):
+ builder.PrependInt32Slot(4, outputZp, 0)
+
+def AddOutputZp(builder, outputZp):
+ PoolAttributeAddOutputZp(builder, outputZp)
+
+def PoolAttributeAddAccType(builder, accType):
+ builder.PrependUint32Slot(5, accType, 0)
+
+def AddAccType(builder, accType):
+ PoolAttributeAddAccType(builder, accType)
+
+def PoolAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return PoolAttributeEnd(builder)
diff --git a/python/tosa/RFFTAttribute.py b/python/tosa/RFFTAttribute.py
new file mode 100644
index 0000000..7f76024
--- /dev/null
+++ b/python/tosa/RFFTAttribute.py
@@ -0,0 +1,54 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tosa
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class RFFTAttribute(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = RFFTAttribute()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsRFFTAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
+ def RFFTAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
+
+ # RFFTAttribute
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # RFFTAttribute
+ def LocalBound(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+def RFFTAttributeStart(builder):
+ builder.StartObject(1)
+
+def Start(builder):
+ RFFTAttributeStart(builder)
+
+def RFFTAttributeAddLocalBound(builder, localBound):
+ builder.PrependBoolSlot(0, localBound, 0)
+
+def AddLocalBound(builder, localBound):
+ RFFTAttributeAddLocalBound(builder, localBound)
+
+def RFFTAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return RFFTAttributeEnd(builder)
diff --git a/python/tosa/RescaleAttribute.py b/python/tosa/RescaleAttribute.py
index 8ba68aa..12e7ced 100644
--- a/python/tosa/RescaleAttribute.py
+++ b/python/tosa/RescaleAttribute.py
@@ -10,13 +10,17 @@ class RescaleAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsRescaleAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = RescaleAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsRescaleAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def RescaleAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -39,88 +43,90 @@ class RescaleAttribute(object):
return 0
# RescaleAttribute
- def Multiplier(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # RescaleAttribute
- def MultiplierAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # RescaleAttribute
- def MultiplierLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # RescaleAttribute
- def MultiplierIsNone(self):
+ def Scale32(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
- return o == 0
-
- # RescaleAttribute
- def Shift(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # RescaleAttribute
- def ShiftAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
# RescaleAttribute
- def ShiftLength(self):
+ def DoubleRound(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # RescaleAttribute
- def ShiftIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
- return o == 0
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
# RescaleAttribute
- def Scale32(self):
+ def PerChannel(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# RescaleAttribute
- def DoubleRound(self):
+ def InputUnsigned(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# RescaleAttribute
- def PerChannel(self):
+ def OutputUnsigned(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
-def RescaleAttributeStart(builder): builder.StartObject(7)
-def RescaleAttributeAddInputZp(builder, inputZp): builder.PrependInt32Slot(0, inputZp, 0)
-def RescaleAttributeAddOutputZp(builder, outputZp): builder.PrependInt32Slot(1, outputZp, 0)
-def RescaleAttributeAddMultiplier(builder, multiplier): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(multiplier), 0)
-def RescaleAttributeStartMultiplierVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def RescaleAttributeAddShift(builder, shift): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(shift), 0)
-def RescaleAttributeStartShiftVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def RescaleAttributeAddScale32(builder, scale32): builder.PrependBoolSlot(4, scale32, 0)
-def RescaleAttributeAddDoubleRound(builder, doubleRound): builder.PrependBoolSlot(5, doubleRound, 0)
-def RescaleAttributeAddPerChannel(builder, perChannel): builder.PrependBoolSlot(6, perChannel, 0)
-def RescaleAttributeEnd(builder): return builder.EndObject()
+def RescaleAttributeStart(builder):
+ builder.StartObject(7)
+
+def Start(builder):
+ RescaleAttributeStart(builder)
+
+def RescaleAttributeAddInputZp(builder, inputZp):
+ builder.PrependInt32Slot(0, inputZp, 0)
+
+def AddInputZp(builder, inputZp):
+ RescaleAttributeAddInputZp(builder, inputZp)
+
+def RescaleAttributeAddOutputZp(builder, outputZp):
+ builder.PrependInt32Slot(1, outputZp, 0)
+
+def AddOutputZp(builder, outputZp):
+ RescaleAttributeAddOutputZp(builder, outputZp)
+
+def RescaleAttributeAddScale32(builder, scale32):
+ builder.PrependBoolSlot(2, scale32, 0)
+
+def AddScale32(builder, scale32):
+ RescaleAttributeAddScale32(builder, scale32)
+
+def RescaleAttributeAddDoubleRound(builder, doubleRound):
+ builder.PrependBoolSlot(3, doubleRound, 0)
+
+def AddDoubleRound(builder, doubleRound):
+ RescaleAttributeAddDoubleRound(builder, doubleRound)
+
+def RescaleAttributeAddPerChannel(builder, perChannel):
+ builder.PrependBoolSlot(4, perChannel, 0)
+
+def AddPerChannel(builder, perChannel):
+ RescaleAttributeAddPerChannel(builder, perChannel)
+
+def RescaleAttributeAddInputUnsigned(builder, inputUnsigned):
+ builder.PrependBoolSlot(5, inputUnsigned, 0)
+
+def AddInputUnsigned(builder, inputUnsigned):
+ RescaleAttributeAddInputUnsigned(builder, inputUnsigned)
+
+def RescaleAttributeAddOutputUnsigned(builder, outputUnsigned):
+ builder.PrependBoolSlot(6, outputUnsigned, 0)
+
+def AddOutputUnsigned(builder, outputUnsigned):
+ RescaleAttributeAddOutputUnsigned(builder, outputUnsigned)
+
+def RescaleAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return RescaleAttributeEnd(builder)
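A minimal sketch of the reshuffled RescaleAttribute builders above, including the new InputUnsigned/OutputUnsigned flags (the zero points and flag values are arbitrary examples):

import flatbuffers
from tosa import RescaleAttribute

builder = flatbuffers.Builder(0)
RescaleAttribute.Start(builder)
RescaleAttribute.AddInputZp(builder, -128)
RescaleAttribute.AddOutputZp(builder, 0)
RescaleAttribute.AddScale32(builder, True)
RescaleAttribute.AddDoubleRound(builder, True)
RescaleAttribute.AddPerChannel(builder, False)
RescaleAttribute.AddInputUnsigned(builder, False)  # new field in this change
RescaleAttribute.AddOutputUnsigned(builder, True)  # new field in this change
attr = RescaleAttribute.End(builder)
builder.Finish(attr)

parsed = RescaleAttribute.RescaleAttribute.GetRootAs(builder.Output(), 0)
assert parsed.OutputUnsigned() and parsed.InputZp() == -128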
diff --git a/python/tosa/ReshapeAttribute.py b/python/tosa/ReshapeAttribute.py
deleted file mode 100644
index 73b1ee8..0000000
--- a/python/tosa/ReshapeAttribute.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tosa
-
-import flatbuffers
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class ReshapeAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsReshapeAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = ReshapeAttribute()
- x.Init(buf, n + offset)
- return x
-
- @classmethod
- def ReshapeAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
- return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
-
- # ReshapeAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # ReshapeAttribute
- def NewShape(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # ReshapeAttribute
- def NewShapeAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # ReshapeAttribute
- def NewShapeLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # ReshapeAttribute
- def NewShapeIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- return o == 0
-
-def ReshapeAttributeStart(builder): builder.StartObject(1)
-def ReshapeAttributeAddNewShape(builder, newShape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0)
-def ReshapeAttributeStartNewShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ReshapeAttributeEnd(builder): return builder.EndObject()
diff --git a/python/tosa/ResizeAttribute.py b/python/tosa/ResizeAttribute.py
index 1ed2dc0..96bfa56 100644
--- a/python/tosa/ResizeAttribute.py
+++ b/python/tosa/ResizeAttribute.py
@@ -10,13 +10,17 @@ class ResizeAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsResizeAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ResizeAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsResizeAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def ResizeAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -25,165 +29,143 @@ class ResizeAttribute(object):
self._tab = flatbuffers.table.Table(buf, pos)
# ResizeAttribute
- def OutputSize(self, j):
+ def Scale(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return self._tab.Get(flatbuffers.number_types.Int16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2))
return 0
# ResizeAttribute
- def OutputSizeAsNumpy(self):
+ def ScaleAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int16Flags, o)
return 0
# ResizeAttribute
- def OutputSizeLength(self):
+ def ScaleLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ResizeAttribute
- def OutputSizeIsNone(self):
+ def ScaleIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
# ResizeAttribute
- def Stride(self, j):
+ def Offset(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return self._tab.Get(flatbuffers.number_types.Int16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2))
return 0
# ResizeAttribute
- def StrideAsNumpy(self):
+ def OffsetAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int16Flags, o)
return 0
# ResizeAttribute
- def StrideLength(self):
+ def OffsetLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ResizeAttribute
- def StrideIsNone(self):
+ def OffsetIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# ResizeAttribute
- def Offset(self, j):
+ def Border(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
+ return self._tab.Get(flatbuffers.number_types.Int16Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2))
return 0
# ResizeAttribute
- def OffsetAsNumpy(self):
+ def BorderAsNumpy(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
+ return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int16Flags, o)
return 0
# ResizeAttribute
- def OffsetLength(self):
+ def BorderLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# ResizeAttribute
- def OffsetIsNone(self):
+ def BorderIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
return o == 0
# ResizeAttribute
- def Shift(self):
+ def Mode(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
- return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
- # ResizeAttribute
- def StrideFp(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
+def ResizeAttributeStart(builder):
+ builder.StartObject(4)
- # ResizeAttribute
- def StrideFpAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
- return 0
+def Start(builder):
+ ResizeAttributeStart(builder)
- # ResizeAttribute
- def StrideFpLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
+def ResizeAttributeAddScale(builder, scale):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0)
- # ResizeAttribute
- def StrideFpIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
- return o == 0
+def AddScale(builder, scale):
+ ResizeAttributeAddScale(builder, scale)
- # ResizeAttribute
- def OffsetFp(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
+def ResizeAttributeStartScaleVector(builder, numElems):
+ return builder.StartVector(2, numElems, 2)
- # ResizeAttribute
- def OffsetFpAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o)
- return 0
+def StartScaleVector(builder, numElems: int) -> int:
+ return ResizeAttributeStartScaleVector(builder, numElems)
- # ResizeAttribute
- def OffsetFpLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
+def ResizeAttributeAddOffset(builder, offset):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(offset), 0)
- # ResizeAttribute
- def OffsetFpIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
- return o == 0
+def AddOffset(builder, offset):
+ ResizeAttributeAddOffset(builder, offset)
- # ResizeAttribute
- def Mode(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
- if o != 0:
- return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
- return 0
+def ResizeAttributeStartOffsetVector(builder, numElems):
+ return builder.StartVector(2, numElems, 2)
+
+def StartOffsetVector(builder, numElems: int) -> int:
+ return ResizeAttributeStartOffsetVector(builder, numElems)
+
+def ResizeAttributeAddBorder(builder, border):
+ builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(border), 0)
+
+def AddBorder(builder, border):
+ ResizeAttributeAddBorder(builder, border)
+
+def ResizeAttributeStartBorderVector(builder, numElems):
+ return builder.StartVector(2, numElems, 2)
+
+def StartBorderVector(builder, numElems: int) -> int:
+ return ResizeAttributeStartBorderVector(builder, numElems)
+
+def ResizeAttributeAddMode(builder, mode):
+ builder.PrependUint32Slot(3, mode, 0)
+
+def AddMode(builder, mode):
+ ResizeAttributeAddMode(builder, mode)
+
+def ResizeAttributeEnd(builder):
+ return builder.EndObject()
-def ResizeAttributeStart(builder): builder.StartObject(7)
-def ResizeAttributeAddOutputSize(builder, outputSize): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(outputSize), 0)
-def ResizeAttributeStartOutputSizeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def ResizeAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddOffset(builder, offset): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(offset), 0)
-def ResizeAttributeStartOffsetVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddShift(builder, shift): builder.PrependInt32Slot(3, shift, 0)
-def ResizeAttributeAddStrideFp(builder, strideFp): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(strideFp), 0)
-def ResizeAttributeStartStrideFpVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddOffsetFp(builder, offsetFp): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(offsetFp), 0)
-def ResizeAttributeStartOffsetFpVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def ResizeAttributeAddMode(builder, mode): builder.PrependUint32Slot(6, mode, 0)
-def ResizeAttributeEnd(builder): return builder.EndObject()
+def End(builder):
+ return ResizeAttributeEnd(builder)
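A minimal sketch of the new ResizeAttribute layout above, where scale/offset/border are int16 vectors and mode takes a ResizeMode value (the numeric values are illustrative; numpy-backed vector creation via CreateNumpyVector is assumed to be available in the flatbuffers runtime):

import flatbuffers
import numpy as np
from tosa import ResizeAttribute
from tosa.ResizeMode import ResizeMode

builder = flatbuffers.Builder(0)
# int16 vectors, created before Start().
scale = builder.CreateNumpyVector(np.array([4, 2, 4, 2], dtype=np.int16))
offset = builder.CreateNumpyVector(np.array([0, 0], dtype=np.int16))
border = builder.CreateNumpyVector(np.array([0, 0], dtype=np.int16))

ResizeAttribute.Start(builder)
ResizeAttribute.AddScale(builder, scale)
ResizeAttribute.AddOffset(builder, offset)
ResizeAttribute.AddBorder(builder, border)
ResizeAttribute.AddMode(builder, ResizeMode.BILINEAR)
attr = ResizeAttribute.End(builder)
builder.Finish(attr)

parsed = ResizeAttribute.ResizeAttribute.GetRootAs(builder.Output(), 0)
assert parsed.ScaleLength() == 4 and parsed.Mode() == ResizeMode.BILINEAR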
diff --git a/python/tosa/ResizeMode.py b/python/tosa/ResizeMode.py
index 65bcd5d..388ecda 100644
--- a/python/tosa/ResizeMode.py
+++ b/python/tosa/ResizeMode.py
@@ -6,4 +6,3 @@ class ResizeMode(object):
UNKNOWN = 0
NEAREST = 1
BILINEAR = 2
-
diff --git a/python/tosa/SliceAttribute.py b/python/tosa/SliceAttribute.py
deleted file mode 100644
index d3f6073..0000000
--- a/python/tosa/SliceAttribute.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tosa
-
-import flatbuffers
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class SliceAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsSliceAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = SliceAttribute()
- x.Init(buf, n + offset)
- return x
-
- @classmethod
- def SliceAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
- return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
-
- # SliceAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # SliceAttribute
- def Start(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # SliceAttribute
- def StartAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # SliceAttribute
- def StartLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # SliceAttribute
- def StartIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- return o == 0
-
- # SliceAttribute
- def Size(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # SliceAttribute
- def SizeAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # SliceAttribute
- def SizeLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # SliceAttribute
- def SizeIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
- return o == 0
-
-def SliceAttributeStart(builder): builder.StartObject(2)
-def SliceAttributeAddStart(builder, start): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(start), 0)
-def SliceAttributeStartStartVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def SliceAttributeAddSize(builder, size): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(size), 0)
-def SliceAttributeStartSizeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def SliceAttributeEnd(builder): return builder.EndObject()
diff --git a/python/tosa/TableAttribute.py b/python/tosa/TableAttribute.py
index 49a5c9a..6caa1f2 100644
--- a/python/tosa/TableAttribute.py
+++ b/python/tosa/TableAttribute.py
@@ -10,13 +10,17 @@ class TableAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsTableAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TableAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsTableAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def TableAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -51,7 +55,26 @@ class TableAttribute(object):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
-def TableAttributeStart(builder): builder.StartObject(1)
-def TableAttributeAddTable(builder, table): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(table), 0)
-def TableAttributeStartTableVector(builder, numElems): return builder.StartVector(2, numElems, 2)
-def TableAttributeEnd(builder): return builder.EndObject()
+def TableAttributeStart(builder):
+ builder.StartObject(1)
+
+def Start(builder):
+ TableAttributeStart(builder)
+
+def TableAttributeAddTable(builder, table):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(table), 0)
+
+def AddTable(builder, table):
+ TableAttributeAddTable(builder, table)
+
+def TableAttributeStartTableVector(builder, numElems):
+ return builder.StartVector(2, numElems, 2)
+
+def StartTableVector(builder, numElems: int) -> int:
+ return TableAttributeStartTableVector(builder, numElems)
+
+def TableAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TableAttributeEnd(builder)
diff --git a/python/tosa/TileAttribute.py b/python/tosa/TileAttribute.py
deleted file mode 100644
index 03dd0fb..0000000
--- a/python/tosa/TileAttribute.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# automatically generated by the FlatBuffers compiler, do not modify
-
-# namespace: tosa
-
-import flatbuffers
-from flatbuffers.compat import import_numpy
-np = import_numpy()
-
-class TileAttribute(object):
- __slots__ = ['_tab']
-
- @classmethod
- def GetRootAsTileAttribute(cls, buf, offset):
- n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
- x = TileAttribute()
- x.Init(buf, n + offset)
- return x
-
- @classmethod
- def TileAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
- return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
-
- # TileAttribute
- def Init(self, buf, pos):
- self._tab = flatbuffers.table.Table(buf, pos)
-
- # TileAttribute
- def Multiples(self, j):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- a = self._tab.Vector(o)
- return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
- return 0
-
- # TileAttribute
- def MultiplesAsNumpy(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
- return 0
-
- # TileAttribute
- def MultiplesLength(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- if o != 0:
- return self._tab.VectorLen(o)
- return 0
-
- # TileAttribute
- def MultiplesIsNone(self):
- o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
- return o == 0
-
-def TileAttributeStart(builder): builder.StartObject(1)
-def TileAttributeAddMultiples(builder, multiples): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(multiples), 0)
-def TileAttributeStartMultiplesVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TileAttributeEnd(builder): return builder.EndObject()
diff --git a/python/tosa/TosaBasicBlock.py b/python/tosa/TosaBasicBlock.py
index 1c93c63..b31f455 100644
--- a/python/tosa/TosaBasicBlock.py
+++ b/python/tosa/TosaBasicBlock.py
@@ -10,13 +10,17 @@ class TosaBasicBlock(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsTosaBasicBlock(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TosaBasicBlock()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsTosaBasicBlock(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def TosaBasicBlockBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -121,14 +125,68 @@ class TosaBasicBlock(object):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
return o == 0
-def TosaBasicBlockStart(builder): builder.StartObject(5)
-def TosaBasicBlockAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def TosaBasicBlockAddOperators(builder, operators): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)
-def TosaBasicBlockStartOperatorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockAddTensors(builder, tensors): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
-def TosaBasicBlockStartTensorsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def TosaBasicBlockStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def TosaBasicBlockStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaBasicBlockEnd(builder): return builder.EndObject()
+def TosaBasicBlockStart(builder):
+ builder.StartObject(5)
+
+def Start(builder):
+ TosaBasicBlockStart(builder)
+
+def TosaBasicBlockAddName(builder, name):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+def AddName(builder, name):
+ TosaBasicBlockAddName(builder, name)
+
+def TosaBasicBlockAddOperators(builder, operators):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(operators), 0)
+
+def AddOperators(builder, operators):
+ TosaBasicBlockAddOperators(builder, operators)
+
+def TosaBasicBlockStartOperatorsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartOperatorsVector(builder, numElems: int) -> int:
+ return TosaBasicBlockStartOperatorsVector(builder, numElems)
+
+def TosaBasicBlockAddTensors(builder, tensors):
+ builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(tensors), 0)
+
+def AddTensors(builder, tensors):
+ TosaBasicBlockAddTensors(builder, tensors)
+
+def TosaBasicBlockStartTensorsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartTensorsVector(builder, numElems: int) -> int:
+ return TosaBasicBlockStartTensorsVector(builder, numElems)
+
+def TosaBasicBlockAddInputs(builder, inputs):
+ builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
+
+def AddInputs(builder, inputs):
+ TosaBasicBlockAddInputs(builder, inputs)
+
+def TosaBasicBlockStartInputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartInputsVector(builder, numElems: int) -> int:
+ return TosaBasicBlockStartInputsVector(builder, numElems)
+
+def TosaBasicBlockAddOutputs(builder, outputs):
+ builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
+
+def AddOutputs(builder, outputs):
+ TosaBasicBlockAddOutputs(builder, outputs)
+
+def TosaBasicBlockStartOutputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartOutputsVector(builder, numElems: int) -> int:
+ return TosaBasicBlockStartOutputsVector(builder, numElems)
+
+def TosaBasicBlockEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TosaBasicBlockEnd(builder)
diff --git a/python/tosa/TosaGraph.py b/python/tosa/TosaGraph.py
index eceffdb..84b51a7 100644
--- a/python/tosa/TosaGraph.py
+++ b/python/tosa/TosaGraph.py
@@ -10,13 +10,17 @@ class TosaGraph(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsTosaGraph(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TosaGraph()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsTosaGraph(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def TosaGraphBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -36,32 +40,56 @@ class TosaGraph(object):
return None
# TosaGraph
- def Blocks(self, j):
+ def Regions(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
- from tosa.TosaBasicBlock import TosaBasicBlock
- obj = TosaBasicBlock()
+ from tosa.TosaRegion import TosaRegion
+ obj = TosaRegion()
obj.Init(self._tab.Bytes, x)
return obj
return None
# TosaGraph
- def BlocksLength(self):
+ def RegionsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# TosaGraph
- def BlocksIsNone(self):
+ def RegionsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
-def TosaGraphStart(builder): builder.StartObject(2)
-def TosaGraphAddVersion(builder, version): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(version), 0)
-def TosaGraphAddBlocks(builder, blocks): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blocks), 0)
-def TosaGraphStartBlocksVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaGraphEnd(builder): return builder.EndObject()
+def TosaGraphStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ TosaGraphStart(builder)
+
+def TosaGraphAddVersion(builder, version):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(version), 0)
+
+def AddVersion(builder, version):
+ TosaGraphAddVersion(builder, version)
+
+def TosaGraphAddRegions(builder, regions):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(regions), 0)
+
+def AddRegions(builder, regions):
+ TosaGraphAddRegions(builder, regions)
+
+def TosaGraphStartRegionsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartRegionsVector(builder, numElems: int) -> int:
+ return TosaGraphStartRegionsVector(builder, numElems)
+
+def TosaGraphEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TosaGraphEnd(builder)
diff --git a/python/tosa/TosaOperator.py b/python/tosa/TosaOperator.py
index fd11f76..2b889ad 100644
--- a/python/tosa/TosaOperator.py
+++ b/python/tosa/TosaOperator.py
@@ -10,13 +10,17 @@ class TosaOperator(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsTosaOperator(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TosaOperator()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsTosaOperator(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def TosaOperatorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -88,12 +92,56 @@ class TosaOperator(object):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
return o == 0
-def TosaOperatorStart(builder): builder.StartObject(5)
-def TosaOperatorAddOp(builder, op): builder.PrependUint32Slot(0, op, 0)
-def TosaOperatorAddAttributeType(builder, attributeType): builder.PrependUint8Slot(1, attributeType, 0)
-def TosaOperatorAddAttribute(builder, attribute): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(attribute), 0)
-def TosaOperatorAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
-def TosaOperatorStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaOperatorAddOutputs(builder, outputs): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
-def TosaOperatorStartOutputsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaOperatorEnd(builder): return builder.EndObject()
+def TosaOperatorStart(builder):
+ builder.StartObject(5)
+
+def Start(builder):
+ TosaOperatorStart(builder)
+
+def TosaOperatorAddOp(builder, op):
+ builder.PrependUint32Slot(0, op, 0)
+
+def AddOp(builder, op):
+ TosaOperatorAddOp(builder, op)
+
+def TosaOperatorAddAttributeType(builder, attributeType):
+ builder.PrependUint8Slot(1, attributeType, 0)
+
+def AddAttributeType(builder, attributeType):
+ TosaOperatorAddAttributeType(builder, attributeType)
+
+def TosaOperatorAddAttribute(builder, attribute):
+ builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(attribute), 0)
+
+def AddAttribute(builder, attribute):
+ TosaOperatorAddAttribute(builder, attribute)
+
+def TosaOperatorAddInputs(builder, inputs):
+ builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0)
+
+def AddInputs(builder, inputs):
+ TosaOperatorAddInputs(builder, inputs)
+
+def TosaOperatorStartInputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartInputsVector(builder, numElems: int) -> int:
+ return TosaOperatorStartInputsVector(builder, numElems)
+
+def TosaOperatorAddOutputs(builder, outputs):
+ builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(outputs), 0)
+
+def AddOutputs(builder, outputs):
+ TosaOperatorAddOutputs(builder, outputs)
+
+def TosaOperatorStartOutputsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartOutputsVector(builder, numElems: int) -> int:
+ return TosaOperatorStartOutputsVector(builder, numElems)
+
+def TosaOperatorEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TosaOperatorEnd(builder)
diff --git a/python/tosa/TosaRegion.py b/python/tosa/TosaRegion.py
new file mode 100644
index 0000000..7fd6e3c
--- /dev/null
+++ b/python/tosa/TosaRegion.py
@@ -0,0 +1,91 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tosa
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class TosaRegion(object):
+ __slots__ = ['_tab']
+
+ @classmethod
+ def GetRootAs(cls, buf, offset=0):
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+ x = TosaRegion()
+ x.Init(buf, n + offset)
+ return x
+
+ @classmethod
+ def GetRootAsTosaRegion(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
+ def TosaRegionBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
+ return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
+
+ # TosaRegion
+ def Init(self, buf, pos):
+ self._tab = flatbuffers.table.Table(buf, pos)
+
+ # TosaRegion
+ def Name(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+ # TosaRegion
+ def Blocks(self, j):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ x = self._tab.Vector(o)
+ x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+ x = self._tab.Indirect(x)
+ from tosa.TosaBasicBlock import TosaBasicBlock
+ obj = TosaBasicBlock()
+ obj.Init(self._tab.Bytes, x)
+ return obj
+ return None
+
+ # TosaRegion
+ def BlocksLength(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ if o != 0:
+ return self._tab.VectorLen(o)
+ return 0
+
+ # TosaRegion
+ def BlocksIsNone(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+ return o == 0
+
+def TosaRegionStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ TosaRegionStart(builder)
+
+def TosaRegionAddName(builder, name):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+def AddName(builder, name):
+ TosaRegionAddName(builder, name)
+
+def TosaRegionAddBlocks(builder, blocks):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blocks), 0)
+
+def AddBlocks(builder, blocks):
+ TosaRegionAddBlocks(builder, blocks)
+
+def TosaRegionStartBlocksVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartBlocksVector(builder, numElems: int) -> int:
+ return TosaRegionStartBlocksVector(builder, numElems)
+
+def TosaRegionEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TosaRegionEnd(builder)
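
TosaRegion is new: it carries a name plus the vector of basic blocks that previously hung directly off TosaGraph. A small read-side sketch, assuming `region` came from TosaGraph.Regions(i) (the function name is illustrative):

def block_names(region):
    # Each element of the blocks vector is a TosaBasicBlock table
    if region.BlocksIsNone():
        return []
    return [region.Blocks(j).Name() for j in range(region.BlocksLength())]
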
diff --git a/python/tosa/TosaTensor.py b/python/tosa/TosaTensor.py
index a6f609d..3fb9f86 100644
--- a/python/tosa/TosaTensor.py
+++ b/python/tosa/TosaTensor.py
@@ -10,13 +10,17 @@ class TosaTensor(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsTosaTensor(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TosaTensor()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsTosaTensor(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def TosaTensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -92,11 +96,89 @@ class TosaTensor(object):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
return o == 0
-def TosaTensorStart(builder): builder.StartObject(4)
-def TosaTensorAddName(builder, name): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
-def TosaTensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
-def TosaTensorStartShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TosaTensorAddType(builder, type): builder.PrependUint32Slot(2, type, 0)
-def TosaTensorAddData(builder, data): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0)
-def TosaTensorStartDataVector(builder, numElems): return builder.StartVector(1, numElems, 1)
-def TosaTensorEnd(builder): return builder.EndObject()
+ # TosaTensor
+ def Variable(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # TosaTensor
+ def IsUnranked(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # TosaTensor
+ def VariableName(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+ if o != 0:
+ return self._tab.String(o + self._tab.Pos)
+ return None
+
+def TosaTensorStart(builder):
+ builder.StartObject(7)
+
+def Start(builder):
+ TosaTensorStart(builder)
+
+def TosaTensorAddName(builder, name):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
+
+def AddName(builder, name):
+ TosaTensorAddName(builder, name)
+
+def TosaTensorAddShape(builder, shape):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0)
+
+def AddShape(builder, shape):
+ TosaTensorAddShape(builder, shape)
+
+def TosaTensorStartShapeVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartShapeVector(builder, numElems: int) -> int:
+ return TosaTensorStartShapeVector(builder, numElems)
+
+def TosaTensorAddType(builder, type):
+ builder.PrependUint32Slot(2, type, 0)
+
+def AddType(builder, type):
+ TosaTensorAddType(builder, type)
+
+def TosaTensorAddData(builder, data):
+ builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0)
+
+def AddData(builder, data):
+ TosaTensorAddData(builder, data)
+
+def TosaTensorStartDataVector(builder, numElems):
+ return builder.StartVector(1, numElems, 1)
+
+def StartDataVector(builder, numElems: int) -> int:
+ return TosaTensorStartDataVector(builder, numElems)
+
+def TosaTensorAddVariable(builder, variable):
+ builder.PrependBoolSlot(4, variable, 0)
+
+def AddVariable(builder, variable):
+ TosaTensorAddVariable(builder, variable)
+
+def TosaTensorAddIsUnranked(builder, isUnranked):
+ builder.PrependBoolSlot(5, isUnranked, 0)
+
+def AddIsUnranked(builder, isUnranked):
+ TosaTensorAddIsUnranked(builder, isUnranked)
+
+def TosaTensorAddVariableName(builder, variableName):
+ builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(variableName), 0)
+
+def AddVariableName(builder, variableName):
+ TosaTensorAddVariableName(builder, variableName)
+
+def TosaTensorEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TosaTensorEnd(builder)
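
TosaTensor gains three fields (variable, is_unranked, variable_name). A write-side sketch using the module-level helpers generated above; the builder size and string values are placeholders, and shape/type/data are omitted for brevity:

import flatbuffers
from tosa import TosaTensor

builder = flatbuffers.Builder(256)
tensor_name = builder.CreateString("state0")
var_name = builder.CreateString("state0_var")

TosaTensor.Start(builder)
TosaTensor.AddName(builder, tensor_name)
TosaTensor.AddVariable(builder, True)           # new slot 4, defaults to False
TosaTensor.AddIsUnranked(builder, False)        # new slot 5, defaults to False
TosaTensor.AddVariableName(builder, var_name)   # new slot 6
tensor = TosaTensor.End(builder)
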
diff --git a/python/tosa/TransposeAttribute.py b/python/tosa/TransposeAttribute.py
index 82e6b3a..71cfdf0 100644
--- a/python/tosa/TransposeAttribute.py
+++ b/python/tosa/TransposeAttribute.py
@@ -10,13 +10,17 @@ class TransposeAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsTransposeAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TransposeAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsTransposeAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def TransposeAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -51,7 +55,26 @@ class TransposeAttribute(object):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
-def TransposeAttributeStart(builder): builder.StartObject(1)
-def TransposeAttributeAddPerms(builder, perms): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(perms), 0)
-def TransposeAttributeStartPermsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeAttributeEnd(builder): return builder.EndObject()
+def TransposeAttributeStart(builder):
+ builder.StartObject(1)
+
+def Start(builder):
+ TransposeAttributeStart(builder)
+
+def TransposeAttributeAddPerms(builder, perms):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(perms), 0)
+
+def AddPerms(builder, perms):
+ TransposeAttributeAddPerms(builder, perms)
+
+def TransposeAttributeStartPermsVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartPermsVector(builder, numElems: int) -> int:
+ return TransposeAttributeStartPermsVector(builder, numElems)
+
+def TransposeAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TransposeAttributeEnd(builder)
diff --git a/python/tosa/TransposeConvAttribute.py b/python/tosa/TransposeConvAttribute.py
index 8ca5ba7..e5397a8 100644
--- a/python/tosa/TransposeConvAttribute.py
+++ b/python/tosa/TransposeConvAttribute.py
@@ -10,13 +10,17 @@ class TransposeConvAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsTransposeConvAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = TransposeConvAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsTransposeConvAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def TransposeConvAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -119,13 +123,88 @@ class TransposeConvAttribute(object):
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
-def TransposeConvAttributeStart(builder): builder.StartObject(5)
-def TransposeConvAttributeAddOutPad(builder, outPad): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(outPad), 0)
-def TransposeConvAttributeStartOutPadVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeConvAttributeAddStride(builder, stride): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
-def TransposeConvAttributeStartStrideVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeConvAttributeAddOutputShape(builder, outputShape): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputShape), 0)
-def TransposeConvAttributeStartOutputShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4)
-def TransposeConvAttributeAddInputZp(builder, inputZp): builder.PrependInt32Slot(3, inputZp, 0)
-def TransposeConvAttributeAddWeightZp(builder, weightZp): builder.PrependInt32Slot(4, weightZp, 0)
-def TransposeConvAttributeEnd(builder): return builder.EndObject()
+ # TransposeConvAttribute
+ def LocalBound(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
+ if o != 0:
+ return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
+ return False
+
+ # TransposeConvAttribute
+ def AccType(self):
+ o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
+ if o != 0:
+ return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
+ return 0
+
+def TransposeConvAttributeStart(builder):
+ builder.StartObject(7)
+
+def Start(builder):
+ TransposeConvAttributeStart(builder)
+
+def TransposeConvAttributeAddOutPad(builder, outPad):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(outPad), 0)
+
+def AddOutPad(builder, outPad):
+ TransposeConvAttributeAddOutPad(builder, outPad)
+
+def TransposeConvAttributeStartOutPadVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartOutPadVector(builder, numElems: int) -> int:
+ return TransposeConvAttributeStartOutPadVector(builder, numElems)
+
+def TransposeConvAttributeAddStride(builder, stride):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(stride), 0)
+
+def AddStride(builder, stride):
+ TransposeConvAttributeAddStride(builder, stride)
+
+def TransposeConvAttributeStartStrideVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartStrideVector(builder, numElems: int) -> int:
+ return TransposeConvAttributeStartStrideVector(builder, numElems)
+
+def TransposeConvAttributeAddOutputShape(builder, outputShape):
+ builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(outputShape), 0)
+
+def AddOutputShape(builder, outputShape):
+ TransposeConvAttributeAddOutputShape(builder, outputShape)
+
+def TransposeConvAttributeStartOutputShapeVector(builder, numElems):
+ return builder.StartVector(4, numElems, 4)
+
+def StartOutputShapeVector(builder, numElems: int) -> int:
+ return TransposeConvAttributeStartOutputShapeVector(builder, numElems)
+
+def TransposeConvAttributeAddInputZp(builder, inputZp):
+ builder.PrependInt32Slot(3, inputZp, 0)
+
+def AddInputZp(builder, inputZp):
+ TransposeConvAttributeAddInputZp(builder, inputZp)
+
+def TransposeConvAttributeAddWeightZp(builder, weightZp):
+ builder.PrependInt32Slot(4, weightZp, 0)
+
+def AddWeightZp(builder, weightZp):
+ TransposeConvAttributeAddWeightZp(builder, weightZp)
+
+def TransposeConvAttributeAddLocalBound(builder, localBound):
+ builder.PrependBoolSlot(5, localBound, 0)
+
+def AddLocalBound(builder, localBound):
+ TransposeConvAttributeAddLocalBound(builder, localBound)
+
+def TransposeConvAttributeAddAccType(builder, accType):
+ builder.PrependUint32Slot(6, accType, 0)
+
+def AddAccType(builder, accType):
+ TransposeConvAttributeAddAccType(builder, accType)
+
+def TransposeConvAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return TransposeConvAttributeEnd(builder)
diff --git a/python/tosa/Version.py b/python/tosa/Version.py
index 06c3ba3..369f356 100644
--- a/python/tosa/Version.py
+++ b/python/tosa/Version.py
@@ -10,13 +10,17 @@ class Version(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsVersion(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Version()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsVersion(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def VersionBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -25,36 +29,65 @@ class Version(object):
self._tab = flatbuffers.table.Table(buf, pos)
# Version
- def _major(self):
+ def _Major(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
+ return -1
# Version
- def _minor(self):
+ def _Minor(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 31
+ return -1
# Version
- def _patch(self):
+ def _Patch(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
- return 0
+ return -1
# Version
- def _draft(self):
+ def _Draft(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return True
-def VersionStart(builder): builder.StartObject(4)
-def VersionAdd_major(builder, Major): builder.PrependInt32Slot(0, Major, 0)
-def VersionAdd_minor(builder, Minor): builder.PrependInt32Slot(1, Minor, 31)
-def VersionAdd_patch(builder, Patch): builder.PrependInt32Slot(2, Patch, 0)
-def VersionAdd_draft(builder, Draft): builder.PrependBoolSlot(3, Draft, 1)
-def VersionEnd(builder): return builder.EndObject()
+def VersionStart(builder):
+ builder.StartObject(4)
+
+def Start(builder):
+ VersionStart(builder)
+
+def VersionAdd_Major(builder, _Major):
+ builder.PrependInt32Slot(0, _Major, -1)
+
+def Add_Major(builder, _Major):
+ VersionAdd_Major(builder, _Major)
+
+def VersionAdd_Minor(builder, _Minor):
+ builder.PrependInt32Slot(1, _Minor, -1)
+
+def Add_Minor(builder, _Minor):
+ VersionAdd_Minor(builder, _Minor)
+
+def VersionAdd_Patch(builder, _Patch):
+ builder.PrependInt32Slot(2, _Patch, -1)
+
+def Add_Patch(builder, _Patch):
+ VersionAdd_Patch(builder, _Patch)
+
+def VersionAdd_Draft(builder, _Draft):
+ builder.PrependBoolSlot(3, _Draft, 1)
+
+def Add_Draft(builder, _Draft):
+ VersionAdd_Draft(builder, _Draft)
+
+def VersionEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return VersionEnd(builder)
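
With the defaults changed from 0/31/0 to -1, a version field that was never written now reads back as -1 instead of masquerading as a real version number. A read-side sketch, assuming `graph` was obtained via TosaGraph.GetRootAs (function name illustrative):

def check_version(graph):
    # Reject buffers whose version fields were left unset (they now read as -1)
    v = graph.Version()
    if v is None or -1 in (v._Major(), v._Minor(), v._Patch()):
        raise ValueError("serialized TOSA graph carries no usable version")
    return (v._Major(), v._Minor(), v._Patch(), v._Draft())
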
diff --git a/python/tosa/WhileLoopAttribute.py b/python/tosa/WhileLoopAttribute.py
index 1e18bca..5148c26 100644
--- a/python/tosa/WhileLoopAttribute.py
+++ b/python/tosa/WhileLoopAttribute.py
@@ -10,13 +10,17 @@ class WhileLoopAttribute(object):
__slots__ = ['_tab']
@classmethod
- def GetRootAsWhileLoopAttribute(cls, buf, offset):
+ def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = WhileLoopAttribute()
x.Init(buf, n + offset)
return x
@classmethod
+ def GetRootAsWhileLoopAttribute(cls, buf, offset=0):
+ """This method is deprecated. Please switch to GetRootAs."""
+ return cls.GetRootAs(buf, offset)
+ @classmethod
def WhileLoopAttributeBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x4F\x53\x41", size_prefixed=size_prefixed)
@@ -25,20 +29,39 @@ class WhileLoopAttribute(object):
self._tab = flatbuffers.table.Table(buf, pos)
# WhileLoopAttribute
- def CondBranch(self):
+ def CondGraph(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# WhileLoopAttribute
- def BodyBranch(self):
+ def BodyGraph(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
-def WhileLoopAttributeStart(builder): builder.StartObject(2)
-def WhileLoopAttributeAddCondBranch(builder, condBranch): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(condBranch), 0)
-def WhileLoopAttributeAddBodyBranch(builder, bodyBranch): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(bodyBranch), 0)
-def WhileLoopAttributeEnd(builder): return builder.EndObject()
+def WhileLoopAttributeStart(builder):
+ builder.StartObject(2)
+
+def Start(builder):
+ WhileLoopAttributeStart(builder)
+
+def WhileLoopAttributeAddCondGraph(builder, condGraph):
+ builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(condGraph), 0)
+
+def AddCondGraph(builder, condGraph):
+ WhileLoopAttributeAddCondGraph(builder, condGraph)
+
+def WhileLoopAttributeAddBodyGraph(builder, bodyGraph):
+ builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(bodyGraph), 0)
+
+def AddBodyGraph(builder, bodyGraph):
+ WhileLoopAttributeAddBodyGraph(builder, bodyGraph)
+
+def WhileLoopAttributeEnd(builder):
+ return builder.EndObject()
+
+def End(builder):
+ return WhileLoopAttributeEnd(builder)
diff --git a/schema/tosa.fbs b/schema/tosa.fbs
index 57440ef..79b83b1 100644
--- a/schema/tosa.fbs
+++ b/schema/tosa.fbs
@@ -1,5 +1,5 @@
-// Copyright (c) 2020-2021, ARM Limited.
+// Copyright (c) 2020-2024, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -20,6 +20,9 @@ file_identifier "TOSA";
// File extension of any written files.
file_extension "tosa";
+// NOTE: New values added to the schema should be placed
+// at the end of the list in order to keep the schema stable.
+
enum DType:uint32 {
UNKNOWN = 0,
BOOL,
@@ -29,8 +32,13 @@ enum DType:uint32 {
INT16,
INT32,
INT48,
- FLOAT,
+ FP32,
UINT16,
+ FP16,
+ BF16,
+ SHAPE,
+ FP8E4M3,
+ FP8E5M2,
}
enum ResizeMode:uint32 {
@@ -41,8 +49,6 @@ enum ResizeMode:uint32 {
enum Op:uint32 {
UNKNOWN = 0,
-
- // Tensor Operator
ARGMAX,
AVG_POOL2D,
CONV2D,
@@ -52,14 +58,10 @@ enum Op:uint32 {
MATMUL,
MAX_POOL2D,
TRANSPOSE_CONV2D,
-
- // Activation
CLAMP,
RESERVED,
SIGMOID,
TANH,
-
- // Elementwise-Binary
ADD,
ARITHMETIC_RIGHT_SHIFT,
BITWISE_AND,
@@ -77,8 +79,6 @@ enum Op:uint32 {
POW,
SUB,
TABLE,
-
- // Elementwise-Unary
ABS,
BITWISE_NOT,
CEIL,
@@ -90,24 +90,16 @@ enum Op:uint32 {
NEGATE,
RECIPROCAL,
RSQRT,
-
- // Elementwise-Ternary
SELECT,
-
- // Logical
EQUAL,
GREATER,
GREATER_EQUAL,
-
- // Reduction
REDUCE_ANY,
REDUCE_ALL,
REDUCE_MAX,
REDUCE_MIN,
REDUCE_PRODUCT,
REDUCE_SUM,
-
- // Data layout operation
CONCAT,
PAD,
RESHAPE,
@@ -115,28 +107,28 @@ enum Op:uint32 {
SLICE,
TILE,
TRANSPOSE,
-
- // Gather/scatter operation
GATHER,
SCATTER,
-
- // Image
RESIZE,
-
- // Type conversion
CAST,
RESCALE,
-
- // Data Nodes
CONST,
IDENTITY,
-
- // Custom operations
CUSTOM,
-
- // Control flow operators
COND_IF,
WHILE_LOOP,
+ FFT2D,
+ RFFT2D,
+ ERF,
+ DIM,
+ CONST_SHAPE,
+ CONCAT_SHAPE,
+ ADD_SHAPE,
+ SUB_SHAPE,
+ MUL_SHAPE,
+ DIV_SHAPE,
+ COS,
+ SIN,
}
union Attribute {
@@ -145,9 +137,6 @@ union Attribute {
TransposeConvAttribute,
PadAttribute,
AxisAttribute,
- ReshapeAttribute,
- SliceAttribute,
- TileAttribute,
ResizeAttribute,
ClampAttribute,
RescaleAttribute,
@@ -159,7 +148,10 @@ union Attribute {
TableAttribute,
MatMulAttribute,
FullyConnectedAttribute,
- NegateAttribute
+ NegateAttribute,
+ CustomAttribute,
+ FFTAttribute,
+ RFFTAttribute,
}
table PoolAttribute {
@@ -168,6 +160,7 @@ table PoolAttribute {
stride: [int32];
input_zp: int32;
output_zp: int32;
+ acc_type: DType;
}
table ConvAttribute {
@@ -176,6 +169,8 @@ table ConvAttribute {
dilation: [int32];
input_zp: int32;
weight_zp: int32;
+ local_bound: bool;
+ acc_type: DType;
}
table TransposeConvAttribute {
@@ -184,56 +179,38 @@ table TransposeConvAttribute {
output_shape: [int32];
input_zp: int32;
weight_zp: int32;
+ local_bound: bool;
+ acc_type: DType;
}
table PadAttribute {
- padding: [int32];
- pad_const_int: int32;
- pad_const_fp: float;
+ pad_const: [ubyte] (force_align: 8);
}
table AxisAttribute {
axis: int32;
}
-table ReshapeAttribute {
- new_shape: [int32];
-}
-
-table SliceAttribute {
- start: [int32];
- size: [int32];
-}
-
-table TileAttribute {
- multiples: [int32];
-}
-
table ResizeAttribute {
- output_size: [int32];
- stride: [int32];
- offset: [int32];
- shift: int32;
- stride_fp: [float];
- offset_fp: [float];
+ scale: [int16];
+ offset: [int16];
+ border: [int16];
mode: ResizeMode;
}
table ClampAttribute {
- min_int: int32;
- max_int: int32;
- min_fp: float;
- max_fp: float;
+ min_val: [ubyte] (force_align: 8);
+ max_val: [ubyte] (force_align: 8);
}
table RescaleAttribute {
input_zp: int32;
output_zp: int32;
- multiplier: [int32];
- shift: [int32];
scale32: bool;
double_round: bool;
per_channel: bool;
+ input_unsigned: bool;
+ output_unsigned: bool;
}
table MulAttribute {
@@ -245,13 +222,13 @@ table ArithmeticRightShiftAttribute {
}
table CondIfAttribute {
- then_branch: string;
- else_branch: string;
+ then_graph: string;
+ else_graph: string;
}
table WhileLoopAttribute {
- cond_branch: string;
- body_branch: string;
+ cond_graph: string;
+ body_graph: string;
}
table TransposeAttribute {
@@ -277,10 +254,25 @@ table NegateAttribute {
output_zp: int32;
}
+table CustomAttribute {
+ operator_name:string;
+ domain_name:string;
+ implementation_attrs:[ubyte];
+}
+
+table FFTAttribute {
+ inverse: bool;
+ local_bound: bool;
+}
+
+table RFFTAttribute {
+ local_bound: bool;
+}
+
table Version {
- _major: int32 = 0;
- _minor: int32 = 31;
- _patch: int32 = 0;
+ _major: int32 = -1;
+ _minor: int32 = -1;
+ _patch: int32 = -1;
_draft: bool = true;
}
@@ -289,11 +281,14 @@ table TosaTensor {
shape:[int32]; // shape of the tensor
type:DType; // data type of the tensor
data: [ubyte] (force_align: 8); // raw data array if it's a constant tensor.
+ variable: bool; // is this a variable tensor
+ is_unranked: bool; // whether this is an unranked tensor
+ variable_name:string; // name for variable attribute
}
table TosaOperator {
op:Op; // operator enum
- attribute: Attribute; // union structure. operator attribute
+ attribute:Attribute; // union structure. operator attribute
inputs:[string]; // list of input tensor names
outputs:[string]; // list of output tensor names
}
@@ -306,9 +301,14 @@ table TosaBasicBlock {
outputs:[string]; // name of graph outputs
}
+table TosaRegion {
+ name:string; // name of region
+ blocks:[TosaBasicBlock]; // basic blocks array
+}
+
table TosaGraph {
- version: Version;
- blocks:[TosaBasicBlock]; // basic blocks array
+ version:Version (required);
+ regions:[TosaRegion]; // regions array
}
root_type TosaGraph;
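
Putting the schema changes together: the root object is still TosaGraph, but version is now required and the graph holds regions instead of blocks. A write-side sketch with the generated Python builders; version numbers and names are placeholders, and the region is left without blocks purely for illustration:

import flatbuffers
from tosa import TosaGraph, TosaRegion, Version

builder = flatbuffers.Builder(1024)

region_name = builder.CreateString("main")
TosaRegion.Start(builder)
TosaRegion.AddName(builder, region_name)
region = TosaRegion.End(builder)      # blocks vector omitted in this sketch

TosaGraph.StartRegionsVector(builder, 1)
builder.PrependUOffsetTRelative(region)
regions = builder.EndVector()         # flatbuffers < 2.0 expects EndVector(1)

Version.Start(builder)
Version.Add_Major(builder, 0)         # placeholder version numbers
Version.Add_Minor(builder, 80)
Version.Add_Patch(builder, 0)
Version.Add_Draft(builder, False)
version = Version.End(builder)

TosaGraph.Start(builder)
TosaGraph.AddVersion(builder, version)    # required field in the new schema
TosaGraph.AddRegions(builder, regions)
graph = TosaGraph.End(builder)
builder.Finish(graph)   # a real serializer also writes the "TOSA" file_identifier declared above
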
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..3008a9e
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2023, ARM Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Setup script for backward compatibility."""
+from setuptools import setup
+
+setup()
diff --git a/src/numpy_utils.cpp b/src/numpy_utils.cpp
index 80c680f..e4171d7 100644
--- a/src/numpy_utils.cpp
+++ b/src/numpy_utils.cpp
@@ -1,5 +1,5 @@
-// Copyright (c) 2020-2021, ARM Limited.
+// Copyright (c) 2020-2024, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,6 +14,9 @@
// limitations under the License.
#include "numpy_utils.h"
+#include "half.hpp"
+#include <algorithm>
+#include <memory>
// Magic NUMPY header
static const char NUMPY_HEADER_STR[] = "\x93NUMPY\x1\x0\x76\x0{";
@@ -21,28 +24,81 @@ static const int NUMPY_HEADER_SZ = 128;
// Maximum shape dimensions supported
static const int NUMPY_MAX_DIMS_SUPPORTED = 10;
-NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, bool* databuf)
-{
- const char dtype_str[] = "'|b1'";
- return readFromNpyFileCommon(filename, dtype_str, 1, elems, databuf, true);
-}
-
+// This is an entry function for reading 8-/16-/32-bit npy file.
+template <>
NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, int32_t* databuf)
{
- const char dtype_str[] = "'<i4'";
- return readFromNpyFileCommon(filename, dtype_str, sizeof(int32_t), elems, databuf, false);
-}
+ FILE* infile = nullptr;
+ NPError rc = HEADER_PARSE_ERROR;
+ assert(filename);
+ assert(databuf);
-NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, int64_t* databuf)
-{
- const char dtype_str[] = "'<i8'";
- return readFromNpyFileCommon(filename, dtype_str, sizeof(int64_t), elems, databuf, false);
-}
+ infile = fopen(filename, "rb");
+ if (!infile)
+ {
+ return FILE_NOT_FOUND;
+ }
-NumpyUtilities::NPError NumpyUtilities::readFromNpyFile(const char* filename, const uint32_t elems, float* databuf)
-{
- const char dtype_str[] = "'<f4'";
- return readFromNpyFileCommon(filename, dtype_str, sizeof(float), elems, databuf, false);
+ bool is_signed = false;
+ int length_per_byte = 0;
+ char byte_order;
+ rc = getHeader(infile, is_signed, length_per_byte, byte_order);
+ if (rc != NO_ERROR)
+ return rc;
+
+ switch (length_per_byte)
+ {
+ case 1:
+ if (is_signed)
+ {
+ int8_t* tmp_buf = new int8_t[elems];
+ rc = readFromNpyFile<int8_t>(filename, elems, tmp_buf);
+ copyBufferByElement(databuf, tmp_buf, elems);
+ delete[] tmp_buf;
+ }
+ else
+ {
+ uint8_t* tmp_buf = new uint8_t[elems];
+ rc = readFromNpyFile<uint8_t>(filename, elems, tmp_buf);
+ copyBufferByElement(databuf, tmp_buf, elems);
+ delete[] tmp_buf;
+ }
+ break;
+ case 2:
+ if (is_signed)
+ {
+ int16_t* tmp_buf = new int16_t[elems];
+ rc = readFromNpyFile<int16_t>(filename, elems, tmp_buf);
+ copyBufferByElement(databuf, tmp_buf, elems);
+ delete[] tmp_buf;
+ }
+ else
+ {
+ uint16_t* tmp_buf = new uint16_t[elems];
+ rc = readFromNpyFile<uint16_t>(filename, elems, tmp_buf);
+ copyBufferByElement(databuf, tmp_buf, elems);
+ delete[] tmp_buf;
+ }
+ break;
+ case 4:
+ if (is_signed)
+ {
+ bool is_bool;
+ const char* dtype_str = getDTypeString<int32_t>(is_bool);
+ rc = readFromNpyFileCommon(filename, dtype_str, sizeof(int32_t), elems, databuf, is_bool);
+ }
+ else
+ {
+ // uint32, not supported
+ rc = DATA_TYPE_NOT_SUPPORTED;
+ }
+ break;
+ default:
+ return DATA_TYPE_NOT_SUPPORTED;
+ break;
+ }
+
+ return rc;
}
NumpyUtilities::NPError NumpyUtilities::readFromNpyFileCommon(const char* filename,
@@ -101,6 +157,46 @@ NumpyUtilities::NPError NumpyUtilities::readFromNpyFileCommon(const char* filena
return rc;
}
+NumpyUtilities::NPError NumpyUtilities::getHeader(FILE* infile, bool& is_signed, int& bit_length, char& byte_order)
+{
+ char buf[NUMPY_HEADER_SZ + 1];
+ NPError rc = NO_ERROR;
+ assert(infile);
+
+ if (fread(buf, NUMPY_HEADER_SZ, 1, infile) != 1)
+ {
+ return HEADER_PARSE_ERROR;
+ }
+
+ // Validate the numpy magic number
+ if (memcmp(buf, NUMPY_HEADER_STR, sizeof(NUMPY_HEADER_STR) - 1))
+ {
+ return HEADER_PARSE_ERROR;
+ }
+
+ std::string dic_string(buf, NUMPY_HEADER_SZ);
+
+ std::string desc_str("descr':");
+ size_t offset = dic_string.find(desc_str);
+ if (offset == std::string::npos)
+ return HEADER_PARSE_ERROR;
+
+ offset += desc_str.size() + 1;
+ // Skip whitespace and the opening '
+ while (offset < dic_string.size() && (std::isspace(dic_string[offset]) || dic_string[offset] == '\''))
+ offset++;
+ // Check for overflow
+ if (offset + 2 > dic_string.size())
+ return HEADER_PARSE_ERROR;
+
+ byte_order = dic_string[offset];
+ is_signed = dic_string[offset + 1] == 'u' ? false : true;
+ bit_length = (int)dic_string[offset + 2] - '0';
+
+ rewind(infile);
+ return rc;
+}
+
NumpyUtilities::NPError NumpyUtilities::checkNpyHeader(FILE* infile, const uint32_t elems, const char* dtype_str)
{
char buf[NUMPY_HEADER_SZ + 1];
@@ -253,60 +349,6 @@ NumpyUtilities::NPError NumpyUtilities::checkNpyHeader(FILE* infile, const uint3
return rc;
}
-NumpyUtilities::NPError NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const bool* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const bool* databuf)
-{
- const char dtype_str[] = "'|b1'";
- return writeToNpyFileCommon(filename, dtype_str, 1, shape, databuf, true); // bools written as size 1
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const int32_t* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int32_t* databuf)
-{
- const char dtype_str[] = "'<i4'";
- return writeToNpyFileCommon(filename, dtype_str, sizeof(int32_t), shape, databuf, false);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const int64_t* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const int64_t* databuf)
-{
- const char dtype_str[] = "'<i8'";
- return writeToNpyFileCommon(filename, dtype_str, sizeof(int64_t), shape, databuf, false);
-}
-
-NumpyUtilities::NPError NumpyUtilities::writeToNpyFile(const char* filename, const uint32_t elems, const float* databuf)
-{
- std::vector<int32_t> shape = { (int32_t)elems };
- return writeToNpyFile(filename, shape, databuf);
-}
-
-NumpyUtilities::NPError
- NumpyUtilities::writeToNpyFile(const char* filename, const std::vector<int32_t>& shape, const float* databuf)
-{
- const char dtype_str[] = "'<f4'";
- return writeToNpyFileCommon(filename, dtype_str, sizeof(float), shape, databuf, false);
-}
-
NumpyUtilities::NPError NumpyUtilities::writeToNpyFileCommon(const char* filename,
const char* dtype_str,
const size_t elementsize,
@@ -390,12 +432,11 @@ NumpyUtilities::NPError
// Output the format dictionary
// Hard-coded for I32 for now
- headerPos +=
- snprintf(header + headerPos, NUMPY_HEADER_SZ - headerPos, "'descr': %s, 'fortran_order': False, 'shape': (%d,",
- dtype_str, shape.empty() ? 1 : shape[0]);
+ headerPos += snprintf(header + headerPos, NUMPY_HEADER_SZ - headerPos,
+ "'descr': %s, 'fortran_order': False, 'shape': (", dtype_str);
- // Remainder of shape array
- for (i = 1; i < shape.size(); i++)
+ // Add shape contents (if any - as this will be empty for rank 0)
+ for (i = 0; i < shape.size(); i++)
{
headerPos += snprintf(header + headerPos, NUMPY_HEADER_SZ - headerPos, " %d,", shape[i]);
}
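
The new getHeader() pulls signedness, element width and byte order out of the npy header's 'descr' entry instead of matching a fixed dtype string. The same parse, sketched in Python purely for illustration (it mirrors the C++ above and is not part of the library):

def parse_npy_descr(descr):
    # e.g. "'<i4'" -> little-endian, signed, 4 bytes per element
    s = descr.strip("'")
    byte_order = s[0]             # '<', '>' or '|'
    is_signed = s[1] != 'u'       # only 'u' is treated as unsigned, as in getHeader()
    length_per_byte = int(s[2])   # element width in bytes
    return byte_order, is_signed, length_per_byte

assert parse_npy_descr("'<i4'") == ('<', True, 4)
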
diff --git a/src/tosa_serialization_handler.cpp b/src/tosa_serialization_handler.cpp
index 3a0ce43..85625cd 100644
--- a/src/tosa_serialization_handler.cpp
+++ b/src/tosa_serialization_handler.cpp
@@ -1,5 +1,5 @@
-// Copyright (c) 2020-2021, ARM Limited.
+// Copyright (c) 2020-2024, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,18 +14,28 @@
// limitations under the License.
#include "tosa_serialization_handler.h"
+#include "half.hpp"
#include <iostream>
using namespace tosa;
+using fp8e4m3 = tosa::float_t<int8_t, 4, true, true, false>;
+using fp8e5m2 = tosa::float_t<int8_t, 5, true, true, true>;
+
TosaSerializationTensor::TosaSerializationTensor(const flatbuffers::String* name,
const flatbuffers::Vector<int32_t>* shape,
DType dtype,
- const flatbuffers::Vector<uint8_t>* data)
+ const flatbuffers::Vector<uint8_t>* data,
+ const bool variable,
+ const bool is_unranked,
+ const flatbuffers::String* variable_name)
{
- _dtype = dtype;
-
- std::copy(shape->begin(), shape->end(), std::back_inserter(_shape));
+ _dtype = dtype;
+ _variable = variable;
+ if (shape)
+ {
+ std::copy(shape->begin(), shape->end(), std::back_inserter(_shape));
+ }
assert(name);
_name = name->str();
@@ -34,23 +44,37 @@ TosaSerializationTensor::TosaSerializationTensor(const flatbuffers::String* name
{
std::copy(data->begin(), data->end(), std::back_inserter(_data));
}
+ _is_unranked = is_unranked;
+
+ if (variable_name)
+ {
+ _variable_name = variable_name->str();
+ }
}
TosaSerializationTensor::TosaSerializationTensor(const std::string& name,
const std::vector<int32_t>& shape,
DType dtype,
- const std::vector<uint8_t>& data)
+ const std::vector<uint8_t>& data,
+ const bool variable,
+ const bool is_unranked,
+ const std::string& variable_name)
{
- _dtype = dtype;
- _shape = shape;
- _name = name;
- _data = data;
+ _dtype = dtype;
+ _variable = variable;
+ _shape = shape;
+ _name = name;
+ _data = data;
+ _is_unranked = is_unranked;
+ _variable_name = variable_name;
}
TosaSerializationTensor::TosaSerializationTensor()
{
- _dtype = DType_UNKNOWN;
- _name = "UNKNOWN";
+ _dtype = DType_UNKNOWN;
+ _variable = false;
+ _name = "UNKNOWN";
+ _is_unranked = false;
}
TosaSerializationTensor::~TosaSerializationTensor()
@@ -112,29 +136,33 @@ TosaSerializationOperator::~TosaSerializationOperator()
}
TosaSerializationBasicBlock::TosaSerializationBasicBlock(const std::string& name,
+ const std::string& region_name,
const std::vector<TosaSerializationOperator*>& operators,
const std::vector<TosaSerializationTensor*>& tensors,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs)
{
- _name = name;
- _operators = operators;
- _tensors = tensors;
- _inputs = inputs;
- _outputs = outputs;
+ _name = name;
+ _region_name = region_name;
+ _operators = operators;
+ _tensors = tensors;
+ _inputs = inputs;
+ _outputs = outputs;
}
TosaSerializationBasicBlock::TosaSerializationBasicBlock(std::string&& name,
+ std::string&& region_name,
std::vector<TosaSerializationOperator*>&& operators,
std::vector<TosaSerializationTensor*>&& tensors,
std::vector<std::string>&& inputs,
std::vector<std::string>&& outputs)
{
- _name = std::move(name);
- _operators = std::move(operators);
- _tensors = std::move(tensors);
- _inputs = std::move(inputs);
- _outputs = std::move(outputs);
+ _name = std::move(name);
+ _region_name = std::move(region_name);
+ _operators = std::move(operators);
+ _tensors = std::move(tensors);
+ _inputs = std::move(inputs);
+ _outputs = std::move(outputs);
}
TosaSerializationBasicBlock::~TosaSerializationBasicBlock()
@@ -152,65 +180,38 @@ TosaSerializationBasicBlock::~TosaSerializationBasicBlock()
}
}
-TosaSerializationHandler::TosaSerializationHandler()
+TosaSerializationRegion::TosaSerializationRegion(const std::string& name,
+ const std::vector<TosaSerializationBasicBlock*>& blocks)
{
- _schemaLoaded = false;
- _version = TosaVersion(TOSA_VERSION_MAJOR, TOSA_VERSION_MINOR, TOSA_VERSION_PATCH, TOSA_VERSION_DRAFT);
+ _name = name;
+ _blocks = blocks;
}
-TosaSerializationHandler::~TosaSerializationHandler()
+TosaSerializationRegion::TosaSerializationRegion(const std::string&& name,
+ const std::vector<TosaSerializationBasicBlock*>&& blocks)
{
- Clear(); // deallocate all basic blocks
+ _name = std::move(name);
+ _blocks = std::move(blocks);
}
-TosaVersion TosaSerializationHandler::ParseTosaSchemaVersion(std::string schema)
+TosaSerializationRegion::~TosaSerializationRegion()
{
- // Parse all 4 version fields in schema file
- static const char* keywords[4] = { "major: int32 = ", "minor: int32 = ", "patch: int32 = ", "draft: bool = " };
- string keyword_str[4];
- size_t search_pos = 0;
- size_t keyword_pos;
- size_t semicolon_pos;
- // parse integer field first
- for (int32_t i = 0; i < 4; i++)
- {
- keyword_pos = schema.find(keywords[i], search_pos);
- if (keyword_pos == std::string::npos)
- {
- printf("ERROR: can't find keyword \"%s\" in schema\n", keywords[i]);
- assert(0);
- }
- semicolon_pos = schema.find(';', keyword_pos);
- if (keyword_pos == std::string::npos)
- {
- printf("ERROR: can't find ';' in schema\n");
- assert(0);
- }
- keyword_str[i] =
- schema.substr(keyword_pos + strlen(keywords[i]), semicolon_pos - keyword_pos - strlen(keywords[i]));
- search_pos = semicolon_pos;
- }
-
- int32_t schema_major = 0;
- int32_t schema_minor = 0;
- int32_t schema_patch = 0;
- bool schema_draft = false;
- try
- {
- schema_major = stoi(keyword_str[0]);
- schema_minor = stoi(keyword_str[1]);
- schema_patch = stoi(keyword_str[2]);
- schema_draft = (keyword_str[3] == "true") ? true : false;
- }
- catch (std::invalid_argument& e)
+ // deallocate all blocks
+ for (auto block : GetBlocks())
{
- printf("ERROR: fail at stoi(): %s\n", e.what());
- assert(0);
+ delete block; // ~TosaSerializationBasicBlock()
}
+}
- TosaVersion schema_version(schema_major, schema_minor, schema_patch, schema_draft);
+TosaSerializationHandler::TosaSerializationHandler()
+{
+ _schemaLoaded = false;
+ _version = TosaVersion(TOSA_VERSION_MAJOR, TOSA_VERSION_MINOR, TOSA_VERSION_PATCH, TOSA_VERSION_DRAFT);
+}
- return schema_version;
+TosaSerializationHandler::~TosaSerializationHandler()
+{
+ Clear(); // deallocate all basic blocks
}
tosa_err_t TosaSerializationHandler::LoadFileSchema(const char* schema_filename)
@@ -227,23 +228,6 @@ tosa_err_t TosaSerializationHandler::LoadFileSchema(const char* schema_filename)
ok = _parser.Parse(schema.c_str());
- TosaVersion schema_version = ParseTosaSchemaVersion(schema);
-
- TosaVersion::compat_t is_compat = schema_version.is_compatible(GetVersion());
- switch (is_compat)
- {
- case TosaVersion::compat_t::COMPLETELY_COMPATIBLE:
- break;
- case TosaVersion::compat_t::PARTIALLY_COMPATIBLE:
- printf("WARNING: Schema flatbuffer version %s is partially compatible with serializer version %s\n",
- schema_version.to_string().c_str(), GetVersion().to_string().c_str());
- break;
- case TosaVersion::compat_t::NOT_COMPATIBLE:
- printf("ERROR: Schema flatbuffer version %s is not compatible with serializer version %s\n",
- schema_version.to_string().c_str(), GetVersion().to_string().c_str());
- return TOSA_VERSION_MISMATCH;
- }
-
if (!ok)
{
printf("Error parsing ISA schema file: %s\n", schema_filename);
@@ -308,7 +292,7 @@ tosa_err_t TosaSerializationHandler::SaveFileJson(const char* filename)
uint8_t* buf = _builder.GetBufferPointer();
- if (!GenerateText(_parser, buf, &jsongen))
+ if (GenText(_parser, buf, &jsongen))
{
printf("Couldn't serialize parsed data to JSON!\n");
return TOSA_FILE_ERROR;
@@ -399,11 +383,11 @@ tosa_err_t TosaSerializationHandler::SaveFileTosaFlatbuffer(const char* filename
tosa_err_t TosaSerializationHandler::Clear()
{
// deallocate all basic blocks
- for (auto bb : GetBlocks())
+ for (auto region : GetRegions())
{
- delete bb;
+ delete region;
}
- _blocks.clear();
+ _regions.clear();
return TOSA_OK;
}
@@ -416,20 +400,13 @@ tosa_err_t TosaSerializationHandler::Deserialize(const uint8_t* buf)
}
auto fb_tosa_graph = GetTosaGraph(buf);
auto fb_tosa_version = fb_tosa_graph->version();
- auto fb_tosa_blocks = fb_tosa_graph->blocks();
-
- std::vector<std::string> operator_inputs_container;
- std::vector<std::string> operator_outputs_container;
-
- std::vector<TosaSerializationOperator*> block_operators_container;
- std::vector<TosaSerializationTensor*> block_tensors_container;
- std::vector<std::string> block_inputs_container;
- std::vector<std::string> block_outputs_container;
+ auto fb_tosa_regions = fb_tosa_graph->regions();
TosaAttributeBase* typed_attribute = NULL;
TosaSerializationOperator* new_operator = NULL;
TosaSerializationBasicBlock* new_block = NULL;
TosaSerializationTensor* new_tensor = NULL;
+ TosaSerializationRegion* new_region = NULL;
// erase container
Clear();
@@ -437,226 +414,241 @@ tosa_err_t TosaSerializationHandler::Deserialize(const uint8_t* buf)
TosaVersion read_version(fb_tosa_version->_major(), fb_tosa_version->_minor(), fb_tosa_version->_patch(),
fb_tosa_version->_draft());
- TosaVersion::compat_t is_compat = read_version.is_compatible(GetVersion());
+ TosaVersion::compat_t is_compat = TosaVersion::is_compatible(read_version, GetVersion());
switch (is_compat)
{
case TosaVersion::compat_t::COMPLETELY_COMPATIBLE:
break;
- case TosaVersion::compat_t::PARTIALLY_COMPATIBLE:
- printf("WARNING: Read flatbuffer version %s is partially compatible with serializer version %s\n",
+ case TosaVersion::compat_t::BACKWARD_COMPATIBLE:
+ printf("WARNING: Different Tosa flatbuffer and serializer versions detected. Read Tosa flatbuffer version "
+ "%s is backward "
+ "compatible with serializer version %s\n",
read_version.to_string().c_str(), GetVersion().to_string().c_str());
break;
case TosaVersion::compat_t::NOT_COMPATIBLE:
- printf("ERROR: Read flatbuffer version %s is not compatible with serializer version %s\n",
+ printf("ERROR: Read Tosa flatbuffer version %s is not compatible with serializer version %s\n",
read_version.to_string().c_str(), GetVersion().to_string().c_str());
return TOSA_VERSION_MISMATCH;
}
- for (size_t i = 0; i < fb_tosa_blocks->size(); i++)
+ for (size_t i = 0; i < fb_tosa_regions->size(); i++)
{
- auto curr_block = fb_tosa_blocks->Get(i);
+ auto curr_region = fb_tosa_regions->Get(i);
+ auto region_name = curr_region->name()->str();
+ auto fb_tosa_blocks = curr_region->blocks();
- auto block_name = curr_block->name()->str();
+ new_region = new TosaSerializationRegion(curr_region->name()->str(), {});
+ this->GetRegions().push_back(new_region);
- auto fb_tosa_operators = curr_block->operators();
- block_operators_container.clear();
- for (size_t j = 0; j < fb_tosa_operators->size(); j++)
+ for (size_t i = 0; i < fb_tosa_blocks->size(); i++)
{
- auto curr_operator = fb_tosa_operators->Get(j);
+ std::vector<TosaSerializationOperator*> block_operators_container;
+ std::vector<TosaSerializationTensor*> block_tensors_container;
+ std::vector<std::string> block_inputs_container;
+ std::vector<std::string> block_outputs_container;
- auto operator_op = curr_operator->op();
- auto attribute_type = curr_operator->attribute_type();
- auto attribute = curr_operator->attribute();
+ auto curr_block = fb_tosa_blocks->Get(i);
- // input tensors
- auto operator_inputs = curr_operator->inputs();
- operator_inputs_container.clear();
- if (operator_inputs)
+ auto block_name = curr_block->name()->str();
+
+ auto fb_tosa_operators = curr_block->operators();
+ for (size_t j = 0; j < fb_tosa_operators->size(); j++)
{
- for (size_t k = 0; k < operator_inputs->size(); k++)
+ auto curr_operator = fb_tosa_operators->Get(j);
+
+ auto operator_op = curr_operator->op();
+ auto attribute_type = curr_operator->attribute_type();
+ auto attribute = curr_operator->attribute();
+
+ std::vector<std::string> operator_inputs_container;
+ std::vector<std::string> operator_outputs_container;
+
+ // input tensors
+ auto operator_inputs = curr_operator->inputs();
+ if (operator_inputs)
{
- auto curr_input = operator_inputs->Get(k);
- operator_inputs_container.push_back(curr_input->str());
+ for (size_t k = 0; k < operator_inputs->size(); k++)
+ {
+ auto curr_input = operator_inputs->Get(k);
+ operator_inputs_container.push_back(curr_input->str());
+ }
}
- }
- // output tensors
- auto operator_outputs = curr_operator->outputs();
- operator_outputs_container.clear();
- if (operator_outputs)
- {
- for (size_t k = 0; k < operator_outputs->size(); k++)
+ // output tensors
+ auto operator_outputs = curr_operator->outputs();
+ if (operator_outputs)
{
- auto curr_output = operator_outputs->Get(k);
- operator_outputs_container.push_back(curr_output->str());
+ for (size_t k = 0; k < operator_outputs->size(); k++)
+ {
+ auto curr_output = operator_outputs->Get(k);
+ operator_outputs_container.push_back(curr_output->str());
+ }
}
- }
- switch (attribute_type)
- {
- case Attribute_NONE:
- typed_attribute = new TosaNoneAttribute();
- break;
+ switch (attribute_type)
+ {
+ case Attribute_NONE:
+ typed_attribute = new TosaNoneAttribute();
+ break;
#define DEF_ATTRIBUTE(NAME, ...) \
case Attribute_##NAME##Attribute: \
typed_attribute = new Tosa##NAME##Attribute(attribute); \
break;
#include "attribute.def"
#undef DEF_ATTRIBUTE
- default:
- printf("TosaSerializationHandler::Deserialize(): Attribute %s not implemented yet\n",
- EnumNamesAttribute()[attribute_type]);
- return TOSA_INTERNAL_ERROR;
+ default:
+ printf("TosaSerializationHandler::Deserialize(): Attribute %s not implemented yet\n",
+ EnumNamesAttribute()[attribute_type]);
+ return TOSA_INTERNAL_ERROR;
+ }
+
+ new_operator = new TosaSerializationOperator(operator_op, attribute_type, typed_attribute,
+ operator_inputs_container, operator_outputs_container);
+ if (new_operator)
+ {
+ block_operators_container.push_back(new_operator);
+ }
+ else
+ {
+ return TOSA_MEMORY_ERROR;
+ }
+
+ if (typed_attribute)
+ delete typed_attribute;
}
- new_operator = new TosaSerializationOperator(operator_op, attribute_type, typed_attribute,
- operator_inputs_container, operator_outputs_container);
- if (new_operator)
+ auto block_inputs = curr_block->inputs();
+ auto block_outputs = curr_block->outputs();
+
+ for (size_t j = 0; j < block_inputs->size(); j++)
{
- block_operators_container.push_back(new_operator);
+ auto curr_block_input = block_inputs->Get(j);
+ block_inputs_container.push_back(curr_block_input->str());
}
- else
+ for (size_t j = 0; j < block_outputs->size(); j++)
{
- return TOSA_MEMORY_ERROR;
+ auto curr_block_output = block_outputs->Get(j);
+ block_outputs_container.push_back(curr_block_output->str());
}
- if (typed_attribute)
- delete typed_attribute;
- }
-
- auto fb_tosa_tensors = curr_block->tensors();
- block_tensors_container.clear();
- for (size_t j = 0; j < fb_tosa_tensors->size(); j++)
- {
- auto curr_tensor = fb_tosa_tensors->Get(j);
-
- auto tensor_name = curr_tensor->name();
- auto tensor_shape = curr_tensor->shape();
- auto tensor_type = curr_tensor->type();
- auto tensor_data = curr_tensor->data();
-
- new_tensor = new TosaSerializationTensor(tensor_name, tensor_shape, tensor_type, tensor_data);
- if (new_tensor)
+ auto fb_tosa_tensors = curr_block->tensors();
+ for (size_t j = 0; j < fb_tosa_tensors->size(); j++)
+ {
+ auto curr_tensor = fb_tosa_tensors->Get(j);
+
+ auto tensor_name = curr_tensor->name();
+ auto tensor_shape = curr_tensor->shape();
+ auto tensor_type = curr_tensor->type();
+ auto tensor_variable = curr_tensor->variable();
+ auto tensor_data = curr_tensor->data();
+ auto tensor_is_unranked = curr_tensor->is_unranked();
+ auto tensor_variable_name = curr_tensor->variable_name();
+
+ new_tensor = new TosaSerializationTensor(tensor_name, tensor_shape, tensor_type, tensor_data,
+ tensor_variable, tensor_is_unranked, tensor_variable_name);
+ if (new_tensor)
+ {
+ block_tensors_container.push_back(new_tensor);
+ }
+ else
+ {
+ return TOSA_MEMORY_ERROR;
+ }
+ }
+ new_block = new TosaSerializationBasicBlock(block_name, region_name, block_operators_container,
+ block_tensors_container, block_inputs_container,
+ block_outputs_container);
+ if (new_block)
{
- block_tensors_container.push_back(new_tensor);
+ new_region->GetBlocks().push_back(new_block);
}
else
{
return TOSA_MEMORY_ERROR;
}
- }
-
- auto block_inputs = curr_block->inputs();
- auto block_outputs = curr_block->outputs();
-
- block_inputs_container.clear();
- block_outputs_container.clear();
-
- for (size_t j = 0; j < block_inputs->size(); j++)
- {
- auto curr_block_input = block_inputs->Get(j);
- block_inputs_container.push_back(curr_block_input->str());
- }
- for (size_t j = 0; j < block_outputs->size(); j++)
- {
- auto curr_block_output = block_outputs->Get(j);
- block_outputs_container.push_back(curr_block_output->str());
- }
-
- new_block = new TosaSerializationBasicBlock(block_name, block_operators_container, block_tensors_container,
- block_inputs_container, block_outputs_container);
- if (new_block)
- {
- this->GetBlocks().push_back(new_block);
- }
- else
- {
- return TOSA_MEMORY_ERROR;
- }
+ } // end block for_loop
}
return TOSA_OK;
}
-tosa_err_t TosaSerializationHandler::Serialize()
+std::vector<uint8_t> float_to_u8_helper(float f_in)
{
- std::vector<flatbuffers::Offset<TosaBasicBlock>> fboffset_blocks;
-
- std::vector<flatbuffers::Offset<TosaOperator>> fboffset_block_operators;
- std::vector<flatbuffers::Offset<TosaTensor>> fboffset_block_tensors;
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_block_inputs;
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_block_outputs;
+ // Push back a single float value to the buffer with *NO PADDING*
+ // Therefore ConvertF32toU8 function not used
+ std::vector<uint8_t> u8_out;
+ uint32_t* val_u32 = reinterpret_cast<uint32_t*>(&f_in);
+ u8_out.push_back(*val_u32 & 0xFF);
+ u8_out.push_back((*val_u32 >> 8) & 0xFF);
+ u8_out.push_back((*val_u32 >> 16) & 0xFF);
+ u8_out.push_back((*val_u32 >> 24) & 0xFF);
+ return u8_out;
+}
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_operator_inputs;
- std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_operator_outputs;
+tosa_err_t TosaSerializationHandler::Serialize()
+{
+ // regions
+ std::vector<flatbuffers::Offset<TosaRegion>> fboffset_regions;
// translate TosaFlatbufferOperator to flatbuffers::Offset<TosaOperator>
- for (auto block : GetBlocks())
+ for (auto region : GetRegions())
{
- fboffset_block_operators.clear();
- fboffset_block_tensors.clear();
- fboffset_block_inputs.clear();
- fboffset_block_outputs.clear();
-
- auto block_name = _builder.CreateString(block->GetName().c_str());
-
- for (auto tensor_str : block->GetInputs())
- {
- auto tensor_name = _builder.CreateString(tensor_str.c_str());
- fboffset_block_inputs.push_back(tensor_name);
- }
-
- for (auto tensor_str : block->GetOutputs())
- {
- auto tensor_name = _builder.CreateString(tensor_str.c_str());
- fboffset_block_outputs.push_back(tensor_name);
- }
-
- auto fb_block_inputs = _builder.CreateVector(fboffset_block_inputs);
- auto fb_block_outputs = _builder.CreateVector(fboffset_block_outputs);
-
- for (auto op : block->GetOperators())
+ std::vector<flatbuffers::Offset<TosaBasicBlock>> fboffset_blocks;
+ for (auto block : region->GetBlocks())
{
- fboffset_operator_inputs.clear();
- fboffset_operator_outputs.clear();
-
- auto operator_op = op->GetOp();
- auto attribute_type = op->GetAttributeType();
-
- for (auto tensor_str : op->GetInputTensorNames())
+ std::vector<flatbuffers::Offset<TosaOperator>> fboffset_block_operators;
+ std::vector<flatbuffers::Offset<TosaTensor>> fboffset_block_tensors;
+ std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_block_inputs;
+ std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_block_outputs;
+ auto block_name = _builder.CreateString(block->GetName().c_str());
+ for (auto tensor_str : block->GetInputs())
{
auto tensor_name = _builder.CreateString(tensor_str.c_str());
- fboffset_operator_inputs.push_back(tensor_name);
+ fboffset_block_inputs.push_back(tensor_name);
}
-
- for (auto tensor_str : op->GetOutputTensorNames())
+ for (auto tensor_str : block->GetOutputs())
{
auto tensor_name = _builder.CreateString(tensor_str.c_str());
- fboffset_operator_outputs.push_back(tensor_name);
+ fboffset_block_outputs.push_back(tensor_name);
}
-
- auto fb_operator_inputs = _builder.CreateVector(fboffset_operator_inputs);
- auto fb_operator_outputs = _builder.CreateVector(fboffset_operator_outputs);
-
- flatbuffers::Offset<void> fb_attribute;
- switch (attribute_type)
+ auto fb_block_inputs = _builder.CreateVector(fboffset_block_inputs);
+ auto fb_block_outputs = _builder.CreateVector(fboffset_block_outputs);
+ for (auto op : block->GetOperators())
{
- case Attribute_NONE:
- fb_attribute = 0;
- break;
-
+ std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_operator_inputs;
+ std::vector<flatbuffers::Offset<flatbuffers::String>> fboffset_operator_outputs;
+ auto operator_op = op->GetOp();
+ auto attribute_type = op->GetAttributeType();
+ for (auto tensor_str : op->GetInputTensorNames())
+ {
+ auto tensor_name = _builder.CreateString(tensor_str.c_str());
+ fboffset_operator_inputs.push_back(tensor_name);
+ }
+ for (auto tensor_str : op->GetOutputTensorNames())
+ {
+ auto tensor_name = _builder.CreateString(tensor_str.c_str());
+ fboffset_operator_outputs.push_back(tensor_name);
+ }
+ auto fb_operator_inputs = _builder.CreateVector(fboffset_operator_inputs);
+ auto fb_operator_outputs = _builder.CreateVector(fboffset_operator_outputs);
+ flatbuffers::Offset<void> fb_attribute;
+ switch (attribute_type)
+ {
+ case Attribute_NONE:
+ fb_attribute = 0;
+ break;
#define DEF_ARGS_S_STR(NAME, V) , _builder.CreateString(reinterpret_cast<Tosa##NAME*>(op->GetAttribute())->V().c_str())
+#define DEF_ARGS_S_FP_as_U8(NAME, V) \
+ , _builder.CreateVector<uint8_t>(float_to_u8_helper(reinterpret_cast<Tosa##NAME*>(op->GetAttribute())->V()))
#define DEF_ARGS_S_DEFAULT(NAME, V) , reinterpret_cast<Tosa##NAME*>(op->GetAttribute())->V()
-
#define DEF_ARGS_S_int32_t(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
-#define DEF_ARGS_S_float(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
+#define DEF_ARGS_S_float(NAME, V) DEF_ARGS_S_FP_as_U8(NAME, V)
#define DEF_ARGS_S_bool(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
#define DEF_ARGS_S_ResizeMode(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
+#define DEF_ARGS_S_DType(NAME, V) DEF_ARGS_S_DEFAULT(NAME, V)
#define DEF_ARGS_S_string(NAME, V) DEF_ARGS_S_STR(NAME, V)
-
#define DEF_ARGS_S(NAME, T, V) DEF_ARGS_S_##T(NAME, V)
#define DEF_ARGS_V(NAME, T, V) , _builder.CreateVector<T>(reinterpret_cast<Tosa##NAME*>(op->GetAttribute())->V())
-
#define DEF_ARGS_1(NAME, T0, F0, V0) DEF_ARGS_##F0(NAME, T0, V0)
#define DEF_ARGS_2(NAME, T0, F0, V0, T1, F1, V1) DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1)
#define DEF_ARGS_3(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2) \
@@ -672,11 +664,20 @@ tosa_err_t TosaSerializationHandler::Serialize()
#define DEF_ARGS_7(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6) \
DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6)
+#define DEF_ARGS_8(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
+ V7) \
+ DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
+ DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6) \
+ DEF_ARGS_##F7(NAME, T7, V7)
+#define DEF_ARGS_9(NAME, T0, F0, V0, T1, F1, V1, T2, F2, V2, T3, F3, V3, T4, F4, V4, T5, F5, V5, T6, F6, V6, T7, F7, \
+ V7, T8, F8, V8) \
+ DEF_ARGS_##F0(NAME, T0, V0) DEF_ARGS_##F1(NAME, T1, V1) DEF_ARGS_##F2(NAME, T2, V2) DEF_ARGS_##F3(NAME, T3, V3) \
+ DEF_ARGS_##F4(NAME, T4, V4) DEF_ARGS_##F5(NAME, T5, V5) DEF_ARGS_##F6(NAME, T6, V6) \
+ DEF_ARGS_##F7(NAME, T7, V7) DEF_ARGS_##F8(NAME, T8, V8)
#define DEF_ATTRIBUTE(NAME, NUM_ARGS, ...) \
case Attribute_##NAME##Attribute: \
fb_attribute = Create##NAME##Attribute(_builder DEF_ARGS_##NUM_ARGS(NAME##Attribute, __VA_ARGS__)).Union(); \
break;
-
#include "attribute.def"
#undef DEF_ATTRIBUTE
#undef DEF_ARGS_1
@@ -692,53 +693,56 @@ tosa_err_t TosaSerializationHandler::Serialize()
#undef DEF_ARGS_S_float
#undef DEF_ARGS_S_bool
#undef DEF_ARGS_S_ResizeMode
+#undef DEF_ARGS_S_DType
#undef DEF_ARGS_S_string
#undef DEF_ARGS_S_STR
#undef DEF_ARGS_S_DEFAULT
- default:
- printf("TosaSerializationHandler::Serialize(): Attribute %s not implemented yet\n",
- EnumNamesAttribute()[attribute_type]);
- return TOSA_INTERNAL_ERROR;
+ default:
+ printf("TosaSerializationHandler::Serialize(): Attribute %s not implemented yet\n",
+ EnumNamesAttribute()[attribute_type]);
+ return TOSA_INTERNAL_ERROR;
+ }
+ auto fboffset_operator = CreateTosaOperator(_builder, operator_op, attribute_type, fb_attribute,
+ fb_operator_inputs, fb_operator_outputs);
+ fboffset_block_operators.push_back(fboffset_operator);
}
+ auto fb_block_operators = _builder.CreateVector(fboffset_block_operators);
+ for (auto tensor : block->GetTensors())
+ {
+ auto tensor_name = _builder.CreateString(tensor->GetName().c_str());
+ auto tensor_shape = _builder.CreateVector(tensor->GetShape());
+ auto tensor_dtype = tensor->GetDtype();
+ bool tensor_variable = tensor->GetVariable();
+ auto tensor_data = _builder.CreateVector(tensor->GetData());
+ auto tensor_is_unranked = tensor->GetIsUnranked();
+ auto tensor_variable_name = _builder.CreateString(tensor->GetVariableName().c_str());
+ auto fboffset_tensor = CreateTosaTensor(_builder, tensor_name, tensor_shape, tensor_dtype, tensor_data,
+ tensor_variable, tensor_is_unranked, tensor_variable_name);
+ fboffset_block_tensors.push_back(fboffset_tensor);
+ }
+ auto fb_block_tensors = _builder.CreateVector(fboffset_block_tensors);
+ auto fboffset_block = CreateTosaBasicBlock(_builder, block_name, fb_block_operators, fb_block_tensors,
+ fb_block_inputs, fb_block_outputs);
+ fboffset_blocks.push_back(fboffset_block);
+ } // end block for_loop
+ auto fb_blocks = _builder.CreateVector(fboffset_blocks);
- auto fboffset_operator = CreateTosaOperator(_builder, operator_op, attribute_type, fb_attribute,
- fb_operator_inputs, fb_operator_outputs);
- fboffset_block_operators.push_back(fboffset_operator);
- }
-
- auto fb_block_operators = _builder.CreateVector(fboffset_block_operators);
-
- for (auto tensor : block->GetTensors())
- {
-
- auto tensor_name = _builder.CreateString(tensor->GetName().c_str());
- auto tensor_shape = _builder.CreateVector(tensor->GetShape());
- auto tensor_dtype = tensor->GetDtype();
- auto tensor_data = _builder.CreateVector(tensor->GetData());
-
- auto fboffset_tensor = CreateTosaTensor(_builder, tensor_name, tensor_shape, tensor_dtype, tensor_data);
- fboffset_block_tensors.push_back(fboffset_tensor);
- }
-
- auto fb_block_tensors = _builder.CreateVector(fboffset_block_tensors);
-
- auto fboffset_block = CreateTosaBasicBlock(_builder, block_name, fb_block_operators, fb_block_tensors,
- fb_block_inputs, fb_block_outputs);
- fboffset_blocks.push_back(fboffset_block);
- }
+ auto region_name = _builder.CreateString(region->GetName().c_str());
+ auto fboffset_region = CreateTosaRegion(_builder, region_name, fb_blocks);
+ fboffset_regions.push_back(fboffset_region);
+ } // end region for_loop
- auto fb_blocks = _builder.CreateVector(fboffset_blocks);
+ auto fb_regions = _builder.CreateVector(fboffset_regions);
auto fb_version =
CreateVersion(_builder, TOSA_VERSION_MAJOR, TOSA_VERSION_MINOR, TOSA_VERSION_PATCH, TOSA_VERSION_DRAFT);
-
- auto fb_graph = CreateTosaGraph(_builder, fb_version, fb_blocks);
+ auto fb_graph = CreateTosaGraph(_builder, fb_version, fb_regions);
_builder.Finish(fb_graph, TosaGraphIdentifier());
return TOSA_OK;
}
-void zero_pad(std::vector<uint8_t>& buf)
+void TosaSerializationHandler::ForceAlignTensorData(std::vector<uint8_t>& buf)
{
while ((buf.size() % TENSOR_BUFFER_FORCE_ALIGNMENT) != 0)
{
@@ -746,6 +750,66 @@ void zero_pad(std::vector<uint8_t>& buf)
}
}
+tosa_err_t TosaSerializationHandler::ConvertBF16toU8(const std::vector<float>& in, std::vector<uint8_t>& out)
+{
+ // Note: Converts fp32->bf16 by ignoring the least significant 16 bits
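+    // For illustration: 1.0f (0x3F800000) keeps only its two most significant bytes and is emitted as { 0x80, 0x3F }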
+ out.clear();
+ for (auto val : in)
+ {
+ uint32_t* val_u32 = reinterpret_cast<uint32_t*>(&val);
+ uint8_t f32_byte2 = (*val_u32 >> 16) & 0xFF;
+ uint8_t f32_byte3 = (*val_u32 >> 24) & 0xFF;
+ // little endian: byte2 followed by byte3
+ out.push_back(f32_byte2);
+ out.push_back(f32_byte3);
+ }
+ ForceAlignTensorData(out);
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertFP8E4M3toU8(const std::vector<float>& in, std::vector<uint8_t>& out)
+{
+    // Note: Converts fp32->FP8E4M3 before converting to uint8_t
+ out.clear();
+ for (auto val : in)
+ {
+ auto f8 = static_cast<fp8e4m3>(val);
+ uint8_t b8 = f8.bits();
+ out.push_back(b8);
+ }
+ ForceAlignTensorData(out);
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertFP8E5M2toU8(const std::vector<float>& in, std::vector<uint8_t>& out)
+{
+ // Note: Converts fp32->FP8E5M2 before converting to uint8_t
+ out.clear();
+ for (auto val : in)
+ {
+ auto f8 = static_cast<fp8e5m2>(val);
+ uint8_t b8 = f8.bits();
+ out.push_back(b8);
+ }
+ ForceAlignTensorData(out);
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertF16toU8(const std::vector<float>& in, std::vector<uint8_t>& out)
+{
+ // Note: Converts fp32->fp16 before converting to uint8_t
+ out.clear();
+ for (auto val : in)
+ {
+ half_float::half val_f16 = half_float::half_cast<half_float::half, float>(val);
+ uint16_t* val_u16 = reinterpret_cast<uint16_t*>(&val_f16);
+ out.push_back(*val_u16 & 0xFF);
+ out.push_back((*val_u16 >> 8) & 0xFF);
+ }
+ ForceAlignTensorData(out);
+ return TOSA_OK;
+}
+
tosa_err_t TosaSerializationHandler::ConvertF32toU8(const std::vector<float>& in, std::vector<uint8_t>& out)
{
out.clear();
@@ -757,7 +821,26 @@ tosa_err_t TosaSerializationHandler::ConvertF32toU8(const std::vector<float>& in
out.push_back((*val_u32 >> 16) & 0xFF);
out.push_back((*val_u32 >> 24) & 0xFF);
}
- zero_pad(out);
+ ForceAlignTensorData(out);
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertI64toU8(const std::vector<int64_t>& in, std::vector<uint8_t>& out)
+{
+ out.clear();
+ for (auto val : in)
+ {
+ uint64_t* val_u64 = reinterpret_cast<uint64_t*>(&val);
+ out.push_back(*val_u64 & 0xFF);
+ out.push_back((*val_u64 >> 8) & 0xFF);
+ out.push_back((*val_u64 >> 16) & 0xFF);
+ out.push_back((*val_u64 >> 24) & 0xFF);
+ out.push_back((*val_u64 >> 32) & 0xFF);
+ out.push_back((*val_u64 >> 40) & 0xFF);
+ out.push_back((*val_u64 >> 48) & 0xFF);
+ out.push_back((*val_u64 >> 56) & 0xFF);
+ }
+ ForceAlignTensorData(out);
return TOSA_OK;
}
@@ -774,7 +857,7 @@ tosa_err_t TosaSerializationHandler::ConvertI48toU8(const std::vector<int64_t>&
out.push_back((*val_u64 >> 32) & 0xFF);
out.push_back((*val_u64 >> 40) & 0xFF);
}
- zero_pad(out);
+ ForceAlignTensorData(out);
return TOSA_OK;
}
@@ -789,7 +872,7 @@ tosa_err_t TosaSerializationHandler::ConvertI32toU8(const std::vector<int32_t>&
out.push_back((*val_u32 >> 16) & 0xFF);
out.push_back((*val_u32 >> 24) & 0xFF);
}
- zero_pad(out);
+ ForceAlignTensorData(out);
return TOSA_OK;
}
@@ -802,7 +885,7 @@ tosa_err_t TosaSerializationHandler::ConvertI16toU8(const std::vector<int16_t>&
out.push_back(*val_u16 & 0xFF);
out.push_back((*val_u16 >> 8) & 0xFF);
}
- zero_pad(out);
+ ForceAlignTensorData(out);
return TOSA_OK;
}
@@ -814,7 +897,7 @@ tosa_err_t TosaSerializationHandler::ConvertI8toU8(const std::vector<int8_t>& in
uint8_t* val_u8 = reinterpret_cast<uint8_t*>(&val);
out.push_back(*val_u8);
}
- zero_pad(out);
+ ForceAlignTensorData(out);
return TOSA_OK;
}
@@ -845,7 +928,7 @@ tosa_err_t TosaSerializationHandler::ConvertI4toU8(const std::vector<int8_t>& in
uint8_t val_u8 = static_cast<uint8_t>(val_packed);
out.push_back(val_u8);
}
- zero_pad(out);
+ ForceAlignTensorData(out);
return TOSA_OK;
}
@@ -857,7 +940,105 @@ tosa_err_t TosaSerializationHandler::ConvertBooltoU8(const std::vector<bool>& in
uint8_t val_u8 = val;
out.push_back(val_u8);
}
- zero_pad(out);
+ ForceAlignTensorData(out);
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertU8toBF16(const std::vector<uint8_t>& in,
+ uint32_t out_size,
+ std::vector<float>& out)
+{
+ // Note: bf16 values returned in fp32 type
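+    // For illustration: the bytes { 0x80, 0x3F } are rebuilt into 0x3F800000, i.e. 1.0f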
+ out.clear();
+ if (in.size() < out_size * sizeof(int16_t))
+ {
+ printf("TosaSerializationHandler::ConvertU8toBF16(): uint8 buffer size %ld must >= target size %ld\n",
+ in.size(), out_size * sizeof(int16_t));
+ return TOSA_USER_ERROR;
+ }
+
+ for (uint32_t i = 0; i < out_size; i++)
+ {
+ uint32_t f32_byte2 = in[i * sizeof(int16_t)];
+ uint32_t f32_byte3 = in[i * sizeof(int16_t) + 1];
+ uint32_t val_u32 = (f32_byte2 << 16) + (f32_byte3 << 24);
+
+ // Reinterpret u32 bytes as fp32
+ float val_f32 = *(float*)&val_u32;
+ out.push_back(val_f32);
+ }
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertU8toFP8E4M3(const std::vector<uint8_t>& in,
+ uint32_t out_size,
+ std::vector<float>& out)
+{
+ // Note: FP8E4M3 values returned in fp32 type
+ out.clear();
+ if (in.size() < out_size * sizeof(int8_t))
+ {
+ printf("TosaSerializationHandler::ConvertU8toF16(): uint8 buffer size %ld must >= target size %ld\n", in.size(),
+ out_size * sizeof(int8_t));
+ return TOSA_USER_ERROR;
+ }
+
+ for (uint32_t i = 0; i < out_size; i++)
+ {
+ int8_t bits = static_cast<int8_t>(in[i * sizeof(int8_t)]);
+ auto f8 = fp8e4m3::from_bits(bits);
+ float val_f32 = static_cast<float>(f8);
+ out.push_back(val_f32);
+ }
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertU8toFP8E5M2(const std::vector<uint8_t>& in,
+ uint32_t out_size,
+ std::vector<float>& out)
+{
+ // Note: FP8E5M2 values returned in fp32 type
+ out.clear();
+ if (in.size() < out_size * sizeof(int8_t))
+ {
+ printf("TosaSerializationHandler::ConvertU8toF16(): uint8 buffer size %ld must >= target size %ld\n", in.size(),
+ out_size * sizeof(int8_t));
+ return TOSA_USER_ERROR;
+ }
+
+ for (uint32_t i = 0; i < out_size; i++)
+ {
+ int8_t bits = static_cast<int8_t>(in[i * sizeof(int8_t)]);
+ auto f8 = fp8e5m2::from_bits(bits);
+ float val_f32 = static_cast<float>(f8);
+ out.push_back(val_f32);
+ }
+ return TOSA_OK;
+}
+
+tosa_err_t TosaSerializationHandler::ConvertU8toF16(const std::vector<uint8_t>& in,
+ uint32_t out_size,
+ std::vector<half_float::half>& out)
+{
+    // Note: fp16 values returned in half_float::half type
+ out.clear();
+ if (in.size() < out_size * sizeof(int16_t))
+ {
+ printf("TosaSerializationHandler::ConvertU8toF16(): uint8 buffer size %ld must >= target size %ld\n", in.size(),
+ out_size * sizeof(int16_t));
+ return TOSA_USER_ERROR;
+ }
+
+ for (uint32_t i = 0; i < out_size; i++)
+ {
+ uint16_t f16_byte0 = in[i * sizeof(int16_t)];
+ uint16_t f16_byte1 = in[i * sizeof(int16_t) + 1];
+ uint16_t val_u16 = f16_byte0 + (f16_byte1 << 8);
+
+ // Reinterpret u16 byte as fp16 then convert to fp32
+ half_float::half val_f16 = *(half_float::half*)&val_u16;
+ out.push_back(val_f16);
+ }
return TOSA_OK;
}
@@ -884,6 +1065,35 @@ tosa_err_t
return TOSA_OK;
}
+tosa_err_t TosaSerializationHandler::ConvertU8toI64(const std::vector<uint8_t>& in,
+ uint32_t out_size,
+ std::vector<int64_t>& out)
+{
+ out.clear();
+ if (in.size() < out_size * sizeof(int64_t))
+ {
+ printf("TosaSerializationHandler::ConvertU8toI64(): uint8 buffer size %ld must >= target size %ld\n", in.size(),
+ out_size * sizeof(int64_t));
+ return TOSA_USER_ERROR;
+ }
+ for (uint32_t i = 0; i < out_size; i++)
+ {
+ uint64_t byte0 = in[i * sizeof(int64_t)];
+ uint64_t byte1 = in[i * sizeof(int64_t) + 1];
+ uint64_t byte2 = in[i * sizeof(int64_t) + 2];
+ uint64_t byte3 = in[i * sizeof(int64_t) + 3];
+ uint64_t byte4 = in[i * sizeof(int64_t) + 4];
+ uint64_t byte5 = in[i * sizeof(int64_t) + 5];
+ uint64_t byte6 = in[i * sizeof(int64_t) + 6];
+ uint64_t byte7 = in[i * sizeof(int64_t) + 7];
+ uint64_t val_u64 = byte0 + (byte1 << 8) + (byte2 << 16) + (byte3 << 24) + (byte4 << 32) + (byte5 << 40) +
+ (byte6 << 48) + (byte7 << 56);
+ int64_t* val_i64 = reinterpret_cast<int64_t*>(&val_u64);
+ out.push_back(*val_i64);
+ }
+ return TOSA_OK;
+}
+
tosa_err_t TosaSerializationHandler::ConvertU8toI48(const std::vector<uint8_t>& in,
uint32_t out_size,
std::vector<int64_t>& out)
diff --git a/test/scripts/test_npy_fileio.py b/test/scripts/test_npy_fileio.py
index e0a6f5d..272c124 100755
--- a/test/scripts/test_npy_fileio.py
+++ b/test/scripts/test_npy_fileio.py
@@ -122,7 +122,7 @@ def main():
xunit_suite = xunit_result.create_suite("basic_serialization")
max_size = 128
- datatypes = ["int32", "int64", "float", "bool"]
+ datatypes = ["int32", "int64", "float", "bool", "double"]
random.seed(args.seed)
failed = 0
diff --git a/test/scripts/test_serialization.py b/test/scripts/test_serialization.py
deleted file mode 100755
index 834bc1d..0000000
--- a/test/scripts/test_serialization.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) 2021, ARM Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-""" Simple test script which uses serialization_read_write to copy tosa files. It
-uses flatc to convert to json for comparison since the binary files may
-differ. """
-
-import argparse
-import filecmp
-import random
-import shlex
-import subprocess
-from datetime import datetime
-from enum import IntEnum, unique
-from pathlib import Path
-from xunit.xunit import xunit_results, xunit_test
-
-
-@unique
-class TestResult(IntEnum):
- PASS = 0
- COMMAND_ERROR = 1
- MISMATCH = 2
- SKIPPED = 3
-
-
-def parseArgs():
- baseDir = (Path(__file__).parent / "../..").resolve()
- buildDir = (baseDir / "build").resolve()
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "-t",
- "--testdir",
- dest="test",
- type=str,
- required=True,
- help="Directory of tosa files to verify",
- )
- parser.add_argument(
- "--flatc",
- default=str(buildDir / "third_party/flatbuffers/flatc"),
- help="location of flatc compiler",
- )
- parser.add_argument(
- "-s",
- "--schema",
- default=str(baseDir / "schema/tosa.fbs"),
- help="location of schema file",
- )
- parser.add_argument(
- "-c",
- "--cmd",
- default=str(buildDir / "serialization_read_write"),
- help="Command to read/write test file",
- )
- parser.add_argument(
- "-v", "--verbose", action="store_true", help="verbose", default=False
- )
- parser.add_argument(
- "--xunit-file", default="result.xml", help="xunit result output file"
- )
- args = parser.parse_args()
-
- # check that required files exist
- if not Path(args.flatc).exists():
- print("flatc not found at location " + args.flatc)
- parser.print_help()
- exit(1)
- if not Path(args.cmd).exists():
- print("command not found at location " + args.cmd)
- parser.print_help()
- exit(1)
- if not Path(args.schema).exists():
- print("schema not found at location " + args.schema)
- parser.print_help()
- exit(1)
- return args
-
-
-def run_sh_command(full_cmd, verbose=False, capture_output=False):
- """Utility function to run an external command. Optionally return captured
- stdout/stderr"""
-
- # Quote the command line for printing
- full_cmd_esc = [shlex.quote(x) for x in full_cmd]
-
- if verbose:
- print("### Running {}".format(" ".join(full_cmd_esc)))
-
- if capture_output:
- rc = subprocess.run(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- if rc.returncode != 0:
- print(rc.stdout.decode("utf-8"))
- print(rc.stderr.decode("utf-8"))
- raise Exception(
- "Error running command: {}.\n{}".format(
- " ".join(full_cmd_esc), rc.stderr.decode("utf-8")
- )
- )
- return (rc.stdout, rc.stderr)
- else:
- rc = subprocess.run(full_cmd)
- if rc.returncode != 0:
- raise Exception("Error running command: {}".format(" ".join(full_cmd_esc)))
-
-
-def runTest(args, testfile):
- start_time = datetime.now()
- result = TestResult.PASS
- message = ""
-
- target = Path(f"serialization_script_output-{random.randint(0,10000)}.tosa")
- source_json = Path(testfile.stem + ".json")
- target_json = Path(target.stem + ".json")
-
- # Remove any previous files
- if target.exists():
- target.unlink()
- if source_json.exists():
- source_json.unlink()
- if target_json.exists():
- target_json.unlink()
-
- try:
- cmd = [args.cmd, str(testfile), str(target)]
- run_sh_command(cmd, args.verbose)
- # Create result json
- cmd = [args.flatc, "--json", "--raw-binary", args.schema, "--", str(target)]
- run_sh_command(cmd, args.verbose)
- # Create source json
- cmd = [args.flatc, "--json", "--raw-binary", args.schema, "--", str(testfile)]
- run_sh_command(cmd, args.verbose)
- if not filecmp.cmp(str(target_json), str(source_json), False):
- print("Failed to compare files on " + str(testfile))
- result = TestResult.MISMATCH
- # Cleanup generated files
- source_json.unlink()
- target_json.unlink()
- target.unlink()
-
- except Exception as e:
- message = str(e)
- result = TestResult.COMMAND_ERROR
- end_time = datetime.now()
- return result, message, end_time - start_time
-
-
-def getTestFiles(dir):
- files = Path(dir).glob("**/*.tosa")
- return files
-
-
-def main():
- args = parseArgs()
- testfiles = getTestFiles(args.test)
-
- suitename = "basic_serialization"
- classname = "copy_test"
-
- xunit_result = xunit_results()
- xunit_suite = xunit_result.create_suite("basic_serialization")
-
- failed = 0
- count = 0
- for test in testfiles:
- count = count + 1
- (result, message, time_delta) = runTest(args, test)
- xt = xunit_test(str(test), f"{suitename}.{classname}")
- xt.time = str(
- float(time_delta.seconds) + (float(time_delta.microseconds) * 1e-6)
- )
- if result == TestResult.PASS:
- pass
- else:
- xt.failed(message)
- failed = failed + 1
- xunit_suite.tests.append(xt)
-
- xunit_result.write_results(args.xunit_file)
- print(f"Total tests run: {count} failures: {failed}")
-
-
-if __name__ == "__main__":
- exit(main())
diff --git a/test/scripts/testfiles/test.tosa b/test/scripts/testfiles/test.tosa
deleted file mode 100644
index 3b4ca56..0000000
--- a/test/scripts/testfiles/test.tosa
+++ /dev/null
Binary files differ
diff --git a/test/src/serialization_npy_test.cpp b/test/src/serialization_npy_test.cpp
index 27ec464..24e3aff 100644
--- a/test/src/serialization_npy_test.cpp
+++ b/test/src/serialization_npy_test.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2021, ARM Limited.
+// Copyright (c) 2021,2024, ARM Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -37,7 +37,7 @@ int test_int_type(std::vector<int32_t> shape, std::default_random_engine& gen, s
}
auto buffer = std::make_unique<T[]>(total_size);
- for (int i = 0; i < total_size; i++)
+ for (size_t i = 0; i < total_size; i++)
{
buffer[i] = gen_data(gen);
}
@@ -76,7 +76,46 @@ int test_float_type(std::vector<int32_t> shape, std::default_random_engine& gen,
}
auto buffer = std::make_unique<T[]>(total_size);
- for (int i = 0; i < total_size; i++)
+ for (size_t i = 0; i < total_size; i++)
+ {
+ buffer[i] = gen_data(gen);
+ }
+
+ NumpyUtilities::NPError err = NumpyUtilities::writeToNpyFile(filename.c_str(), shape, buffer.get());
+ if (err != NumpyUtilities::NO_ERROR)
+ {
+ std::cout << "Error writing file, code " << err << std::endl;
+ return 1;
+ }
+
+ auto read_buffer = std::make_unique<T[]>(total_size);
+ err = NumpyUtilities::readFromNpyFile(filename.c_str(), total_size, read_buffer.get());
+ if (err != NumpyUtilities::NO_ERROR)
+ {
+ std::cout << "Error reading file, code " << err << std::endl;
+ return 1;
+ }
+ if (memcmp(buffer.get(), read_buffer.get(), total_size * sizeof(T)))
+ {
+ std::cout << "Miscompare" << std::endl;
+ return 1;
+ }
+ return 0;
+}
+
+template <class T>
+int test_double_type(std::vector<int32_t> shape, std::default_random_engine& gen, std::string& filename)
+{
+ size_t total_size = 1;
+ std::uniform_real_distribution<T> gen_data(std::numeric_limits<T>::min(), std::numeric_limits<T>::max());
+
+ for (auto i : shape)
+ {
+ total_size *= i;
+ }
+
+ auto buffer = std::make_unique<T[]>(total_size);
+ for (size_t i = 0; i < total_size; i++)
{
buffer[i] = gen_data(gen);
}
@@ -114,7 +153,7 @@ int test_bool_type(std::vector<int32_t> shape, std::default_random_engine& gen,
}
auto buffer = std::make_unique<bool[]>(total_size);
- for (int i = 0; i < total_size; i++)
+ for (size_t i = 0; i < total_size; i++)
{
buffer[i] = (gen_data(gen)) ? true : false;
}
@@ -144,15 +183,13 @@ int test_bool_type(std::vector<int32_t> shape, std::default_random_engine& gen,
int main(int argc, char** argv)
{
- size_t total_size = 1;
- int32_t seed = 1;
+ int32_t seed = 1;
std::string str_type;
std::string str_shape;
std::string filename = "npytest.npy";
std::vector<int32_t> shape;
- bool verbose = false;
int opt;
- while ((opt = getopt(argc, argv, "d:f:s:t:v")) != -1)
+ while ((opt = getopt(argc, argv, "d:f:s:t:")) != -1)
{
switch (opt)
{
@@ -168,9 +205,6 @@ int main(int argc, char** argv)
case 't':
str_shape = optarg;
break;
- case 'v':
- verbose = true;
- break;
default:
std::cerr << "Invalid argument" << std::endl;
break;
@@ -193,7 +227,6 @@ int main(int argc, char** argv)
break;
int val = stoi(substr, &pos, 0);
assert(val);
- total_size *= val;
shape.push_back(val);
}
@@ -212,6 +245,10 @@ int main(int argc, char** argv)
{
return test_float_type<float>(shape, gen, filename);
}
+ else if (str_type == "double")
+ {
+ return test_double_type<double>(shape, gen, filename);
+ }
else if (str_type == "bool")
{
return test_bool_type(shape, gen, filename);
diff --git a/third_party/flatbuffers b/third_party/flatbuffers
-Subproject 697147a2e686486424b9d15fc3e1612586a60f9
+Subproject 0100f6a5779831fa7a651e4b67ef389a8752bd9
diff --git a/third_party/half/ChangeLog.txt b/third_party/half/ChangeLog.txt
new file mode 100644
index 0000000..37f3dbf
--- /dev/null
+++ b/third_party/half/ChangeLog.txt
@@ -0,0 +1,213 @@
+Release Notes {#changelog}
+=============
+
+2.2.0 release (2021-06-12):
+---------------------------
+
+- Added `rsqrt` function for inverse square root.
+- Improved performance of `pow` function.
+- Fixed bug that forgot to include `<immintrin.h>` for F16C intrinsics.
+
+
+2.1.0 release (2019-08-05):
+---------------------------
+
+- Added detection of IEEE floating-point exceptions to operators and functions.
+- Added configuration options for automatic exception handling.
+- Added functions for explicitly managing floating-point exception flags.
+- Improved accuracy of `pow` and `atan2` functions.
+
+
+2.0.0 release (2019-07-23):
+---------------------------
+
+- Made internal implementation independent from built-in floating point
+ facilities for increased reliability and IEEE-conformance.
+- Changed default rounding mode to rounding to nearest.
+- Always round ties to even when rounding to nearest.
+- Extended `constexpr` support to comparison and classification functions.
+- Added support for F16C compiler intrinsics for conversions.
+- Enabled C++11 feature detection for Intel compilers.
+
+
+1.12.0 release (2017-03-06):
+----------------------------
+
+- Changed behaviour of `half_cast` to perform conversions to/from `double`
+ and `long double` directly according to specified rounding mode, without an
+ intermediate `float` conversion.
+- Added `noexcept` specifiers to constructors.
+- Fixed minor portability problem with `logb` and `ilogb`.
+- Tested for *VC++ 2015*.
+
+
+1.11.0 release (2013-11-16):
+----------------------------
+
+- Made tie-breaking behaviour in round to nearest configurable by
+ `HALF_ROUND_TIES_TO_EVEN` macro.
+- Completed support for all C++11 mathematical functions even if single-
+ precision versions from `<cmath>` are unsupported.
+- Fixed inability to disable support for C++11 mathematical functions on
+ *VC++ 2013*.
+
+
+1.10.0 release (2013-11-09):
+----------------------------
+
+- Made default rounding mode configurable by `HALF_ROUND_STYLE` macro.
+- Added support for non-IEEE single-precision implementations.
+- Added `HALF_ENABLE_CPP11_TYPE_TRAITS` preprocessor flag for checking
+ support for C++11 type traits and TMP features.
+- Restricted `half_cast` to support built-in arithmetic types only.
+- Changed behaviour of `half_cast` to respect rounding mode when casting
+ to/from integer types.
+
+
+1.9.2 release (2013-11-01):
+---------------------------
+
+- Tested for *gcc 4.8*.
+- Tested and fixed for *VC++ 2013*.
+- Removed unnecessary warnings in *MSVC*.
+
+
+1.9.1 release (2013-08-08):
+---------------------------
+
+- Fixed problems with older gcc and MSVC versions.
+- Small fix to non-C++11 implementations of `remainder` and `remquo`.
+
+
+1.9.0 release (2013-08-07):
+---------------------------
+
+- Changed behaviour of `nearbyint`, `rint`, `lrint` and `llrint` to use
+ rounding mode of half-precision implementation (which is
+ truncating/indeterminate) instead of single-precision rounding mode.
+- Added support for more C++11 mathematical functions even if single-
+ precision versions from `<cmath>` are unsupported, in particular
+ `remainder`, `remquo` and `cbrt`.
+- Minor implementation changes.
+
+
+1.8.1 release (2013-01-22):
+---------------------------
+
+- Fixed bug resulting in multiple definitions of the `nanh` function due to
+ a missing `inline` specification.
+
+
+1.8.0 release (2013-01-19):
+---------------------------
+
+- Added support for more C++11 mathematical functions even if single-
+ precision versions from `<cmath>` are unsupported, in particular
+ exponential and logarithm functions, hyperbolic area functions and the
+ hypotenuse function.
+- Made `fma` function use default implementation if single-precision version
+ from `<cmath>` is not faster and thus `FP_FAST_FMAH` to be defined always.
+- Fixed overload resolution issues when invoking certain mathematical
+ functions by unqualified calls.
+
+
+1.7.0 release (2012-10-26):
+---------------------------
+
+- Added support for C++11 `noexcept` specifiers.
+- Changed C++11 `long long` to be supported on *VC++ 2003* and up.
+
+
+1.6.1 release (2012-09-13):
+---------------------------
+
+- Made `fma` and `fdim` functions available even if corresponding
+ single-precision functions are not.
+
+
+1.6.0 release (2012-09-12):
+---------------------------
+
+- Added `HALF_ENABLE_CPP11_LONG_LONG` to control support for `long long`
+ integers and corresponding mathematical functions.
+- Fixed C++98 compatibility on non-VC compilers.
+
+
+1.5.1 release (2012-08-17):
+---------------------------
+
+- Recorrected `std::numeric_limits::round_style` to always return
+ `std::round_indeterminate`, due to overflow-handling deviating from
+ correct round-toward-zero behaviour.
+
+
+1.5.0 release (2012-08-16):
+---------------------------
+
+- Added `half_cast` for explicitly casting between half and any type
+ convertible to/from `float` and allowing the explicit specification of
+ the rounding mode to use.
+
+
+1.4.0 release (2012-08-12):
+---------------------------
+
+- Added support for C++11 generalized constant expressions (`constexpr`).
+
+
+1.3.1 release (2012-08-11):
+---------------------------
+
+- Fixed requirement for `std::signbit` and `std::isnan` (even if C++11
+ `<cmath>` functions disabled) on non-VC compilers.
+
+
+1.3.0 release (2012-08-10):
+---------------------------
+
+- Made requirement for `<cstdint>` and `static_assert` optional and thus
+ made the library C++98-compatible.
+- Made support for C++11 features user-overridable through explicit
+ definition of corresponding preprocessor symbols to either 0 or 1.
+- Renamed `HALF_ENABLE_HASH` to `HALF_ENABLE_CPP11_HASH` in correspondence
+ with other C++11 preprocessor symbols.
+
+
+1.2.0 release (2012-08-07):
+---------------------------
+
+- Added proper preprocessor definitions for `HUGE_VALH` and `FP_FAST_FMAH`
+ in correspondence with their single-precision counterparts from `<cmath>`.
+- Fixed internal preprocessor macros to be properly undefined after use.
+
+
+1.1.2 release (2012-08-07):
+---------------------------
+
+- Revised `std::numeric_limits::round_style` to return
+ `std::round_toward_zero` if the `float` version also does and
+ `std::round_indeterminate` otherwise.
+- Fixed `std::numeric_limits::round_error` to reflect worst-case round
+ toward zero behaviour.
+
+
+1.1.1 release (2012-08-06):
+---------------------------
+
+- Fixed `std::numeric_limits::min` to return smallest positive normal
+ number, instead of subnormal number.
+- Fixed `std::numeric_limits::round_style` to return
+ `std::round_indeterminate` due to mixture of separately rounded
+ single-precision arithmetics with truncating single-to-half conversions.
+
+
+1.1.0 release (2012-08-06):
+---------------------------
+
+- Added half-precision literals.
+
+
+1.0.0 release (2012-08-05):
+---------------------------
+
+- First release.
diff --git a/third_party/half/LICENSE.txt b/third_party/half/LICENSE.txt
new file mode 100644
index 0000000..45f55db
--- /dev/null
+++ b/third_party/half/LICENSE.txt
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) 2012-2021 Christian Rau
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/third_party/half/README.txt b/third_party/half/README.txt
new file mode 100644
index 0000000..3dd0d1c
--- /dev/null
+++ b/third_party/half/README.txt
@@ -0,0 +1,317 @@
+HALF-PRECISION FLOATING-POINT LIBRARY (Version 2.2.0)
+-----------------------------------------------------
+
+This is a C++ header-only library to provide an IEEE 754 conformant 16-bit
+half-precision floating-point type along with corresponding arithmetic
+operators, type conversions and common mathematical functions. It aims for both
+efficiency and ease of use, trying to accurately mimic the behaviour of the
+built-in floating-point types at the best performance possible.
+
+
+INSTALLATION AND REQUIREMENTS
+-----------------------------
+
+Conveniently, the library consists of just a single header file containing all
+the functionality, which can be directly included by your projects, without the
+necessity to build anything or link to anything.
+
+Whereas this library is fully C++98-compatible, it can profit from certain
+C++11 features. Support for those features is checked automatically at compile
+(or rather preprocessing) time, but can be explicitly enabled or disabled by
+predefining the corresponding preprocessor symbols to either 1 or 0 yourself
+before including half.hpp. This is useful when the automatic detection fails
+(for more exotic implementations) or when a feature should be explicitly
+disabled:
+
+ - 'long long' integer type for mathematical functions returning 'long long'
+ results (enabled for VC++ 2003 and icc 11.1 and newer, gcc and clang,
+ overridable with 'HALF_ENABLE_CPP11_LONG_LONG').
+
+ - Static assertions for extended compile-time checks (enabled for VC++ 2010,
+ gcc 4.3, clang 2.9, icc 11.1 and newer, overridable with
+ 'HALF_ENABLE_CPP11_STATIC_ASSERT').
+
+ - Generalized constant expressions (enabled for VC++ 2015, gcc 4.6, clang 3.1,
+ icc 14.0 and newer, overridable with 'HALF_ENABLE_CPP11_CONSTEXPR').
+
+ - noexcept exception specifications (enabled for VC++ 2015, gcc 4.6,
+ clang 3.0, icc 14.0 and newer, overridable with 'HALF_ENABLE_CPP11_NOEXCEPT').
+
+ - User-defined literals for half-precision literals to work (enabled for
+ VC++ 2015, gcc 4.7, clang 3.1, icc 15.0 and newer, overridable with
+ 'HALF_ENABLE_CPP11_USER_LITERALS').
+
+ - Thread-local storage for per-thread floating-point exception flags (enabled
+ for VC++ 2015, gcc 4.8, clang 3.3, icc 15.0 and newer, overridable with
+ 'HALF_ENABLE_CPP11_THREAD_LOCAL').
+
+ - Type traits and template meta-programming features from <type_traits>
+ (enabled for VC++ 2010, libstdc++ 4.3, libc++ and newer, overridable with
+ 'HALF_ENABLE_CPP11_TYPE_TRAITS').
+
+ - Special integer types from <cstdint> (enabled for VC++ 2010, libstdc++ 4.3,
+ libc++ and newer, overridable with 'HALF_ENABLE_CPP11_CSTDINT').
+
+ - Certain C++11 single-precision mathematical functions from <cmath> for
+ floating-point classification during conversions from higher precision types
+ (enabled for VC++ 2013, libstdc++ 4.3, libc++ and newer, overridable with
+ 'HALF_ENABLE_CPP11_CMATH').
+
+ - Floating-point environment control from <cfenv> for possible exception
+ propagation to the built-in floating-point platform (enabled for VC++ 2013,
+ libstdc++ 4.3, libc++ and newer, overridable with 'HALF_ENABLE_CPP11_CFENV').
+
+ - Hash functor 'std::hash' from <functional> (enabled for VC++ 2010,
+ libstdc++ 4.3, libc++ and newer, overridable with 'HALF_ENABLE_CPP11_HASH').
+
+The library has been tested successfully with Visual C++ 2005-2015, gcc 4-8
+and clang 3-8 on 32- and 64-bit x86 systems. Please contact me if you have any
+problems, suggestions or even just success testing it on other platforms.
+
+
+DOCUMENTATION
+-------------
+
+What follows are some general words about the usage of the library and its
+implementation. For a complete documentation of its interface consult the
+corresponding website http://half.sourceforge.net. You may also generate the
+complete developer documentation from the library's only include file's doxygen
+comments, but this is more relevant to developers rather than mere users.
+
+BASIC USAGE
+
+To make use of the library just include its only header file half.hpp, which
+defines all half-precision functionality inside the 'half_float' namespace. The
+actual 16-bit half-precision data type is represented by the 'half' type, which
+uses the standard IEEE representation with 1 sign bit, 5 exponent bits and 11
+mantissa bits (including the hidden bit) and supports all types of special
+values, like subnormal values, infinity and NaNs. This type behaves like the
+built-in floating-point types as much as possible, supporting the usual
+arithmetic, comparison and streaming operators, which makes its use pretty
+straight-forward:
+
+ using half_float::half;
+ half a(3.4), b(5);
+ half c = a * b;
+ c += 3;
+ if(c > a)
+ std::cout << c << std::endl;
+
+Additionally the 'half_float' namespace also defines half-precision versions
+for all mathematical functions of the C++ standard library, which can be used
+directly through ADL:
+
+ half a(-3.14159);
+ half s = sin(abs(a));
+ long l = lround(s);
+
+You may also specify explicit half-precision literals, since the library
+provides a user-defined literal inside the 'half_float::literal' namespace,
+which you just need to import (assuming support for C++11 user-defined literals):
+
+ using namespace half_float::literal;
+ half x = 1.0_h;
+
+Furthermore the library provides proper specializations for
+'std::numeric_limits', defining various implementation properties, and
+'std::hash' for hashing half-precision numbers (assuming support for C++11
+'std::hash'). Similar to the corresponding preprocessor symbols from <cmath>
+the library also defines the 'HUGE_VALH' constant and maybe the 'FP_FAST_FMAH'
+symbol.
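+
+For example (a minimal illustration, assuming support for C++11 'std::hash'):
+
+ half_float::half eps = std::numeric_limits<half_float::half>::epsilon();
+ std::size_t hash_value = std::hash<half_float::half>()(eps);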
+
+CONVERSIONS AND ROUNDING
+
+The half is explicitly constructible/convertible from a single-precision float
+argument. Thus it is also explicitly constructible/convertible from any type
+implicitly convertible to float, but constructing it from types like double or
+int will involve the usual warnings arising when implicitly converting those to
+float because of the lost precision. On the one hand those warnings are
+intentional, because converting those types to half necessarily also reduces
+precision. But on the other hand they are raised for explicit conversions from
+those types, when the user knows what he is doing. So if those warnings keep
+bugging you, then you won't get around first explicitly converting to float
+before converting to half, or use the 'half_cast' described below. In addition
+you can also directly assign float values to halfs.
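+
+For example:
+
+ half h(0.5f); // explicit construction from a float
+ h = 0.25f; // direct assignment of a float value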
+
+In contrast to the float-to-half conversion, which reduces precision, the
+conversion from half to float (and thus to any other type implicitly
+convertible from float) is implicit, because all values representable with
+half-precision are also representable with single-precision. This way the
+half-to-float conversion behaves similar to the builtin float-to-double
+conversion and all arithmetic expressions involving both half-precision and
+single-precision arguments will be of single-precision type. This way you can
+also directly use the mathematical functions of the C++ standard library,
+though in this case you will invoke the single-precision versions which will
+also return single-precision values, which is (even if maybe performing the
+exact same computation, see below) not as conceptually clean when working in a
+half-precision environment.
+
+The default rounding mode for conversions between half and more precise types
+as well as for rounding results of arithmetic operations and mathematical
+functions rounds to the nearest representable value. But by predefining the
+'HALF_ROUND_STYLE' preprocessor symbol this default can be overridden with one
+of the other standard rounding modes using their respective constants or the
+equivalent values of 'std::float_round_style' (it can even be synchronized with
+the built-in single-precision implementation by defining it to
+'std::numeric_limits<float>::round_style'):
+
+ - 'std::round_indeterminate' (-1) for the fastest rounding.
+
+ - 'std::round_toward_zero' (0) for rounding toward zero.
+
+ - 'std::round_to_nearest' (1) for rounding to the nearest value (default).
+
+ - 'std::round_toward_infinity' (2) for rounding toward positive infinity.
+
+ - 'std::round_toward_neg_infinity' (3) for rounding toward negative infinity.
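+
+For example, to make rounding toward zero the default, predefine the symbol
+before including half.hpp:
+
+ #define HALF_ROUND_STYLE 0
+ #include "half.hpp"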
+
+In addition to changing the overall default rounding mode one can also use the
+'half_cast'. This converts between half and any built-in arithmetic type using
+a configurable rounding mode (or the default rounding mode if none is
+specified). In addition to a configurable rounding mode, 'half_cast' has
+another big difference to a mere 'static_cast': Any conversions are performed
+directly using the given rounding mode, without any intermediate conversion
+to/from 'float'. This is especially relevant for conversions to integer types,
+which don't necessarily truncate anymore. But also for conversions from
+'double' or 'long double' this may produce more precise results than a
+pre-conversion to 'float' using the single-precision implementation's current
+rounding mode would.
+
+ half a = half_cast<half>(4.2);
+ half b = half_cast<half,std::numeric_limits<float>::round_style>(4.2f);
+ assert( half_cast<int, std::round_to_nearest>( 0.7_h ) == 1 );
+ assert( half_cast<half,std::round_toward_zero>( 4097 ) == 4096.0_h );
+ assert( half_cast<half,std::round_toward_infinity>( 4097 ) == 4100.0_h );
+ assert( half_cast<half,std::round_toward_infinity>( std::numeric_limits<double>::min() ) > 0.0_h );
+
+ACCURACY AND PERFORMANCE
+
+From version 2.0 onward the library is implemented without employing the
+underlying floating-point implementation of the system (except for conversions,
+of course), providing an entirely self-contained half-precision implementation
+with results independent from the system's existing single- or double-precision
+implementation and its rounding behaviour.
+
+As to accuracy, many of the operators and functions provided by this library
+are exact to rounding for all rounding modes, i.e. the error to the exact
+result is at most 0.5 ULP (unit in the last place) for rounding to nearest and
+less than 1 ULP for all other rounding modes. This holds for all the operations
+required by the IEEE 754 standard and many more. Specifically the following
+functions might exhibit a deviation from the correctly rounded exact result by
+1 ULP for a select few input values: 'expm1', 'log1p', 'pow', 'atan2', 'erf',
+'erfc', 'lgamma', 'tgamma' (for more details see the documentation of the
+individual functions). All other functions and operators are always exact to
+rounding or independent of the rounding mode altogether.
+
+The increased IEEE-conformance and cleanliness of this implementation comes
+with a certain performance cost compared to doing computations and mathematical
+functions in hardware-accelerated single-precision. On average and depending on
+the platform, the arithmetic operators are about 75% as fast and the
+mathematical functions about 33-50% as fast as performing the corresponding
+operations in single-precision and converting between the inputs and outputs.
+However, directly computing with half-precision values is a rather rare
+use-case and usually using actual 'float' values for all computations and
+temporaries and using 'half's only for storage is the recommended way. But
+nevertheless the goal of this library was to provide a complete and
+conceptually clean IEEE-conformant half-precision implementation and in the few
+cases when you do need to compute directly in half-precision you do so for a
+reason and want accurate results.
+
+If necessary, this internal implementation can be overridden by predefining the
+'HALF_ARITHMETIC_TYPE' preprocessor symbol to one of the built-in
+floating-point types ('float', 'double' or 'long double'), which will cause the
+library to use this type for computing arithmetic operations and mathematical
+functions (if available). However, due to using the platform's floating-point
+implementation (and its rounding behaviour) internally, this might cause
+results to deviate from the specified half-precision rounding mode. It will of
+course also inhibit the automatic exception detection described below.
+
+The conversion operations between half-precision and single-precision types can
+also make use of the F16C extension for x86 processors by using the
+corresponding compiler intrinsics from <immintrin.h>. Support for this is
+checked at compile-time by looking for the '__F16C__' macro which at least gcc
+and clang define based on the target platform. It can also be enabled manually
+by predefining the 'HALF_ENABLE_F16C_INTRINSICS' preprocessor symbol to 1, or 0
+for explicitly disabling it. However, this will directly use the corresponding
+intrinsics for conversion without checking if they are available at runtime
+(possibly crashing if they are not), so make sure they are supported on the
+target platform before enabling this.
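+
+For example, to explicitly force the use of the F16C intrinsics:
+
+ #define HALF_ENABLE_F16C_INTRINSICS 1
+ #include "half.hpp"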
+
+EXCEPTION HANDLING
+
+The half-precision implementation supports all 5 required floating-point
+exceptions from the IEEE standard to indicate erroneous inputs or inexact
+results during operations. These are represented by exception flags which
+actually use the same values as the corresponding 'FE_...' flags defined in
+C++11's <cfenv> header if supported, specifically:
+
+ - 'FE_INVALID' for invalid inputs to an operation.
+ - 'FE_DIVBYZERO' for finite inputs producing infinite results.
+ - 'FE_OVERFLOW' if a result is too large to represent finitely.
+ - 'FE_UNDERFLOW' for a subnormal or zero result after rounding.
+ - 'FE_INEXACT' if a result needed rounding to be representable.
+ - 'FE_ALL_EXCEPT' as a convenient OR of all possible exception flags.
+
+The internal exception flag state will start with all flags cleared and is
+maintained per thread if C++11 thread-local storage is supported, otherwise it
+will be maintained globally and will theoretically NOT be thread-safe (while
+practically being as thread-safe as a simple integer variable can be). These
+flags can be managed explicitly using the library's error handling functions,
+which again try to mimic the built-in functions for handling floating-point
+exceptions from <cfenv>. You can clear them with 'feclearexcept' (which is the
+only way a flag can be cleared), test them with 'fetestexcept', explicitly
+raise errors with 'feraiseexcept' and save and restore their state using
+'fegetexceptflag' and 'fesetexceptflag'. You can also throw corresponding C++
+exceptions based on the current flag state using 'fethrowexcept'.
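+
+For example (a minimal sketch, assuming 'HALF_ERRHANDLING_FLAGS' described
+below has been predefined to 1 so that the flags are updated automatically):
+
+ using half_float::half;
+ half_float::feclearexcept(FE_ALL_EXCEPT);
+ half x = std::numeric_limits<half>::max() * half(2.0f);
+ if(half_float::fetestexcept(FE_OVERFLOW))
+   std::cout << "half-precision overflow" << std::endl;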
+
+However, any automatic exception detection and handling during half-precision
+operations and functions is DISABLED by default, since it comes with a minor
+performance overhead due to runtime checks, and reacting to IEEE floating-point
+exceptions is rarely ever needed in application code. But the library fully
+supports IEEE-conformant detection of floating-point exceptions and various
+ways for handling them, which can be enabled by pre-defining the corresponding
+preprocessor symbols to 1. They can be enabled individually or all at once and
+they will be processed in the order they are listed here:
+
+ - 'HALF_ERRHANDLING_FLAGS' sets the internal exception flags described above
+ whenever the corresponding exception occurs.
+ - 'HALF_ERRHANDLING_ERRNO' sets the value of 'errno' from <cerrno> similar to
+ the behaviour of the built-in floating-point types when 'MATH_ERRNO' is used.
+ - 'HALF_ERRHANDLING_FENV' will propagate exceptions to the built-in
+ floating-point implementation using 'std::feraiseexcept' if support for
+ C++11 floating-point control is enabled. However, this does not synchronize
+ exceptions: neither will clearing propagate nor will it work in reverse.
+ - 'HALF_ERRHANDLING_THROW_...' can be defined to a string literal which will
+ be used as description message for a C++ exception that is thrown whenever
+ a 'FE_...' exception occurs, similar to the behaviour of 'fethrowexcept'.
+
+If any of the above error handling is activated, non-quiet operations on
+half-precision values will also raise a 'FE_INVALID' exception whenever
+they encounter a signaling NaN value, in addition to transforming the value
+into a quiet NaN. If error handling is disabled, signaling NaNs will be
+treated like quiet NaNs (while still getting explicitly quieted if propagated
+to the result). There can also be additional treatment of overflow and
+underflow errors after they have been processed as above, which is ENABLED by
+default (but of course only takes effect if any other exception handling is
+activated) unless overridden by pre-defining the corresponding preprocessor
+symbol to 0:
+
+ - 'HALF_ERRHANDLING_OVERFLOW_TO_INEXACT' will cause overflow errors to also
+ raise a 'FE_INEXACT' exception.
+ - 'HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT' will cause underflow errors to also
+ raise a 'FE_INEXACT' exception. This will also slightly change the
+ behaviour of the underflow exception, which will ONLY be raised if the
+ result is actually inexact due to underflow. If this is disabled, underflow
+ exceptions will be raised for ANY (possibly exact) subnormal result.
+
+
+CREDITS AND CONTACT
+-------------------
+
+This library is developed by CHRISTIAN RAU and released under the MIT License
+(see LICENSE.txt). If you have any questions or problems with it, feel free to
+contact me at rauy@users.sourceforge.net.
+
+Additional credit goes to JEROEN VAN DER ZIJP for his paper on "Fast Half Float
+Conversions", whose algorithms have been used in the library for converting
+between half-precision and single-precision values.
diff --git a/third_party/half/include/half.hpp b/third_party/half/include/half.hpp
new file mode 100644
index 0000000..ee8819a
--- /dev/null
+++ b/third_party/half/include/half.hpp
@@ -0,0 +1,4605 @@
+// half - IEEE 754-based half-precision floating-point library.
+//
+// Copyright (c) 2012-2021 Christian Rau <rauy@users.sourceforge.net>
+// Copyright (c) 2023, ARM Limited.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
+// files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
+// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
+// Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Version 2.2.0
+
+/// \file
+/// Main header file for half-precision functionality.
+
+#ifndef HALF_HALF_HPP
+#define HALF_HALF_HPP
+
+#define HALF_GCC_VERSION (__GNUC__*100+__GNUC_MINOR__)
+
+#if defined(__INTEL_COMPILER)
+ #define HALF_ICC_VERSION __INTEL_COMPILER
+#elif defined(__ICC)
+ #define HALF_ICC_VERSION __ICC
+#elif defined(__ICL)
+ #define HALF_ICC_VERSION __ICL
+#else
+ #define HALF_ICC_VERSION 0
+#endif
+
+// check C++11 language features
+#if defined(__clang__) // clang
+ #if __has_feature(cxx_static_assert) && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)
+ #define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+ #endif
+ #if __has_feature(cxx_constexpr) && !defined(HALF_ENABLE_CPP11_CONSTEXPR)
+ #define HALF_ENABLE_CPP11_CONSTEXPR 1
+ #endif
+ #if __has_feature(cxx_noexcept) && !defined(HALF_ENABLE_CPP11_NOEXCEPT)
+ #define HALF_ENABLE_CPP11_NOEXCEPT 1
+ #endif
+ #if __has_feature(cxx_user_literals) && !defined(HALF_ENABLE_CPP11_USER_LITERALS)
+ #define HALF_ENABLE_CPP11_USER_LITERALS 1
+ #endif
+ #if __has_feature(cxx_thread_local) && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)
+ #define HALF_ENABLE_CPP11_THREAD_LOCAL 1
+ #endif
+ #if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L) && !defined(HALF_ENABLE_CPP11_LONG_LONG)
+ #define HALF_ENABLE_CPP11_LONG_LONG 1
+ #endif
+#elif HALF_ICC_VERSION && defined(__INTEL_CXX11_MODE__) // Intel C++
+ #if HALF_ICC_VERSION >= 1500 && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)
+ #define HALF_ENABLE_CPP11_THREAD_LOCAL 1
+ #endif
+ #if HALF_ICC_VERSION >= 1500 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)
+ #define HALF_ENABLE_CPP11_USER_LITERALS 1
+ #endif
+ #if HALF_ICC_VERSION >= 1400 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)
+ #define HALF_ENABLE_CPP11_CONSTEXPR 1
+ #endif
+ #if HALF_ICC_VERSION >= 1400 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)
+ #define HALF_ENABLE_CPP11_NOEXCEPT 1
+ #endif
+ #if HALF_ICC_VERSION >= 1110 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)
+ #define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+ #endif
+ #if HALF_ICC_VERSION >= 1110 && !defined(HALF_ENABLE_CPP11_LONG_LONG)
+ #define HALF_ENABLE_CPP11_LONG_LONG 1
+ #endif
+#elif defined(__GNUC__) // gcc
+ #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
+ #if HALF_GCC_VERSION >= 408 && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)
+ #define HALF_ENABLE_CPP11_THREAD_LOCAL 1
+ #endif
+ #if HALF_GCC_VERSION >= 407 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)
+ #define HALF_ENABLE_CPP11_USER_LITERALS 1
+ #endif
+ #if HALF_GCC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)
+ #define HALF_ENABLE_CPP11_CONSTEXPR 1
+ #endif
+ #if HALF_GCC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)
+ #define HALF_ENABLE_CPP11_NOEXCEPT 1
+ #endif
+ #if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)
+ #define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+ #endif
+ #if !defined(HALF_ENABLE_CPP11_LONG_LONG)
+ #define HALF_ENABLE_CPP11_LONG_LONG 1
+ #endif
+ #endif
+ #define HALF_TWOS_COMPLEMENT_INT 1
+#elif defined(_MSC_VER) // Visual C++
+ #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_THREAD_LOCAL)
+ #define HALF_ENABLE_CPP11_THREAD_LOCAL 1
+ #endif
+ #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)
+ #define HALF_ENABLE_CPP11_USER_LITERALS 1
+ #endif
+ #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)
+ #define HALF_ENABLE_CPP11_CONSTEXPR 1
+ #endif
+ #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)
+ #define HALF_ENABLE_CPP11_NOEXCEPT 1
+ #endif
+ #if _MSC_VER >= 1600 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)
+ #define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+ #endif
+ #if _MSC_VER >= 1310 && !defined(HALF_ENABLE_CPP11_LONG_LONG)
+ #define HALF_ENABLE_CPP11_LONG_LONG 1
+ #endif
+ #define HALF_TWOS_COMPLEMENT_INT 1
+ #define HALF_POP_WARNINGS 1
+ #pragma warning(push)
+ #pragma warning(disable : 4099 4127 4146) //struct vs class, constant in if, negative unsigned
+#endif
+
+// check C++11 library features
+#include <utility>
+#if defined(_LIBCPP_VERSION) // libc++
+ #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103
+ #ifndef HALF_ENABLE_CPP11_TYPE_TRAITS
+ #define HALF_ENABLE_CPP11_TYPE_TRAITS 1
+ #endif
+ #ifndef HALF_ENABLE_CPP11_CSTDINT
+ #define HALF_ENABLE_CPP11_CSTDINT 1
+ #endif
+ #ifndef HALF_ENABLE_CPP11_CMATH
+ #define HALF_ENABLE_CPP11_CMATH 1
+ #endif
+ #ifndef HALF_ENABLE_CPP11_HASH
+ #define HALF_ENABLE_CPP11_HASH 1
+ #endif
+ #ifndef HALF_ENABLE_CPP11_CFENV
+ #define HALF_ENABLE_CPP11_CFENV 1
+ #endif
+ #endif
+#elif defined(__GLIBCXX__) // libstdc++
+ #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103
+ #ifdef __clang__
+ #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS)
+ #define HALF_ENABLE_CPP11_TYPE_TRAITS 1
+ #endif
+ #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CSTDINT)
+ #define HALF_ENABLE_CPP11_CSTDINT 1
+ #endif
+ #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CMATH)
+ #define HALF_ENABLE_CPP11_CMATH 1
+ #endif
+ #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_HASH)
+ #define HALF_ENABLE_CPP11_HASH 1
+ #endif
+ #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CFENV)
+ #define HALF_ENABLE_CPP11_CFENV 1
+ #endif
+ #else
+ #if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS)
+ #define HALF_ENABLE_CPP11_TYPE_TRAITS 1
+ #endif
+ #if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CSTDINT)
+ #define HALF_ENABLE_CPP11_CSTDINT 1
+ #endif
+ #if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CMATH)
+ #define HALF_ENABLE_CPP11_CMATH 1
+ #endif
+ #if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_HASH)
+ #define HALF_ENABLE_CPP11_HASH 1
+ #endif
+ #if HALF_GCC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CFENV)
+ #define HALF_ENABLE_CPP11_CFENV 1
+ #endif
+ #endif
+ #endif
+#elif defined(_CPPLIB_VER) // Dinkumware/Visual C++
+ #if _CPPLIB_VER >= 520 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS)
+ #define HALF_ENABLE_CPP11_TYPE_TRAITS 1
+ #endif
+ #if _CPPLIB_VER >= 520 && !defined(HALF_ENABLE_CPP11_CSTDINT)
+ #define HALF_ENABLE_CPP11_CSTDINT 1
+ #endif
+ #if _CPPLIB_VER >= 520 && !defined(HALF_ENABLE_CPP11_HASH)
+ #define HALF_ENABLE_CPP11_HASH 1
+ #endif
+ #if _CPPLIB_VER >= 610 && !defined(HALF_ENABLE_CPP11_CMATH)
+ #define HALF_ENABLE_CPP11_CMATH 1
+ #endif
+ #if _CPPLIB_VER >= 610 && !defined(HALF_ENABLE_CPP11_CFENV)
+ #define HALF_ENABLE_CPP11_CFENV 1
+ #endif
+#endif
+#undef HALF_GCC_VERSION
+#undef HALF_ICC_VERSION
+
+// any error throwing C++ exceptions?
+#if defined(HALF_ERRHANDLING_THROW_INVALID) || defined(HALF_ERRHANDLING_THROW_DIVBYZERO) || defined(HALF_ERRHANDLING_THROW_OVERFLOW) || defined(HALF_ERRHANDLING_THROW_UNDERFLOW) || defined(HALF_ERRHANDLING_THROW_INEXACT)
+#define HALF_ERRHANDLING_THROWS 1
+#endif
+
+// any error handling enabled?
+#define HALF_ERRHANDLING (HALF_ERRHANDLING_FLAGS||HALF_ERRHANDLING_ERRNO||HALF_ERRHANDLING_FENV||HALF_ERRHANDLING_THROWS)
+
+#if HALF_ERRHANDLING
+ #define HALF_UNUSED_NOERR(name) name
+#else
+ #define HALF_UNUSED_NOERR(name)
+#endif
+
+// support constexpr
+#if HALF_ENABLE_CPP11_CONSTEXPR
+ #define HALF_CONSTEXPR constexpr
+ #define HALF_CONSTEXPR_CONST constexpr
+ #if HALF_ERRHANDLING
+ #define HALF_CONSTEXPR_NOERR
+ #else
+ #define HALF_CONSTEXPR_NOERR constexpr
+ #endif
+#else
+ #define HALF_CONSTEXPR
+ #define HALF_CONSTEXPR_CONST const
+ #define HALF_CONSTEXPR_NOERR
+#endif
+
+// support noexcept
+#if HALF_ENABLE_CPP11_NOEXCEPT
+ #define HALF_NOEXCEPT noexcept
+ #define HALF_NOTHROW noexcept
+#else
+ #define HALF_NOEXCEPT
+ #define HALF_NOTHROW throw()
+#endif
+
+// support thread storage
+#if HALF_ENABLE_CPP11_THREAD_LOCAL
+ #define HALF_THREAD_LOCAL thread_local
+#else
+ #define HALF_THREAD_LOCAL static
+#endif
+
+#include <utility>
+#include <algorithm>
+#include <istream>
+#include <ostream>
+#include <limits>
+#include <stdexcept>
+#include <climits>
+#include <cmath>
+#include <cstring>
+#include <cstdlib>
+#if HALF_ENABLE_CPP11_TYPE_TRAITS
+ #include <type_traits>
+#endif
+#if HALF_ENABLE_CPP11_CSTDINT
+ #include <cstdint>
+#endif
+#if HALF_ERRHANDLING_ERRNO
+ #include <cerrno>
+#endif
+#if HALF_ENABLE_CPP11_CFENV
+ #include <cfenv>
+#endif
+#if HALF_ENABLE_CPP11_HASH
+ #include <functional>
+#endif
+
+
+#ifndef HALF_ENABLE_F16C_INTRINSICS
+    /// Enable F16C instruction set intrinsics.
+ /// Defining this to 1 enables the use of [F16C compiler intrinsics](https://en.wikipedia.org/wiki/F16C) for converting between
+ /// half-precision and single-precision values which may result in improved performance. This will not perform additional checks
+ /// for support of the F16C instruction set, so an appropriate target platform is required when enabling this feature.
+ ///
+ /// Unless predefined it will be enabled automatically when the `__F16C__` symbol is defined, which some compilers do on supporting platforms.
+ #define HALF_ENABLE_F16C_INTRINSICS __F16C__
+#endif
+#if HALF_ENABLE_F16C_INTRINSICS
+ #include <immintrin.h>
+#endif
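+
+// Usage note (illustrative, stated as an assumption about typical toolchains): GCC and Clang
+// generally define __F16C__ when compiling for an F16C-capable target (e.g. with -mf16c), which
+// enables the intrinsic conversion paths above; HALF_ENABLE_F16C_INTRINSICS can also be
+// predefined to 1 directly, assuming the target CPU actually supports the F16C instructions.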
+
+#ifdef HALF_DOXYGEN_ONLY
+/// Type for internal floating-point computations.
+/// This can be predefined to a built-in floating-point type (`float`, `double` or `long double`) to override the internal
+/// half-precision implementation to use this type for computing arithmetic operations and mathematical functions (if available).
+/// This can result in improved performance for arithmetic operators and mathematical functions but might cause results to
+/// deviate from the specified half-precision rounding mode and inhibits proper detection of half-precision exceptions.
+#define HALF_ARITHMETIC_TYPE (undefined)
+
+/// Enable internal exception flags.
+/// Defining this to 1 causes operations on half-precision values to raise internal floating-point exception flags according to
+/// the IEEE 754 standard. These can then be cleared and checked with clearexcept(), testexcept().
+#define HALF_ERRHANDLING_FLAGS 0
+
+/// Enable exception propagation to `errno`.
+/// Defining this to 1 causes operations on half-precision values to propagate floating-point exceptions to
+/// [errno](https://en.cppreference.com/w/cpp/error/errno) from `<cerrno>`. Specifically this will propagate domain errors as
+/// [EDOM](https://en.cppreference.com/w/cpp/error/errno_macros) and pole, overflow and underflow errors as
+/// [ERANGE](https://en.cppreference.com/w/cpp/error/errno_macros). Inexact errors won't be propagated.
+#define HALF_ERRHANDLING_ERRNO 0
+
+/// Enable exception propagation to built-in floating-point platform.
+/// Defining this to 1 causes operations on half-precision values to propagate floating-point exceptions to the built-in
+/// single- and double-precision implementation's exception flags using the
+/// [C++11 floating-point environment control](https://en.cppreference.com/w/cpp/numeric/fenv) from `<cfenv>`. However, this
+/// does not work in reverse and single- or double-precision exceptions will not raise the corresponding half-precision
+/// exception flags, nor will explicitly clearing flags clear the corresponding built-in flags.
+#define HALF_ERRHANDLING_FENV 0
+
+/// Throw C++ exception on domain errors.
+/// Defining this to a string literal causes operations on half-precision values to throw a
+/// [std::domain_error](https://en.cppreference.com/w/cpp/error/domain_error) with the specified message on domain errors.
+#define HALF_ERRHANDLING_THROW_INVALID (undefined)
+
+/// Throw C++ exception on pole errors.
+/// Defining this to a string literal causes operations on half-precision values to throw a
+/// [std::domain_error](https://en.cppreference.com/w/cpp/error/domain_error) with the specified message on pole errors.
+#define HALF_ERRHANDLING_THROW_DIVBYZERO (undefined)
+
+/// Throw C++ exception on overflow errors.
+/// Defining this to a string literal causes operations on half-precision values to throw a
+/// [std::overflow_error](https://en.cppreference.com/w/cpp/error/overflow_error) with the specified message on overflows.
+#define HALF_ERRHANDLING_THROW_OVERFLOW (undefined)
+
+/// Throw C++ exception on underflow errors.
+/// Defining this to a string literal causes operations on half-precision values to throw a
+/// [std::underflow_error](https://en.cppreference.com/w/cpp/error/underflow_error) with the specified message on underflows.
+#define HALF_ERRHANDLING_THROW_UNDERFLOW (undefined)
+
+/// Throw C++ exception on rounding errors.
+/// Defining this to a string literal causes operations on half-precision values to throw a
+/// [std::range_error](https://en.cppreference.com/w/cpp/error/range_error) with the specified message on general rounding errors.
+#define HALF_ERRHANDLING_THROW_INEXACT (undefined)
+#endif
+
+#ifndef HALF_ERRHANDLING_OVERFLOW_TO_INEXACT
+/// Raise INEXACT exception on overflow.
+/// Defining this to 1 (default) causes overflow errors to automatically raise inexact exceptions in addition.
+/// These will be raised after any possible handling of the overflow exception.
+#define HALF_ERRHANDLING_OVERFLOW_TO_INEXACT 1
+#endif
+
+#ifndef HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT
+/// Raise INEXACT exception on underflow.
+/// Defining this to 1 (default) causes underflow errors to automatically raise inexact exceptions in addition.
+/// These will be raised after any possible handling of the underflow exception.
+///
+/// **Note:** This will actually cause underflow (and the accompanying inexact) exceptions to be raised *only* when the result
+/// is inexact, while if disabled bare underflow errors will be raised for *any* (possibly exact) subnormal result.
+#define HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT 1
+#endif
+
+/// Default rounding mode.
+/// This specifies the rounding mode used for all conversions between [half](\ref half_float::half)s and more precise types
+/// (unless using half_cast() and specifying the rounding mode directly) as well as in arithmetic operations and mathematical
+/// functions. It can be redefined (before including half.hpp) to one of the standard rounding modes using their respective
+/// constants or the equivalent values of
+/// [std::float_round_style](https://en.cppreference.com/w/cpp/types/numeric_limits/float_round_style):
+///
+/// `std::float_round_style` | value | rounding
+/// ---------------------------------|-------|-------------------------
+/// `std::round_indeterminate` | -1 | fastest
+/// `std::round_toward_zero` | 0 | toward zero
+/// `std::round_to_nearest` | 1 | to nearest (default)
+/// `std::round_toward_infinity` | 2 | toward positive infinity
+/// `std::round_toward_neg_infinity` | 3 | toward negative infinity
+///
+/// By default this is set to `1` (`std::round_to_nearest`), which rounds results to the nearest representable value. It can even
+/// be set to [std::numeric_limits<float>::round_style](https://en.cppreference.com/w/cpp/types/numeric_limits/round_style) to synchronize
+/// the rounding mode with that of the built-in single-precision implementation (which is likely `std::round_to_nearest`, though).
+#ifndef HALF_ROUND_STYLE
+ #define HALF_ROUND_STYLE 1 // = std::round_to_nearest
+#endif
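+
+// For example, a translation unit that prefers truncation over rounding to nearest could do
+// (illustrative usage only):
+//
+//   #define HALF_ROUND_STYLE 0   // = std::round_toward_zero
+//   #include "half.hpp"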
+
+/// Value signaling overflow.
+/// In correspondence with `HUGE_VAL[F|L]` from `<cmath>` this symbol expands to a positive value signaling the overflow of an
+/// operation, in particular it just evaluates to positive infinity.
+///
+/// **See also:** Documentation for [HUGE_VAL](https://en.cppreference.com/w/cpp/numeric/math/HUGE_VAL)
+#define HUGE_VALH std::numeric_limits<half_float::half>::infinity()
+
+/// Fast half-precision fma function.
+/// This symbol is defined if the fma() function generally executes as fast as, or faster than, a separate
+/// half-precision multiplication followed by an addition, which is always the case.
+///
+/// **See also:** Documentation for [FP_FAST_FMA](https://en.cppreference.com/w/cpp/numeric/math/fma)
+#define FP_FAST_FMAH 1
+
+/// Half rounding mode.
+/// In correspondence with `FLT_ROUNDS` from `<cfloat>` this symbol expands to the rounding mode used for
+/// half-precision operations. It is an alias for [HALF_ROUND_STYLE](\ref HALF_ROUND_STYLE).
+///
+/// **See also:** Documentation for [FLT_ROUNDS](https://en.cppreference.com/w/cpp/types/climits/FLT_ROUNDS)
+#define HLF_ROUNDS HALF_ROUND_STYLE
+
+#ifndef FP_ILOGB0
+ #define FP_ILOGB0 INT_MIN
+#endif
+#ifndef FP_ILOGBNAN
+ #define FP_ILOGBNAN INT_MAX
+#endif
+#ifndef FP_SUBNORMAL
+ #define FP_SUBNORMAL 0
+#endif
+#ifndef FP_ZERO
+ #define FP_ZERO 1
+#endif
+#ifndef FP_NAN
+ #define FP_NAN 2
+#endif
+#ifndef FP_INFINITE
+ #define FP_INFINITE 3
+#endif
+#ifndef FP_NORMAL
+ #define FP_NORMAL 4
+#endif
+
+#if !HALF_ENABLE_CPP11_CFENV && !defined(FE_ALL_EXCEPT)
+ #define FE_INVALID 0x10
+ #define FE_DIVBYZERO 0x08
+ #define FE_OVERFLOW 0x04
+ #define FE_UNDERFLOW 0x02
+ #define FE_INEXACT 0x01
+ #define FE_ALL_EXCEPT (FE_INVALID|FE_DIVBYZERO|FE_OVERFLOW|FE_UNDERFLOW|FE_INEXACT)
+#endif
+
+
+/// Main namespace for half-precision functionality.
+/// This namespace contains all the functionality provided by the library.
+namespace half_float
+{
+ class half;
+
+#if HALF_ENABLE_CPP11_USER_LITERALS
+ /// Library-defined half-precision literals.
+ /// Import this namespace to enable half-precision floating-point literals:
+ /// ~~~~{.cpp}
+ /// using namespace half_float::literal;
+    /// half_float::half f = 4.2_h;
+ /// ~~~~
+ namespace literal
+ {
+ half operator "" _h(long double);
+ }
+#endif
+
+ /// \internal
+ /// \brief Implementation details.
+ namespace detail
+ {
+ #if HALF_ENABLE_CPP11_TYPE_TRAITS
+ /// Conditional type.
+ template<bool B,typename T,typename F> struct conditional : std::conditional<B,T,F> {};
+
+ /// Helper for tag dispatching.
+ template<bool B> struct bool_type : std::integral_constant<bool,B> {};
+ using std::true_type;
+ using std::false_type;
+
+ /// Type traits for floating-point types.
+ template<typename T> struct is_float : std::is_floating_point<T> {};
+ #else
+ /// Conditional type.
+ template<bool,typename T,typename> struct conditional { typedef T type; };
+ template<typename T,typename F> struct conditional<false,T,F> { typedef F type; };
+
+ /// Helper for tag dispatching.
+ template<bool> struct bool_type {};
+ typedef bool_type<true> true_type;
+ typedef bool_type<false> false_type;
+
+ /// Type traits for floating-point types.
+ template<typename> struct is_float : false_type {};
+ template<typename T> struct is_float<const T> : is_float<T> {};
+ template<typename T> struct is_float<volatile T> : is_float<T> {};
+ template<typename T> struct is_float<const volatile T> : is_float<T> {};
+ template<> struct is_float<float> : true_type {};
+ template<> struct is_float<double> : true_type {};
+ template<> struct is_float<long double> : true_type {};
+ #endif
+
+ /// Type traits for floating-point bits.
+ template<typename T> struct bits { typedef unsigned char type; };
+ template<typename T> struct bits<const T> : bits<T> {};
+ template<typename T> struct bits<volatile T> : bits<T> {};
+ template<typename T> struct bits<const volatile T> : bits<T> {};
+
+ #if HALF_ENABLE_CPP11_CSTDINT
+ /// Unsigned integer of (at least) 16 bits width.
+ typedef std::uint_least16_t uint16;
+
+ /// Fastest unsigned integer of (at least) 32 bits width.
+ typedef std::uint_fast32_t uint32;
+
+ /// Fastest signed integer of (at least) 32 bits width.
+ typedef std::int_fast32_t int32;
+
+ /// Unsigned integer of (at least) 32 bits width.
+ template<> struct bits<float> { typedef std::uint_least32_t type; };
+
+ /// Unsigned integer of (at least) 64 bits width.
+ template<> struct bits<double> { typedef std::uint_least64_t type; };
+ #else
+ /// Unsigned integer of (at least) 16 bits width.
+ typedef unsigned short uint16;
+
+ /// Fastest unsigned integer of (at least) 32 bits width.
+ typedef unsigned long uint32;
+
+        /// Fastest signed integer of (at least) 32 bits width.
+ typedef long int32;
+
+ /// Unsigned integer of (at least) 32 bits width.
+ template<> struct bits<float> : conditional<std::numeric_limits<unsigned int>::digits>=32,unsigned int,unsigned long> {};
+
+ #if HALF_ENABLE_CPP11_LONG_LONG
+ /// Unsigned integer of (at least) 64 bits width.
+ template<> struct bits<double> : conditional<std::numeric_limits<unsigned long>::digits>=64,unsigned long,unsigned long long> {};
+ #else
+ /// Unsigned integer of (at least) 64 bits width.
+ template<> struct bits<double> { typedef unsigned long type; };
+ #endif
+ #endif
+
+ #ifdef HALF_ARITHMETIC_TYPE
+ /// Type to use for arithmetic computations and mathematic functions internally.
+ typedef HALF_ARITHMETIC_TYPE internal_t;
+ #endif
+
+ /// Tag type for binary construction.
+ struct binary_t {};
+
+ /// Tag for binary construction.
+ HALF_CONSTEXPR_CONST binary_t binary = binary_t();
+
+ /// \name Implementation defined classification and arithmetic
+ /// \{
+
+ /// Check for infinity.
+ /// \tparam T argument type (builtin floating-point type)
+ /// \param arg value to query
+ /// \retval true if infinity
+ /// \retval false else
+ template<typename T> bool builtin_isinf(T arg)
+ {
+ #if HALF_ENABLE_CPP11_CMATH
+ return std::isinf(arg);
+ #elif defined(_MSC_VER)
+ return !::_finite(static_cast<double>(arg)) && !::_isnan(static_cast<double>(arg));
+ #else
+ return arg == std::numeric_limits<T>::infinity() || arg == -std::numeric_limits<T>::infinity();
+ #endif
+ }
+
+ /// Check for NaN.
+ /// \tparam T argument type (builtin floating-point type)
+ /// \param arg value to query
+ /// \retval true if not a number
+ /// \retval false else
+ template<typename T> bool builtin_isnan(T arg)
+ {
+ #if HALF_ENABLE_CPP11_CMATH
+ return std::isnan(arg);
+ #elif defined(_MSC_VER)
+ return ::_isnan(static_cast<double>(arg)) != 0;
+ #else
+ return arg != arg;
+ #endif
+ }
+
+ /// Check sign.
+ /// \tparam T argument type (builtin floating-point type)
+ /// \param arg value to query
+ /// \retval true if signbit set
+ /// \retval false else
+ template<typename T> bool builtin_signbit(T arg)
+ {
+ #if HALF_ENABLE_CPP11_CMATH
+ return std::signbit(arg);
+ #else
+ return arg < T() || (arg == T() && T(1)/arg < T());
+ #endif
+ }
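+        // Note on the fallback branch above: it also catches negative zero, since for arg == -0.0
+        // the expression T(1)/arg evaluates to negative infinity, which compares less than T().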
+
+ /// Platform-independent sign mask.
+ /// \param arg integer value in two's complement
+ /// \retval -1 if \a arg negative
+ /// \retval 0 if \a arg positive
+ inline uint32 sign_mask(uint32 arg)
+ {
+ static const int N = std::numeric_limits<uint32>::digits - 1;
+ #if HALF_TWOS_COMPLEMENT_INT
+ return static_cast<int32>(arg) >> N;
+ #else
+ return -((arg>>N)&1);
+ #endif
+ }
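+        // Worked example (assuming a 32-bit uint32): sign_mask(0xFFFFFFF0) yields 0xFFFFFFFF
+        // (all ones) because the sign bit is set, while sign_mask(5) yields 0.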
+
+ /// Platform-independent arithmetic right shift.
+ /// \param arg integer value in two's complement
+ /// \param i shift amount (at most 31)
+ /// \return \a arg right shifted for \a i bits with possible sign extension
+ inline uint32 arithmetic_shift(uint32 arg, int i)
+ {
+ #if HALF_TWOS_COMPLEMENT_INT
+ return static_cast<int32>(arg) >> i;
+ #else
+ return static_cast<int32>(arg)/(static_cast<int32>(1)<<i) - ((arg>>(std::numeric_limits<uint32>::digits-1))&1);
+ #endif
+ }
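+        // Worked example: arithmetic_shift(0xFFFFFFF0, 4) shifts -16 right by 4 with sign
+        // extension, yielding 0xFFFFFFFF (-1), whereas a logical shift would give 0x0FFFFFFF.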
+
+ /// \}
+ /// \name Error handling
+ /// \{
+
+ /// Internal exception flags.
+ /// \return reference to global exception flags
+ inline int& errflags() { HALF_THREAD_LOCAL int flags = 0; return flags; }
+
+ /// Raise floating-point exception.
+ /// \param flags exceptions to raise
+ /// \param cond condition to raise exceptions for
+ inline void raise(int HALF_UNUSED_NOERR(flags), bool HALF_UNUSED_NOERR(cond) = true)
+ {
+ #if HALF_ERRHANDLING
+ if(!cond)
+ return;
+ #if HALF_ERRHANDLING_FLAGS
+ errflags() |= flags;
+ #endif
+ #if HALF_ERRHANDLING_ERRNO
+ if(flags & FE_INVALID)
+ errno = EDOM;
+ else if(flags & (FE_DIVBYZERO|FE_OVERFLOW|FE_UNDERFLOW))
+ errno = ERANGE;
+ #endif
+ #if HALF_ERRHANDLING_FENV && HALF_ENABLE_CPP11_CFENV
+ std::feraiseexcept(flags);
+ #endif
+ #ifdef HALF_ERRHANDLING_THROW_INVALID
+ if(flags & FE_INVALID)
+ throw std::domain_error(HALF_ERRHANDLING_THROW_INVALID);
+ #endif
+ #ifdef HALF_ERRHANDLING_THROW_DIVBYZERO
+ if(flags & FE_DIVBYZERO)
+ throw std::domain_error(HALF_ERRHANDLING_THROW_DIVBYZERO);
+ #endif
+ #ifdef HALF_ERRHANDLING_THROW_OVERFLOW
+ if(flags & FE_OVERFLOW)
+ throw std::overflow_error(HALF_ERRHANDLING_THROW_OVERFLOW);
+ #endif
+ #ifdef HALF_ERRHANDLING_THROW_UNDERFLOW
+ if(flags & FE_UNDERFLOW)
+ throw std::underflow_error(HALF_ERRHANDLING_THROW_UNDERFLOW);
+ #endif
+ #ifdef HALF_ERRHANDLING_THROW_INEXACT
+ if(flags & FE_INEXACT)
+ throw std::range_error(HALF_ERRHANDLING_THROW_INEXACT);
+ #endif
+ #if HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT
+ if((flags & FE_UNDERFLOW) && !(flags & FE_INEXACT))
+ raise(FE_INEXACT);
+ #endif
+ #if HALF_ERRHANDLING_OVERFLOW_TO_INEXACT
+ if((flags & FE_OVERFLOW) && !(flags & FE_INEXACT))
+ raise(FE_INEXACT);
+ #endif
+ #endif
+ }
+
+ /// Check and signal for any NaN.
+ /// \param x first half-precision value to check
+ /// \param y second half-precision value to check
+ /// \retval true if either \a x or \a y is NaN
+ /// \retval false else
+ /// \exception FE_INVALID if \a x or \a y is NaN
+ inline HALF_CONSTEXPR_NOERR bool compsignal(unsigned int x, unsigned int y)
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_INVALID, (x&0x7FFF)>0x7C00 || (y&0x7FFF)>0x7C00);
+ #endif
+ return (x&0x7FFF) > 0x7C00 || (y&0x7FFF) > 0x7C00;
+ }
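+        // The test (x&0x7FFF) > 0x7C00 identifies any NaN: masking off the sign leaves exponent
+        // and mantissa, and only NaN patterns exceed the infinity pattern 0x7C00 (for example a
+        // quiet NaN such as 0x7E00 does, while +infinity 0x7C00 itself does not).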
+
+ /// Signal and silence signaling NaN.
+ /// \param nan half-precision NaN value
+ /// \return quiet NaN
+ /// \exception FE_INVALID if \a nan is signaling NaN
+ inline HALF_CONSTEXPR_NOERR unsigned int signal(unsigned int nan)
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_INVALID, !(nan&0x200));
+ #endif
+ return nan | 0x200;
+ }
+
+ /// Signal and silence signaling NaNs.
+ /// \param x first half-precision value to check
+ /// \param y second half-precision value to check
+ /// \return quiet NaN
+ /// \exception FE_INVALID if \a x or \a y is signaling NaN
+ inline HALF_CONSTEXPR_NOERR unsigned int signal(unsigned int x, unsigned int y)
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_INVALID, ((x&0x7FFF)>0x7C00 && !(x&0x200)) || ((y&0x7FFF)>0x7C00 && !(y&0x200)));
+ #endif
+ return ((x&0x7FFF)>0x7C00) ? (x|0x200) : (y|0x200);
+ }
+
+ /// Signal and silence signaling NaNs.
+ /// \param x first half-precision value to check
+ /// \param y second half-precision value to check
+ /// \param z third half-precision value to check
+ /// \return quiet NaN
+ /// \exception FE_INVALID if \a x, \a y or \a z is signaling NaN
+ inline HALF_CONSTEXPR_NOERR unsigned int signal(unsigned int x, unsigned int y, unsigned int z)
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_INVALID, ((x&0x7FFF)>0x7C00 && !(x&0x200)) || ((y&0x7FFF)>0x7C00 && !(y&0x200)) || ((z&0x7FFF)>0x7C00 && !(z&0x200)));
+ #endif
+ return ((x&0x7FFF)>0x7C00) ? (x|0x200) : ((y&0x7FFF)>0x7C00) ? (y|0x200) : (z|0x200);
+ }
+
+ /// Select value or signaling NaN.
+ /// \param x preferred half-precision value
+ /// \param y ignored half-precision value except for signaling NaN
+ /// \return \a y if signaling NaN, \a x otherwise
+ /// \exception FE_INVALID if \a y is signaling NaN
+ inline HALF_CONSTEXPR_NOERR unsigned int select(unsigned int x, unsigned int HALF_UNUSED_NOERR(y))
+ {
+ #if HALF_ERRHANDLING
+ return (((y&0x7FFF)>0x7C00) && !(y&0x200)) ? signal(y) : x;
+ #else
+ return x;
+ #endif
+ }
+
+ /// Raise domain error and return NaN.
+        /// \return quiet NaN
+ /// \exception FE_INVALID
+ inline HALF_CONSTEXPR_NOERR unsigned int invalid()
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_INVALID);
+ #endif
+ return 0x7FFF;
+ }
+
+ /// Raise pole error and return infinity.
+ /// \param sign half-precision value with sign bit only
+ /// \return half-precision infinity with sign of \a sign
+ /// \exception FE_DIVBYZERO
+ inline HALF_CONSTEXPR_NOERR unsigned int pole(unsigned int sign = 0)
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_DIVBYZERO);
+ #endif
+ return sign | 0x7C00;
+ }
+
+ /// Check value for underflow.
+ /// \param arg non-zero half-precision value to check
+ /// \return \a arg
+ /// \exception FE_UNDERFLOW if arg is subnormal
+ inline HALF_CONSTEXPR_NOERR unsigned int check_underflow(unsigned int arg)
+ {
+ #if HALF_ERRHANDLING && !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT
+ raise(FE_UNDERFLOW, !(arg&0x7C00));
+ #endif
+ return arg;
+ }
+
+ /// \}
+ /// \name Conversion and rounding
+ /// \{
+
+ /// Half-precision overflow.
+ /// \tparam R rounding mode to use
+ /// \param sign half-precision value with sign bit only
+ /// \return rounded overflowing half-precision value
+ /// \exception FE_OVERFLOW
+ template<std::float_round_style R> HALF_CONSTEXPR_NOERR unsigned int overflow(unsigned int sign = 0)
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_OVERFLOW);
+ #endif
+ return (R==std::round_toward_infinity) ? (sign+0x7C00-(sign>>15)) :
+ (R==std::round_toward_neg_infinity) ? (sign+0x7BFF+(sign>>15)) :
+ (R==std::round_toward_zero) ? (sign|0x7BFF) :
+ (sign|0x7C00);
+ }
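+        // For example, overflow<std::round_to_nearest>(0x8000) yields 0xFC00 (negative infinity),
+        // while overflow<std::round_toward_zero>(0x8000) yields 0xFBFF, the most negative finite half.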
+
+ /// Half-precision underflow.
+ /// \tparam R rounding mode to use
+ /// \param sign half-precision value with sign bit only
+ /// \return rounded underflowing half-precision value
+ /// \exception FE_UNDERFLOW
+ template<std::float_round_style R> HALF_CONSTEXPR_NOERR unsigned int underflow(unsigned int sign = 0)
+ {
+ #if HALF_ERRHANDLING
+ raise(FE_UNDERFLOW);
+ #endif
+ return (R==std::round_toward_infinity) ? (sign+1-(sign>>15)) :
+ (R==std::round_toward_neg_infinity) ? (sign+(sign>>15)) :
+ sign;
+ }
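+        // For example, underflow<std::round_toward_infinity>(0) yields 0x0001 (smallest positive
+        // subnormal), while underflow<std::round_toward_infinity>(0x8000) yields 0x8000 (negative zero).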
+
+ /// Round half-precision number.
+ /// \tparam R rounding mode to use
+ /// \tparam I `true` to always raise INEXACT exception, `false` to raise only for rounded results
+ /// \param value finite half-precision number to round
+ /// \param g guard bit (most significant discarded bit)
+ /// \param s sticky bit (or of all but the most significant discarded bits)
+ /// \return rounded half-precision value
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded or \a I is `true`
+ template<std::float_round_style R,bool I> HALF_CONSTEXPR_NOERR unsigned int rounded(unsigned int value, int g, int s)
+ {
+ #if HALF_ERRHANDLING
+ value += (R==std::round_to_nearest) ? (g&(s|value)) :
+ (R==std::round_toward_infinity) ? (~(value>>15)&(g|s)) :
+ (R==std::round_toward_neg_infinity) ? ((value>>15)&(g|s)) : 0;
+ if((value&0x7C00) == 0x7C00)
+ raise(FE_OVERFLOW);
+ else if(value & 0x7C00)
+ raise(FE_INEXACT, I || (g|s)!=0);
+ else
+ raise(FE_UNDERFLOW, !(HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT) || I || (g|s)!=0);
+ return value;
+ #else
+ return (R==std::round_to_nearest) ? (value+(g&(s|value))) :
+ (R==std::round_toward_infinity) ? (value+(~(value>>15)&(g|s))) :
+ (R==std::round_toward_neg_infinity) ? (value+((value>>15)&(g|s))) :
+ value;
+ #endif
+ }
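+        // Round-to-nearest-even example: rounded<std::round_to_nearest,false>(0x3C00, 1, 0) keeps
+        // 0x3C00 (1.0) because the tie goes to the even mantissa, whereas
+        // rounded<std::round_to_nearest,false>(0x3C01, 1, 0) becomes 0x3C02 since the mantissa is odd.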
+
+ /// Round half-precision number to nearest integer value.
+ /// \tparam R rounding mode to use
+ /// \tparam E `true` for round to even, `false` for round away from zero
+ /// \tparam I `true` to raise INEXACT exception (if inexact), `false` to never raise it
+ /// \param value half-precision value to round
+ /// \return half-precision bits for nearest integral value
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_INEXACT if value had to be rounded and \a I is `true`
+ template<std::float_round_style R,bool E,bool I> unsigned int integral(unsigned int value)
+ {
+ unsigned int abs = value & 0x7FFF;
+ if(abs < 0x3C00)
+ {
+ raise(FE_INEXACT, I);
+ return ((R==std::round_to_nearest) ? (0x3C00&-static_cast<unsigned>(abs>=(0x3800+E))) :
+ (R==std::round_toward_infinity) ? (0x3C00&-(~(value>>15)&(abs!=0))) :
+ (R==std::round_toward_neg_infinity) ? (0x3C00&-static_cast<unsigned>(value>0x8000)) :
+ 0) | (value&0x8000);
+ }
+ if(abs >= 0x6400)
+ return (abs>0x7C00) ? signal(value) : value;
+ unsigned int exp = 25 - (abs>>10), mask = (1<<exp) - 1;
+ raise(FE_INEXACT, I && (value&mask));
+ return (( (R==std::round_to_nearest) ? ((1<<(exp-1))-(~(value>>exp)&E)) :
+ (R==std::round_toward_infinity) ? (mask&((value>>15)-1)) :
+ (R==std::round_toward_neg_infinity) ? (mask&-(value>>15)) :
+ 0) + value) & ~mask;
+ }
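+        // Worked example: integral<std::round_to_nearest,true,false>(0x3E00), i.e. 1.5, rounds to
+        // the even neighbour 2.0: abs = 0x3E00, exp = 25 - 15 = 10, mask = 0x3FF, and
+        // (0x200 + 0x3E00) & ~0x3FF = 0x4000.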
+
+ /// Convert fixed point to half-precision floating-point.
+ /// \tparam R rounding mode to use
+ /// \tparam F number of fractional bits in [11,31]
+ /// \tparam S `true` for signed, `false` for unsigned
+ /// \tparam N `true` for additional normalization step, `false` if already normalized to 1.F
+ /// \tparam I `true` to always raise INEXACT exception, `false` to raise only for rounded results
+ /// \param m mantissa in Q1.F fixed point format
+ /// \param exp biased exponent - 1
+ /// \param sign half-precision value with sign bit only
+ /// \param s sticky bit (or of all but the most significant already discarded bits)
+ /// \return value converted to half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded or \a I is `true`
+ template<std::float_round_style R,unsigned int F,bool S,bool N,bool I> unsigned int fixed2half(uint32 m, int exp = 14, unsigned int sign = 0, int s = 0)
+ {
+ if(S)
+ {
+ uint32 msign = sign_mask(m);
+ m = (m^msign) - msign;
+ sign = msign & 0x8000;
+ }
+ if(N)
+ for(; m<(static_cast<uint32>(1)<<F) && exp; m<<=1,--exp) ;
+ else if(exp < 0)
+ return rounded<R,I>(sign+(m>>(F-10-exp)), (m>>(F-11-exp))&1, s|((m&((static_cast<uint32>(1)<<(F-11-exp))-1))!=0));
+ return rounded<R,I>(sign+(exp<<10)+(m>>(F-10)), (m>>(F-11))&1, s|((m&((static_cast<uint32>(1)<<(F-11))-1))!=0));
+ }
+
+ /// Convert IEEE single-precision to half-precision.
+ /// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf).
+ /// \tparam R rounding mode to use
+ /// \param value single-precision value to convert
+ /// \return rounded half-precision value
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded
+ template<std::float_round_style R> unsigned int float2half_impl(float value, true_type)
+ {
+ #if HALF_ENABLE_F16C_INTRINSICS
+ return _mm_cvtsi128_si32(_mm_cvtps_ph(_mm_set_ss(value),
+ (R==std::round_to_nearest) ? _MM_FROUND_TO_NEAREST_INT :
+ (R==std::round_toward_zero) ? _MM_FROUND_TO_ZERO :
+ (R==std::round_toward_infinity) ? _MM_FROUND_TO_POS_INF :
+ (R==std::round_toward_neg_infinity) ? _MM_FROUND_TO_NEG_INF :
+ _MM_FROUND_CUR_DIRECTION));
+ #else
+ bits<float>::type fbits;
+ std::memcpy(&fbits, &value, sizeof(float));
+ #if 1
+ unsigned int sign = (fbits>>16) & 0x8000;
+ fbits &= 0x7FFFFFFF;
+ if(fbits >= 0x7F800000)
+ return sign | 0x7C00 | ((fbits>0x7F800000) ? (0x200|((fbits>>13)&0x3FF)) : 0);
+ if(fbits >= 0x47800000)
+ return overflow<R>(sign);
+ if(fbits >= 0x38800000)
+ return rounded<R,false>(sign|(((fbits>>23)-112)<<10)|((fbits>>13)&0x3FF), (fbits>>12)&1, (fbits&0xFFF)!=0);
+ if(fbits >= 0x33000000)
+ {
+ int i = 125 - (fbits>>23);
+ fbits = (fbits&0x7FFFFF) | 0x800000;
+ return rounded<R,false>(sign|(fbits>>(i+1)), (fbits>>i)&1, (fbits&((static_cast<uint32>(1)<<i)-1))!=0);
+ }
+ if(fbits != 0)
+ return underflow<R>(sign);
+ return sign;
+ #else
+ static const uint16 base_table[512] = {
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100,
+ 0x0200, 0x0400, 0x0800, 0x0C00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x2400, 0x2800, 0x2C00, 0x3000, 0x3400, 0x3800, 0x3C00,
+ 0x4000, 0x4400, 0x4800, 0x4C00, 0x5000, 0x5400, 0x5800, 0x5C00, 0x6000, 0x6400, 0x6800, 0x6C00, 0x7000, 0x7400, 0x7800, 0x7BFF,
+ 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF,
+ 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF,
+ 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF,
+ 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF,
+ 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF,
+ 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF,
+ 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7BFF, 0x7C00,
+ 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+ 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8001, 0x8002, 0x8004, 0x8008, 0x8010, 0x8020, 0x8040, 0x8080, 0x8100,
+ 0x8200, 0x8400, 0x8800, 0x8C00, 0x9000, 0x9400, 0x9800, 0x9C00, 0xA000, 0xA400, 0xA800, 0xAC00, 0xB000, 0xB400, 0xB800, 0xBC00,
+ 0xC000, 0xC400, 0xC800, 0xCC00, 0xD000, 0xD400, 0xD800, 0xDC00, 0xE000, 0xE400, 0xE800, 0xEC00, 0xF000, 0xF400, 0xF800, 0xFBFF,
+ 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF,
+ 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF,
+ 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF,
+ 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF,
+ 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF,
+ 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF,
+ 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFBFF, 0xFC00 };
+ static const unsigned char shift_table[256] = {
+ 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13 };
+ int sexp = fbits >> 23, exp = sexp & 0xFF, i = shift_table[exp];
+ fbits &= 0x7FFFFF;
+ uint32 m = (fbits|((exp!=0)<<23)) & -static_cast<uint32>(exp!=0xFF);
+ return rounded<R,false>(base_table[sexp]+(fbits>>i), (m>>(i-1))&1, (((static_cast<uint32>(1)<<(i-1))-1)&m)!=0);
+ #endif
+ #endif
+ }
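+        // Sketch of the normal-range path above: for value = 1.0f the bit pattern is 0x3F800000,
+        // which lies in [0x38800000, 0x47800000); the exponent is rebiased ((127 - 112) << 10 = 0x3C00),
+        // the top 10 mantissa bits are zero, and guard/sticky are zero, so the result is 0x3C00.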
+
+ /// Convert IEEE double-precision to half-precision.
+ /// \tparam R rounding mode to use
+ /// \param value double-precision value to convert
+ /// \return rounded half-precision value
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded
+ template<std::float_round_style R> unsigned int float2half_impl(double value, true_type)
+ {
+ #if HALF_ENABLE_F16C_INTRINSICS
+ if(R == std::round_indeterminate)
+ return _mm_cvtsi128_si32(_mm_cvtps_ph(_mm_cvtpd_ps(_mm_set_sd(value)), _MM_FROUND_CUR_DIRECTION));
+ #endif
+ bits<double>::type dbits;
+ std::memcpy(&dbits, &value, sizeof(double));
+ uint32 hi = dbits >> 32, lo = dbits & 0xFFFFFFFF;
+ unsigned int sign = (hi>>16) & 0x8000;
+ hi &= 0x7FFFFFFF;
+ if(hi >= 0x7FF00000)
+ return sign | 0x7C00 | ((dbits&0xFFFFFFFFFFFFF) ? (0x200|((hi>>10)&0x3FF)) : 0);
+ if(hi >= 0x40F00000)
+ return overflow<R>(sign);
+ if(hi >= 0x3F100000)
+ return rounded<R,false>(sign|(((hi>>20)-1008)<<10)|((hi>>10)&0x3FF), (hi>>9)&1, ((hi&0x1FF)|lo)!=0);
+ if(hi >= 0x3E600000)
+ {
+ int i = 1018 - (hi>>20);
+ hi = (hi&0xFFFFF) | 0x100000;
+ return rounded<R,false>(sign|(hi>>(i+1)), (hi>>i)&1, ((hi&((static_cast<uint32>(1)<<i)-1))|lo)!=0);
+ }
+ if((hi|lo) != 0)
+ return underflow<R>(sign);
+ return sign;
+ }
+
+ /// Convert non-IEEE floating-point to half-precision.
+ /// \tparam R rounding mode to use
+ /// \tparam T source type (builtin floating-point type)
+ /// \param value floating-point value to convert
+ /// \return rounded half-precision value
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded
+ template<std::float_round_style R,typename T> unsigned int float2half_impl(T value, ...)
+ {
+ unsigned int hbits = static_cast<unsigned>(builtin_signbit(value)) << 15;
+ if(value == T())
+ return hbits;
+ if(builtin_isnan(value))
+ return hbits | 0x7FFF;
+ if(builtin_isinf(value))
+ return hbits | 0x7C00;
+ int exp;
+ std::frexp(value, &exp);
+ if(exp > 16)
+ return overflow<R>(hbits);
+ if(exp < -13)
+ value = std::ldexp(value, 25);
+ else
+ {
+ value = std::ldexp(value, 12-exp);
+ hbits |= ((exp+13)<<10);
+ }
+ T ival, frac = std::modf(value, &ival);
+ int m = std::abs(static_cast<int>(ival));
+ return rounded<R,false>(hbits+(m>>1), m&1, frac!=T());
+ }
+
+ /// Convert floating-point to half-precision.
+ /// \tparam R rounding mode to use
+ /// \tparam T source type (builtin floating-point type)
+ /// \param value floating-point value to convert
+ /// \return rounded half-precision value
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded
+ template<std::float_round_style R,typename T> unsigned int float2half(T value)
+ {
+ return float2half_impl<R>(value, bool_type<std::numeric_limits<T>::is_iec559&&sizeof(typename bits<T>::type)==sizeof(T)>());
+ }
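+        // The bool_type dispatch above selects the bit-level converters when T is IEC 559 and
+        // matches the size of bits<T>::type (the usual case for float and double) and otherwise
+        // falls back to the generic frexp/ldexp implementation; for example,
+        // float2half<std::round_to_nearest>(1.0f) normally takes the single-precision fast path.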
+
+ /// Convert integer to half-precision floating-point.
+ /// \tparam R rounding mode to use
+ /// \tparam T type to convert (builtin integer type)
+ /// \param value integral value to convert
+ /// \return rounded half-precision value
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_INEXACT if value had to be rounded
+ template<std::float_round_style R,typename T> unsigned int int2half(T value)
+ {
+ unsigned int bits = static_cast<unsigned>(value<0) << 15;
+ if(!value)
+ return bits;
+ if(bits)
+ value = -value;
+ if(value > 0xFFFF)
+ return overflow<R>(bits);
+ unsigned int m = static_cast<unsigned int>(value), exp = 24;
+ for(; m<0x400; m<<=1,--exp) ;
+ for(; m>0x7FF; m>>=1,++exp) ;
+ bits |= (exp<<10) + m;
+ return (exp>24) ? rounded<R,false>(bits, (value>>(exp-25))&1, (((1<<(exp-25))-1)&value)!=0) : bits;
+ }
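+        // Worked example: int2half<std::round_to_nearest>(100) normalizes m = 100 up to 1600 (0x640)
+        // with exp = 20, so bits = (20<<10) + 0x640 = 0x5640, i.e. 1.5625 * 2^6 = 100 exactly,
+        // and no rounding step is needed since exp <= 24.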
+
+ /// Convert half-precision to IEEE single-precision.
+ /// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf).
+ /// \param value half-precision value to convert
+ /// \return single-precision value
+ inline float half2float_impl(unsigned int value, float, true_type)
+ {
+ #if HALF_ENABLE_F16C_INTRINSICS
+ return _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(value)));
+ #else
+ #if 0
+ bits<float>::type fbits = static_cast<bits<float>::type>(value&0x8000) << 16;
+ int abs = value & 0x7FFF;
+ if(abs)
+ {
+ fbits |= 0x38000000 << static_cast<unsigned>(abs>=0x7C00);
+ for(; abs<0x400; abs<<=1,fbits-=0x800000) ;
+ fbits += static_cast<bits<float>::type>(abs) << 13;
+ }
+ #else
+ static const bits<float>::type mantissa_table[2048] = {
+ 0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34A00000, 0x34C00000, 0x34E00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000,
+ 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35A00000, 0x35A80000, 0x35B00000, 0x35B80000, 0x35C00000, 0x35C80000, 0x35D00000, 0x35D80000, 0x35E00000, 0x35E80000, 0x35F00000, 0x35F80000,
+ 0x36000000, 0x36040000, 0x36080000, 0x360C0000, 0x36100000, 0x36140000, 0x36180000, 0x361C0000, 0x36200000, 0x36240000, 0x36280000, 0x362C0000, 0x36300000, 0x36340000, 0x36380000, 0x363C0000,
+ 0x36400000, 0x36440000, 0x36480000, 0x364C0000, 0x36500000, 0x36540000, 0x36580000, 0x365C0000, 0x36600000, 0x36640000, 0x36680000, 0x366C0000, 0x36700000, 0x36740000, 0x36780000, 0x367C0000,
+ 0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000, 0x368A0000, 0x368C0000, 0x368E0000, 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369A0000, 0x369C0000, 0x369E0000,
+ 0x36A00000, 0x36A20000, 0x36A40000, 0x36A60000, 0x36A80000, 0x36AA0000, 0x36AC0000, 0x36AE0000, 0x36B00000, 0x36B20000, 0x36B40000, 0x36B60000, 0x36B80000, 0x36BA0000, 0x36BC0000, 0x36BE0000,
+ 0x36C00000, 0x36C20000, 0x36C40000, 0x36C60000, 0x36C80000, 0x36CA0000, 0x36CC0000, 0x36CE0000, 0x36D00000, 0x36D20000, 0x36D40000, 0x36D60000, 0x36D80000, 0x36DA0000, 0x36DC0000, 0x36DE0000,
+ 0x36E00000, 0x36E20000, 0x36E40000, 0x36E60000, 0x36E80000, 0x36EA0000, 0x36EC0000, 0x36EE0000, 0x36F00000, 0x36F20000, 0x36F40000, 0x36F60000, 0x36F80000, 0x36FA0000, 0x36FC0000, 0x36FE0000,
+ 0x37000000, 0x37010000, 0x37020000, 0x37030000, 0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000, 0x370A0000, 0x370B0000, 0x370C0000, 0x370D0000, 0x370E0000, 0x370F0000,
+ 0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000, 0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371A0000, 0x371B0000, 0x371C0000, 0x371D0000, 0x371E0000, 0x371F0000,
+ 0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000, 0x37280000, 0x37290000, 0x372A0000, 0x372B0000, 0x372C0000, 0x372D0000, 0x372E0000, 0x372F0000,
+ 0x37300000, 0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000, 0x373A0000, 0x373B0000, 0x373C0000, 0x373D0000, 0x373E0000, 0x373F0000,
+ 0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374A0000, 0x374B0000, 0x374C0000, 0x374D0000, 0x374E0000, 0x374F0000,
+ 0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000, 0x37580000, 0x37590000, 0x375A0000, 0x375B0000, 0x375C0000, 0x375D0000, 0x375E0000, 0x375F0000,
+ 0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000, 0x376A0000, 0x376B0000, 0x376C0000, 0x376D0000, 0x376E0000, 0x376F0000,
+ 0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377A0000, 0x377B0000, 0x377C0000, 0x377D0000, 0x377E0000, 0x377F0000,
+ 0x37800000, 0x37808000, 0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000, 0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000,
+ 0x37880000, 0x37888000, 0x37890000, 0x37898000, 0x378A0000, 0x378A8000, 0x378B0000, 0x378B8000, 0x378C0000, 0x378C8000, 0x378D0000, 0x378D8000, 0x378E0000, 0x378E8000, 0x378F0000, 0x378F8000,
+ 0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000, 0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000, 0x37978000,
+ 0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379A0000, 0x379A8000, 0x379B0000, 0x379B8000, 0x379C0000, 0x379C8000, 0x379D0000, 0x379D8000, 0x379E0000, 0x379E8000, 0x379F0000, 0x379F8000,
+ 0x37A00000, 0x37A08000, 0x37A10000, 0x37A18000, 0x37A20000, 0x37A28000, 0x37A30000, 0x37A38000, 0x37A40000, 0x37A48000, 0x37A50000, 0x37A58000, 0x37A60000, 0x37A68000, 0x37A70000, 0x37A78000,
+ 0x37A80000, 0x37A88000, 0x37A90000, 0x37A98000, 0x37AA0000, 0x37AA8000, 0x37AB0000, 0x37AB8000, 0x37AC0000, 0x37AC8000, 0x37AD0000, 0x37AD8000, 0x37AE0000, 0x37AE8000, 0x37AF0000, 0x37AF8000,
+ 0x37B00000, 0x37B08000, 0x37B10000, 0x37B18000, 0x37B20000, 0x37B28000, 0x37B30000, 0x37B38000, 0x37B40000, 0x37B48000, 0x37B50000, 0x37B58000, 0x37B60000, 0x37B68000, 0x37B70000, 0x37B78000,
+ 0x37B80000, 0x37B88000, 0x37B90000, 0x37B98000, 0x37BA0000, 0x37BA8000, 0x37BB0000, 0x37BB8000, 0x37BC0000, 0x37BC8000, 0x37BD0000, 0x37BD8000, 0x37BE0000, 0x37BE8000, 0x37BF0000, 0x37BF8000,
+ 0x37C00000, 0x37C08000, 0x37C10000, 0x37C18000, 0x37C20000, 0x37C28000, 0x37C30000, 0x37C38000, 0x37C40000, 0x37C48000, 0x37C50000, 0x37C58000, 0x37C60000, 0x37C68000, 0x37C70000, 0x37C78000,
+ 0x37C80000, 0x37C88000, 0x37C90000, 0x37C98000, 0x37CA0000, 0x37CA8000, 0x37CB0000, 0x37CB8000, 0x37CC0000, 0x37CC8000, 0x37CD0000, 0x37CD8000, 0x37CE0000, 0x37CE8000, 0x37CF0000, 0x37CF8000,
+ 0x37D00000, 0x37D08000, 0x37D10000, 0x37D18000, 0x37D20000, 0x37D28000, 0x37D30000, 0x37D38000, 0x37D40000, 0x37D48000, 0x37D50000, 0x37D58000, 0x37D60000, 0x37D68000, 0x37D70000, 0x37D78000,
+ 0x37D80000, 0x37D88000, 0x37D90000, 0x37D98000, 0x37DA0000, 0x37DA8000, 0x37DB0000, 0x37DB8000, 0x37DC0000, 0x37DC8000, 0x37DD0000, 0x37DD8000, 0x37DE0000, 0x37DE8000, 0x37DF0000, 0x37DF8000,
+ 0x37E00000, 0x37E08000, 0x37E10000, 0x37E18000, 0x37E20000, 0x37E28000, 0x37E30000, 0x37E38000, 0x37E40000, 0x37E48000, 0x37E50000, 0x37E58000, 0x37E60000, 0x37E68000, 0x37E70000, 0x37E78000,
+ 0x37E80000, 0x37E88000, 0x37E90000, 0x37E98000, 0x37EA0000, 0x37EA8000, 0x37EB0000, 0x37EB8000, 0x37EC0000, 0x37EC8000, 0x37ED0000, 0x37ED8000, 0x37EE0000, 0x37EE8000, 0x37EF0000, 0x37EF8000,
+ 0x37F00000, 0x37F08000, 0x37F10000, 0x37F18000, 0x37F20000, 0x37F28000, 0x37F30000, 0x37F38000, 0x37F40000, 0x37F48000, 0x37F50000, 0x37F58000, 0x37F60000, 0x37F68000, 0x37F70000, 0x37F78000,
+ 0x37F80000, 0x37F88000, 0x37F90000, 0x37F98000, 0x37FA0000, 0x37FA8000, 0x37FB0000, 0x37FB8000, 0x37FC0000, 0x37FC8000, 0x37FD0000, 0x37FD8000, 0x37FE0000, 0x37FE8000, 0x37FF0000, 0x37FF8000,
+ 0x38000000, 0x38004000, 0x38008000, 0x3800C000, 0x38010000, 0x38014000, 0x38018000, 0x3801C000, 0x38020000, 0x38024000, 0x38028000, 0x3802C000, 0x38030000, 0x38034000, 0x38038000, 0x3803C000,
+ 0x38040000, 0x38044000, 0x38048000, 0x3804C000, 0x38050000, 0x38054000, 0x38058000, 0x3805C000, 0x38060000, 0x38064000, 0x38068000, 0x3806C000, 0x38070000, 0x38074000, 0x38078000, 0x3807C000,
+ 0x38080000, 0x38084000, 0x38088000, 0x3808C000, 0x38090000, 0x38094000, 0x38098000, 0x3809C000, 0x380A0000, 0x380A4000, 0x380A8000, 0x380AC000, 0x380B0000, 0x380B4000, 0x380B8000, 0x380BC000,
+ 0x380C0000, 0x380C4000, 0x380C8000, 0x380CC000, 0x380D0000, 0x380D4000, 0x380D8000, 0x380DC000, 0x380E0000, 0x380E4000, 0x380E8000, 0x380EC000, 0x380F0000, 0x380F4000, 0x380F8000, 0x380FC000,
+ 0x38100000, 0x38104000, 0x38108000, 0x3810C000, 0x38110000, 0x38114000, 0x38118000, 0x3811C000, 0x38120000, 0x38124000, 0x38128000, 0x3812C000, 0x38130000, 0x38134000, 0x38138000, 0x3813C000,
+ 0x38140000, 0x38144000, 0x38148000, 0x3814C000, 0x38150000, 0x38154000, 0x38158000, 0x3815C000, 0x38160000, 0x38164000, 0x38168000, 0x3816C000, 0x38170000, 0x38174000, 0x38178000, 0x3817C000,
+ 0x38180000, 0x38184000, 0x38188000, 0x3818C000, 0x38190000, 0x38194000, 0x38198000, 0x3819C000, 0x381A0000, 0x381A4000, 0x381A8000, 0x381AC000, 0x381B0000, 0x381B4000, 0x381B8000, 0x381BC000,
+ 0x381C0000, 0x381C4000, 0x381C8000, 0x381CC000, 0x381D0000, 0x381D4000, 0x381D8000, 0x381DC000, 0x381E0000, 0x381E4000, 0x381E8000, 0x381EC000, 0x381F0000, 0x381F4000, 0x381F8000, 0x381FC000,
+ 0x38200000, 0x38204000, 0x38208000, 0x3820C000, 0x38210000, 0x38214000, 0x38218000, 0x3821C000, 0x38220000, 0x38224000, 0x38228000, 0x3822C000, 0x38230000, 0x38234000, 0x38238000, 0x3823C000,
+ 0x38240000, 0x38244000, 0x38248000, 0x3824C000, 0x38250000, 0x38254000, 0x38258000, 0x3825C000, 0x38260000, 0x38264000, 0x38268000, 0x3826C000, 0x38270000, 0x38274000, 0x38278000, 0x3827C000,
+ 0x38280000, 0x38284000, 0x38288000, 0x3828C000, 0x38290000, 0x38294000, 0x38298000, 0x3829C000, 0x382A0000, 0x382A4000, 0x382A8000, 0x382AC000, 0x382B0000, 0x382B4000, 0x382B8000, 0x382BC000,
+ 0x382C0000, 0x382C4000, 0x382C8000, 0x382CC000, 0x382D0000, 0x382D4000, 0x382D8000, 0x382DC000, 0x382E0000, 0x382E4000, 0x382E8000, 0x382EC000, 0x382F0000, 0x382F4000, 0x382F8000, 0x382FC000,
+ 0x38300000, 0x38304000, 0x38308000, 0x3830C000, 0x38310000, 0x38314000, 0x38318000, 0x3831C000, 0x38320000, 0x38324000, 0x38328000, 0x3832C000, 0x38330000, 0x38334000, 0x38338000, 0x3833C000,
+ 0x38340000, 0x38344000, 0x38348000, 0x3834C000, 0x38350000, 0x38354000, 0x38358000, 0x3835C000, 0x38360000, 0x38364000, 0x38368000, 0x3836C000, 0x38370000, 0x38374000, 0x38378000, 0x3837C000,
+ 0x38380000, 0x38384000, 0x38388000, 0x3838C000, 0x38390000, 0x38394000, 0x38398000, 0x3839C000, 0x383A0000, 0x383A4000, 0x383A8000, 0x383AC000, 0x383B0000, 0x383B4000, 0x383B8000, 0x383BC000,
+ 0x383C0000, 0x383C4000, 0x383C8000, 0x383CC000, 0x383D0000, 0x383D4000, 0x383D8000, 0x383DC000, 0x383E0000, 0x383E4000, 0x383E8000, 0x383EC000, 0x383F0000, 0x383F4000, 0x383F8000, 0x383FC000,
+ 0x38400000, 0x38404000, 0x38408000, 0x3840C000, 0x38410000, 0x38414000, 0x38418000, 0x3841C000, 0x38420000, 0x38424000, 0x38428000, 0x3842C000, 0x38430000, 0x38434000, 0x38438000, 0x3843C000,
+ 0x38440000, 0x38444000, 0x38448000, 0x3844C000, 0x38450000, 0x38454000, 0x38458000, 0x3845C000, 0x38460000, 0x38464000, 0x38468000, 0x3846C000, 0x38470000, 0x38474000, 0x38478000, 0x3847C000,
+ 0x38480000, 0x38484000, 0x38488000, 0x3848C000, 0x38490000, 0x38494000, 0x38498000, 0x3849C000, 0x384A0000, 0x384A4000, 0x384A8000, 0x384AC000, 0x384B0000, 0x384B4000, 0x384B8000, 0x384BC000,
+ 0x384C0000, 0x384C4000, 0x384C8000, 0x384CC000, 0x384D0000, 0x384D4000, 0x384D8000, 0x384DC000, 0x384E0000, 0x384E4000, 0x384E8000, 0x384EC000, 0x384F0000, 0x384F4000, 0x384F8000, 0x384FC000,
+ 0x38500000, 0x38504000, 0x38508000, 0x3850C000, 0x38510000, 0x38514000, 0x38518000, 0x3851C000, 0x38520000, 0x38524000, 0x38528000, 0x3852C000, 0x38530000, 0x38534000, 0x38538000, 0x3853C000,
+ 0x38540000, 0x38544000, 0x38548000, 0x3854C000, 0x38550000, 0x38554000, 0x38558000, 0x3855C000, 0x38560000, 0x38564000, 0x38568000, 0x3856C000, 0x38570000, 0x38574000, 0x38578000, 0x3857C000,
+ 0x38580000, 0x38584000, 0x38588000, 0x3858C000, 0x38590000, 0x38594000, 0x38598000, 0x3859C000, 0x385A0000, 0x385A4000, 0x385A8000, 0x385AC000, 0x385B0000, 0x385B4000, 0x385B8000, 0x385BC000,
+ 0x385C0000, 0x385C4000, 0x385C8000, 0x385CC000, 0x385D0000, 0x385D4000, 0x385D8000, 0x385DC000, 0x385E0000, 0x385E4000, 0x385E8000, 0x385EC000, 0x385F0000, 0x385F4000, 0x385F8000, 0x385FC000,
+ 0x38600000, 0x38604000, 0x38608000, 0x3860C000, 0x38610000, 0x38614000, 0x38618000, 0x3861C000, 0x38620000, 0x38624000, 0x38628000, 0x3862C000, 0x38630000, 0x38634000, 0x38638000, 0x3863C000,
+ 0x38640000, 0x38644000, 0x38648000, 0x3864C000, 0x38650000, 0x38654000, 0x38658000, 0x3865C000, 0x38660000, 0x38664000, 0x38668000, 0x3866C000, 0x38670000, 0x38674000, 0x38678000, 0x3867C000,
+ 0x38680000, 0x38684000, 0x38688000, 0x3868C000, 0x38690000, 0x38694000, 0x38698000, 0x3869C000, 0x386A0000, 0x386A4000, 0x386A8000, 0x386AC000, 0x386B0000, 0x386B4000, 0x386B8000, 0x386BC000,
+ 0x386C0000, 0x386C4000, 0x386C8000, 0x386CC000, 0x386D0000, 0x386D4000, 0x386D8000, 0x386DC000, 0x386E0000, 0x386E4000, 0x386E8000, 0x386EC000, 0x386F0000, 0x386F4000, 0x386F8000, 0x386FC000,
+ 0x38700000, 0x38704000, 0x38708000, 0x3870C000, 0x38710000, 0x38714000, 0x38718000, 0x3871C000, 0x38720000, 0x38724000, 0x38728000, 0x3872C000, 0x38730000, 0x38734000, 0x38738000, 0x3873C000,
+ 0x38740000, 0x38744000, 0x38748000, 0x3874C000, 0x38750000, 0x38754000, 0x38758000, 0x3875C000, 0x38760000, 0x38764000, 0x38768000, 0x3876C000, 0x38770000, 0x38774000, 0x38778000, 0x3877C000,
+ 0x38780000, 0x38784000, 0x38788000, 0x3878C000, 0x38790000, 0x38794000, 0x38798000, 0x3879C000, 0x387A0000, 0x387A4000, 0x387A8000, 0x387AC000, 0x387B0000, 0x387B4000, 0x387B8000, 0x387BC000,
+ 0x387C0000, 0x387C4000, 0x387C8000, 0x387CC000, 0x387D0000, 0x387D4000, 0x387D8000, 0x387DC000, 0x387E0000, 0x387E4000, 0x387E8000, 0x387EC000, 0x387F0000, 0x387F4000, 0x387F8000, 0x387FC000,
+ 0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800A000, 0x3800C000, 0x3800E000, 0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801A000, 0x3801C000, 0x3801E000,
+ 0x38020000, 0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802A000, 0x3802C000, 0x3802E000, 0x38030000, 0x38032000, 0x38034000, 0x38036000, 0x38038000, 0x3803A000, 0x3803C000, 0x3803E000,
+ 0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804A000, 0x3804C000, 0x3804E000, 0x38050000, 0x38052000, 0x38054000, 0x38056000, 0x38058000, 0x3805A000, 0x3805C000, 0x3805E000,
+ 0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000, 0x3806A000, 0x3806C000, 0x3806E000, 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807A000, 0x3807C000, 0x3807E000,
+ 0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808A000, 0x3808C000, 0x3808E000, 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809A000, 0x3809C000, 0x3809E000,
+ 0x380A0000, 0x380A2000, 0x380A4000, 0x380A6000, 0x380A8000, 0x380AA000, 0x380AC000, 0x380AE000, 0x380B0000, 0x380B2000, 0x380B4000, 0x380B6000, 0x380B8000, 0x380BA000, 0x380BC000, 0x380BE000,
+ 0x380C0000, 0x380C2000, 0x380C4000, 0x380C6000, 0x380C8000, 0x380CA000, 0x380CC000, 0x380CE000, 0x380D0000, 0x380D2000, 0x380D4000, 0x380D6000, 0x380D8000, 0x380DA000, 0x380DC000, 0x380DE000,
+ 0x380E0000, 0x380E2000, 0x380E4000, 0x380E6000, 0x380E8000, 0x380EA000, 0x380EC000, 0x380EE000, 0x380F0000, 0x380F2000, 0x380F4000, 0x380F6000, 0x380F8000, 0x380FA000, 0x380FC000, 0x380FE000,
+ 0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810A000, 0x3810C000, 0x3810E000, 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811A000, 0x3811C000, 0x3811E000,
+ 0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812A000, 0x3812C000, 0x3812E000, 0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813A000, 0x3813C000, 0x3813E000,
+ 0x38140000, 0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814A000, 0x3814C000, 0x3814E000, 0x38150000, 0x38152000, 0x38154000, 0x38156000, 0x38158000, 0x3815A000, 0x3815C000, 0x3815E000,
+ 0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816A000, 0x3816C000, 0x3816E000, 0x38170000, 0x38172000, 0x38174000, 0x38176000, 0x38178000, 0x3817A000, 0x3817C000, 0x3817E000,
+ 0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000, 0x3818A000, 0x3818C000, 0x3818E000, 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819A000, 0x3819C000, 0x3819E000,
+ 0x381A0000, 0x381A2000, 0x381A4000, 0x381A6000, 0x381A8000, 0x381AA000, 0x381AC000, 0x381AE000, 0x381B0000, 0x381B2000, 0x381B4000, 0x381B6000, 0x381B8000, 0x381BA000, 0x381BC000, 0x381BE000,
+ 0x381C0000, 0x381C2000, 0x381C4000, 0x381C6000, 0x381C8000, 0x381CA000, 0x381CC000, 0x381CE000, 0x381D0000, 0x381D2000, 0x381D4000, 0x381D6000, 0x381D8000, 0x381DA000, 0x381DC000, 0x381DE000,
+ 0x381E0000, 0x381E2000, 0x381E4000, 0x381E6000, 0x381E8000, 0x381EA000, 0x381EC000, 0x381EE000, 0x381F0000, 0x381F2000, 0x381F4000, 0x381F6000, 0x381F8000, 0x381FA000, 0x381FC000, 0x381FE000,
+ 0x38200000, 0x38202000, 0x38204000, 0x38206000, 0x38208000, 0x3820A000, 0x3820C000, 0x3820E000, 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000, 0x3821A000, 0x3821C000, 0x3821E000,
+ 0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822A000, 0x3822C000, 0x3822E000, 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823A000, 0x3823C000, 0x3823E000,
+ 0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824A000, 0x3824C000, 0x3824E000, 0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825A000, 0x3825C000, 0x3825E000,
+ 0x38260000, 0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826A000, 0x3826C000, 0x3826E000, 0x38270000, 0x38272000, 0x38274000, 0x38276000, 0x38278000, 0x3827A000, 0x3827C000, 0x3827E000,
+ 0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828A000, 0x3828C000, 0x3828E000, 0x38290000, 0x38292000, 0x38294000, 0x38296000, 0x38298000, 0x3829A000, 0x3829C000, 0x3829E000,
+ 0x382A0000, 0x382A2000, 0x382A4000, 0x382A6000, 0x382A8000, 0x382AA000, 0x382AC000, 0x382AE000, 0x382B0000, 0x382B2000, 0x382B4000, 0x382B6000, 0x382B8000, 0x382BA000, 0x382BC000, 0x382BE000,
+ 0x382C0000, 0x382C2000, 0x382C4000, 0x382C6000, 0x382C8000, 0x382CA000, 0x382CC000, 0x382CE000, 0x382D0000, 0x382D2000, 0x382D4000, 0x382D6000, 0x382D8000, 0x382DA000, 0x382DC000, 0x382DE000,
+ 0x382E0000, 0x382E2000, 0x382E4000, 0x382E6000, 0x382E8000, 0x382EA000, 0x382EC000, 0x382EE000, 0x382F0000, 0x382F2000, 0x382F4000, 0x382F6000, 0x382F8000, 0x382FA000, 0x382FC000, 0x382FE000,
+ 0x38300000, 0x38302000, 0x38304000, 0x38306000, 0x38308000, 0x3830A000, 0x3830C000, 0x3830E000, 0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831A000, 0x3831C000, 0x3831E000,
+ 0x38320000, 0x38322000, 0x38324000, 0x38326000, 0x38328000, 0x3832A000, 0x3832C000, 0x3832E000, 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000, 0x3833A000, 0x3833C000, 0x3833E000,
+ 0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834A000, 0x3834C000, 0x3834E000, 0x38350000, 0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835A000, 0x3835C000, 0x3835E000,
+ 0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836A000, 0x3836C000, 0x3836E000, 0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837A000, 0x3837C000, 0x3837E000,
+ 0x38380000, 0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838A000, 0x3838C000, 0x3838E000, 0x38390000, 0x38392000, 0x38394000, 0x38396000, 0x38398000, 0x3839A000, 0x3839C000, 0x3839E000,
+ 0x383A0000, 0x383A2000, 0x383A4000, 0x383A6000, 0x383A8000, 0x383AA000, 0x383AC000, 0x383AE000, 0x383B0000, 0x383B2000, 0x383B4000, 0x383B6000, 0x383B8000, 0x383BA000, 0x383BC000, 0x383BE000,
+ 0x383C0000, 0x383C2000, 0x383C4000, 0x383C6000, 0x383C8000, 0x383CA000, 0x383CC000, 0x383CE000, 0x383D0000, 0x383D2000, 0x383D4000, 0x383D6000, 0x383D8000, 0x383DA000, 0x383DC000, 0x383DE000,
+ 0x383E0000, 0x383E2000, 0x383E4000, 0x383E6000, 0x383E8000, 0x383EA000, 0x383EC000, 0x383EE000, 0x383F0000, 0x383F2000, 0x383F4000, 0x383F6000, 0x383F8000, 0x383FA000, 0x383FC000, 0x383FE000,
+ 0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840A000, 0x3840C000, 0x3840E000, 0x38410000, 0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841A000, 0x3841C000, 0x3841E000,
+ 0x38420000, 0x38422000, 0x38424000, 0x38426000, 0x38428000, 0x3842A000, 0x3842C000, 0x3842E000, 0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843A000, 0x3843C000, 0x3843E000,
+ 0x38440000, 0x38442000, 0x38444000, 0x38446000, 0x38448000, 0x3844A000, 0x3844C000, 0x3844E000, 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000, 0x3845A000, 0x3845C000, 0x3845E000,
+ 0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846A000, 0x3846C000, 0x3846E000, 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847A000, 0x3847C000, 0x3847E000,
+ 0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848A000, 0x3848C000, 0x3848E000, 0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849A000, 0x3849C000, 0x3849E000,
+ 0x384A0000, 0x384A2000, 0x384A4000, 0x384A6000, 0x384A8000, 0x384AA000, 0x384AC000, 0x384AE000, 0x384B0000, 0x384B2000, 0x384B4000, 0x384B6000, 0x384B8000, 0x384BA000, 0x384BC000, 0x384BE000,
+ 0x384C0000, 0x384C2000, 0x384C4000, 0x384C6000, 0x384C8000, 0x384CA000, 0x384CC000, 0x384CE000, 0x384D0000, 0x384D2000, 0x384D4000, 0x384D6000, 0x384D8000, 0x384DA000, 0x384DC000, 0x384DE000,
+ 0x384E0000, 0x384E2000, 0x384E4000, 0x384E6000, 0x384E8000, 0x384EA000, 0x384EC000, 0x384EE000, 0x384F0000, 0x384F2000, 0x384F4000, 0x384F6000, 0x384F8000, 0x384FA000, 0x384FC000, 0x384FE000,
+ 0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850A000, 0x3850C000, 0x3850E000, 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851A000, 0x3851C000, 0x3851E000,
+ 0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852A000, 0x3852C000, 0x3852E000, 0x38530000, 0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853A000, 0x3853C000, 0x3853E000,
+ 0x38540000, 0x38542000, 0x38544000, 0x38546000, 0x38548000, 0x3854A000, 0x3854C000, 0x3854E000, 0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855A000, 0x3855C000, 0x3855E000,
+ 0x38560000, 0x38562000, 0x38564000, 0x38566000, 0x38568000, 0x3856A000, 0x3856C000, 0x3856E000, 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000, 0x3857A000, 0x3857C000, 0x3857E000,
+ 0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858A000, 0x3858C000, 0x3858E000, 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859A000, 0x3859C000, 0x3859E000,
+ 0x385A0000, 0x385A2000, 0x385A4000, 0x385A6000, 0x385A8000, 0x385AA000, 0x385AC000, 0x385AE000, 0x385B0000, 0x385B2000, 0x385B4000, 0x385B6000, 0x385B8000, 0x385BA000, 0x385BC000, 0x385BE000,
+ 0x385C0000, 0x385C2000, 0x385C4000, 0x385C6000, 0x385C8000, 0x385CA000, 0x385CC000, 0x385CE000, 0x385D0000, 0x385D2000, 0x385D4000, 0x385D6000, 0x385D8000, 0x385DA000, 0x385DC000, 0x385DE000,
+ 0x385E0000, 0x385E2000, 0x385E4000, 0x385E6000, 0x385E8000, 0x385EA000, 0x385EC000, 0x385EE000, 0x385F0000, 0x385F2000, 0x385F4000, 0x385F6000, 0x385F8000, 0x385FA000, 0x385FC000, 0x385FE000,
+ 0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000, 0x3860A000, 0x3860C000, 0x3860E000, 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861A000, 0x3861C000, 0x3861E000,
+ 0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862A000, 0x3862C000, 0x3862E000, 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863A000, 0x3863C000, 0x3863E000,
+ 0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864A000, 0x3864C000, 0x3864E000, 0x38650000, 0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865A000, 0x3865C000, 0x3865E000,
+ 0x38660000, 0x38662000, 0x38664000, 0x38666000, 0x38668000, 0x3866A000, 0x3866C000, 0x3866E000, 0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867A000, 0x3867C000, 0x3867E000,
+ 0x38680000, 0x38682000, 0x38684000, 0x38686000, 0x38688000, 0x3868A000, 0x3868C000, 0x3868E000, 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000, 0x3869A000, 0x3869C000, 0x3869E000,
+ 0x386A0000, 0x386A2000, 0x386A4000, 0x386A6000, 0x386A8000, 0x386AA000, 0x386AC000, 0x386AE000, 0x386B0000, 0x386B2000, 0x386B4000, 0x386B6000, 0x386B8000, 0x386BA000, 0x386BC000, 0x386BE000,
+ 0x386C0000, 0x386C2000, 0x386C4000, 0x386C6000, 0x386C8000, 0x386CA000, 0x386CC000, 0x386CE000, 0x386D0000, 0x386D2000, 0x386D4000, 0x386D6000, 0x386D8000, 0x386DA000, 0x386DC000, 0x386DE000,
+ 0x386E0000, 0x386E2000, 0x386E4000, 0x386E6000, 0x386E8000, 0x386EA000, 0x386EC000, 0x386EE000, 0x386F0000, 0x386F2000, 0x386F4000, 0x386F6000, 0x386F8000, 0x386FA000, 0x386FC000, 0x386FE000,
+ 0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870A000, 0x3870C000, 0x3870E000, 0x38710000, 0x38712000, 0x38714000, 0x38716000, 0x38718000, 0x3871A000, 0x3871C000, 0x3871E000,
+ 0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000, 0x3872A000, 0x3872C000, 0x3872E000, 0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873A000, 0x3873C000, 0x3873E000,
+ 0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874A000, 0x3874C000, 0x3874E000, 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875A000, 0x3875C000, 0x3875E000,
+ 0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876A000, 0x3876C000, 0x3876E000, 0x38770000, 0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877A000, 0x3877C000, 0x3877E000,
+ 0x38780000, 0x38782000, 0x38784000, 0x38786000, 0x38788000, 0x3878A000, 0x3878C000, 0x3878E000, 0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879A000, 0x3879C000, 0x3879E000,
+ 0x387A0000, 0x387A2000, 0x387A4000, 0x387A6000, 0x387A8000, 0x387AA000, 0x387AC000, 0x387AE000, 0x387B0000, 0x387B2000, 0x387B4000, 0x387B6000, 0x387B8000, 0x387BA000, 0x387BC000, 0x387BE000,
+ 0x387C0000, 0x387C2000, 0x387C4000, 0x387C6000, 0x387C8000, 0x387CA000, 0x387CC000, 0x387CE000, 0x387D0000, 0x387D2000, 0x387D4000, 0x387D6000, 0x387D8000, 0x387DA000, 0x387DC000, 0x387DE000,
+ 0x387E0000, 0x387E2000, 0x387E4000, 0x387E6000, 0x387E8000, 0x387EA000, 0x387EC000, 0x387EE000, 0x387F0000, 0x387F2000, 0x387F4000, 0x387F6000, 0x387F8000, 0x387FA000, 0x387FC000, 0x387FE000 };
+ static const bits<float>::type exponent_table[64] = {
+ 0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000, 0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000, 0x07800000,
+ 0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0A000000, 0x0A800000, 0x0B000000, 0x0B800000, 0x0C000000, 0x0C800000, 0x0D000000, 0x0D800000, 0x0E000000, 0x0E800000, 0x0F000000, 0x47800000,
+ 0x80000000, 0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000, 0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000,
+ 0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8A000000, 0x8A800000, 0x8B000000, 0x8B800000, 0x8C000000, 0x8C800000, 0x8D000000, 0x8D800000, 0x8E000000, 0x8E800000, 0x8F000000, 0xC7800000 };
+ static const unsigned short offset_table[64] = {
+ 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+ 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024 };
+ bits<float>::type fbits = mantissa_table[offset_table[value>>10]+(value&0x3FF)] + exponent_table[value>>10];
+ #endif
+ float out;
+ std::memcpy(&out, &fbits, sizeof(float));
+ return out;
+ #endif
+ }
+
+ /// Convert half-precision to IEEE double-precision.
+ /// \param value half-precision value to convert
+ /// \return double-precision value
+ inline double half2float_impl(unsigned int value, double, true_type)
+ {
+ #if HALF_ENABLE_F16C_INTRINSICS
+ return _mm_cvtsd_f64(_mm_cvtps_pd(_mm_cvtph_ps(_mm_cvtsi32_si128(value))));
+ #else
+ uint32 hi = static_cast<uint32>(value&0x8000) << 16;
+ unsigned int abs = value & 0x7FFF;
+ if(abs)
+ {
+ hi |= 0x3F000000 << static_cast<unsigned>(abs>=0x7C00);
+ for(; abs<0x400; abs<<=1,hi-=0x100000) ;
+ hi += static_cast<uint32>(abs) << 10;
+ }
+ bits<double>::type dbits = static_cast<bits<double>::type>(hi) << 32;
+ double out;
+ std::memcpy(&out, &dbits, sizeof(double));
+ return out;
+ #endif
+ }
+
+ /// Convert half-precision to non-IEEE floating-point.
+ /// \tparam T type to convert to (builtin floating-point type)
+ /// \param value half-precision value to convert
+ /// \return floating-point value
+ template<typename T> T half2float_impl(unsigned int value, T, ...)
+ {
+ T out;
+ unsigned int abs = value & 0x7FFF;
+ if(abs > 0x7C00)
+ out = (std::numeric_limits<T>::has_signaling_NaN && !(abs&0x200)) ? std::numeric_limits<T>::signaling_NaN() :
+ std::numeric_limits<T>::has_quiet_NaN ? std::numeric_limits<T>::quiet_NaN() : T();
+ else if(abs == 0x7C00)
+ out = std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();
+ else if(abs > 0x3FF)
+ out = std::ldexp(static_cast<T>((abs&0x3FF)|0x400), (abs>>10)-25);
+ else
+ out = std::ldexp(static_cast<T>(abs), -24);
+ return (value&0x8000) ? -out : out;
+ }
+
+ /// Convert half-precision to floating-point.
+ /// \tparam T type to convert to (builtin floating-point type)
+ /// \param value half-precision value to convert
+ /// \return floating-point value
+ template<typename T> T half2float(unsigned int value)
+ {
+ return half2float_impl(value, T(), bool_type<std::numeric_limits<T>::is_iec559&&sizeof(typename bits<T>::type)==sizeof(T)>());
+ }
+
+ /// Convert half-precision floating-point to integer.
+ /// \tparam R rounding mode to use
+ /// \tparam E `true` for round to even, `false` for round away from zero
+ /// \tparam I `true` to raise INEXACT exception (if inexact), `false` to never raise it
+ /// \tparam T type to convert to (builtin integer type with at least 16 bits of precision, excluding any implicit sign bits)
+ /// \param value half-precision value to convert
+ /// \return rounded integer value
+ /// \exception FE_INVALID if value is not representable in type \a T
+ /// \exception FE_INEXACT if value had to be rounded and \a I is `true`
+ template<std::float_round_style R,bool E,bool I,typename T> T half2int(unsigned int value)
+ {
+ unsigned int abs = value & 0x7FFF;
+ if(abs >= 0x7C00)
+ {
+ raise(FE_INVALID);
+ return (value&0x8000) ? std::numeric_limits<T>::min() : std::numeric_limits<T>::max();
+ }
+ if(abs < 0x3800)
+ {
+ raise(FE_INEXACT, I);
+ return (R==std::round_toward_infinity) ? T(~(value>>15)&(abs!=0)) :
+ (R==std::round_toward_neg_infinity) ? -T(value>0x8000) :
+ T();
+ }
+ int exp = 25 - (abs>>10);
+ unsigned int m = (value&0x3FF) | 0x400;
+ int32 i = static_cast<int32>((exp<=0) ? (m<<-exp) : ((m+(
+ (R==std::round_to_nearest) ? ((1<<(exp-1))-(~(m>>exp)&E)) :
+ (R==std::round_toward_infinity) ? (((1<<exp)-1)&((value>>15)-1)) :
+ (R==std::round_toward_neg_infinity) ? (((1<<exp)-1)&-(value>>15)) : 0))>>exp));
+ if((!std::numeric_limits<T>::is_signed && (value&0x8000)) || (std::numeric_limits<T>::digits<16 &&
+ ((value&0x8000) ? (-i<std::numeric_limits<T>::min()) : (i>std::numeric_limits<T>::max()))))
+ raise(FE_INVALID);
+ else if(I && exp > 0 && (m&((1<<exp)-1)))
+ raise(FE_INEXACT);
+ return static_cast<T>((value&0x8000) ? -i : i);
+ }
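+ // A small worked example of the rounding path above (for illustration only):
+ // for value == 0x3E00 (1.5), abs == 0x3E00, exp == 25 - 15 == 10 and m == 0x600,
+ // so with R == std::round_to_nearest and E == true the rounding addend is
+ // (1<<9) - (~(m>>10)&1) == 512 and i == (0x600+512)>>10 == 2: the tie at 1.5
+ // rounds to the even integer 2, and FE_INEXACT is raised when I is true.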
+
+ /// \}
+ /// \name Mathematics
+ /// \{
+
+ /// Upper part of 64-bit multiplication.
+ /// \tparam R rounding mode to use
+ /// \param x first factor
+ /// \param y second factor
+ /// \return upper 32 bits of \a x * \a y
+ template<std::float_round_style R> uint32 mulhi(uint32 x, uint32 y)
+ {
+ uint32 xy = (x>>16) * (y&0xFFFF), yx = (x&0xFFFF) * (y>>16), c = (xy&0xFFFF) + (yx&0xFFFF) + (((x&0xFFFF)*(y&0xFFFF))>>16);
+ return (x>>16)*(y>>16) + (xy>>16) + (yx>>16) + (c>>16) +
+ ((R==std::round_to_nearest) ? ((c>>15)&1) : (R==std::round_toward_infinity) ? ((c&0xFFFF)!=0) : 0);
+ }
+
+ /// 64-bit multiplication.
+ /// \param x first factor
+ /// \param y second factor
+ /// \return upper 32 bits of \a x * \a y rounded to nearest
+ inline uint32 multiply64(uint32 x, uint32 y)
+ {
+ #if HALF_ENABLE_CPP11_LONG_LONG
+ return static_cast<uint32>((static_cast<unsigned long long>(x)*static_cast<unsigned long long>(y)+0x80000000)>>32);
+ #else
+ return mulhi<std::round_to_nearest>(x, y);
+ #endif
+ }
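+ // For illustration: multiply64() yields the upper 32 bits of the full 64-bit
+ // product rounded to nearest, e.g. multiply64(0x80000000, 0x80000000) ==
+ // 0x40000000, since 2^31 * 2^31 == 2^62 and (2^62 + 2^31) >> 32 == 2^30.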
+
+ /// 64-bit division.
+ /// \param x upper 32 bit of dividend
+ /// \param y divisor
+ /// \param s variable to store sticky bit for rounding
+ /// \return (\a x << 32) / \a y
+ inline uint32 divide64(uint32 x, uint32 y, int &s)
+ {
+ #if HALF_ENABLE_CPP11_LONG_LONG
+ unsigned long long xx = static_cast<unsigned long long>(x) << 32;
+ return s = (xx%y!=0), static_cast<uint32>(xx/y);
+ #else
+ y >>= 1;
+ uint32 rem = x, div = 0;
+ for(unsigned int i=0; i<32; ++i)
+ {
+ div <<= 1;
+ if(rem >= y)
+ {
+ rem -= y;
+ div |= 1;
+ }
+ rem <<= 1;
+ }
+ return s = rem > 1, div;
+ #endif
+ }
+
+ /// Half precision positive modulus.
+ /// \tparam Q `true` to compute full quotient, `false` else
+ /// \tparam R `true` to compute signed remainder, `false` for positive remainder
+ /// \param x first operand as positive finite half-precision value
+ /// \param y second operand as positive finite half-precision value
+ /// \param quo address to store quotient at, `nullptr` if \a Q `false`
+ /// \return modulus of \a x / \a y
+ template<bool Q,bool R> unsigned int mod(unsigned int x, unsigned int y, int *quo = NULL)
+ {
+ unsigned int q = 0;
+ if(x > y)
+ {
+ int absx = x, absy = y, expx = 0, expy = 0;
+ for(; absx<0x400; absx<<=1,--expx) ;
+ for(; absy<0x400; absy<<=1,--expy) ;
+ expx += absx >> 10;
+ expy += absy >> 10;
+ int mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400;
+ for(int d=expx-expy; d; --d)
+ {
+ if(!Q && mx == my)
+ return 0;
+ if(mx >= my)
+ {
+ mx -= my;
+ q += Q;
+ }
+ mx <<= 1;
+ q <<= static_cast<int>(Q);
+ }
+ if(!Q && mx == my)
+ return 0;
+ if(mx >= my)
+ {
+ mx -= my;
+ ++q;
+ }
+ if(Q)
+ {
+ q &= (1<<(std::numeric_limits<int>::digits-1)) - 1;
+ if(!mx)
+ return *quo = q, 0;
+ }
+ for(; mx<0x400; mx<<=1,--expy) ;
+ x = (expy>0) ? ((expy<<10)|(mx&0x3FF)) : (mx>>(1-expy));
+ }
+ if(R)
+ {
+ unsigned int a, b;
+ if(y < 0x800)
+ {
+ a = (x<0x400) ? (x<<1) : (x+0x400);
+ b = y;
+ }
+ else
+ {
+ a = x;
+ b = y - 0x400;
+ }
+ if(a > b || (a == b && (q&1)))
+ {
+ int exp = (y>>10) + (y<=0x3FF), d = exp - (x>>10) - (x<=0x3FF);
+ int m = (((y&0x3FF)|((y>0x3FF)<<10))<<1) - (((x&0x3FF)|((x>0x3FF)<<10))<<(1-d));
+ for(; m<0x800 && exp>1; m<<=1,--exp) ;
+ x = 0x8000 + ((exp-1)<<10) + (m>>1);
+ q += Q;
+ }
+ }
+ if(Q)
+ *quo = q;
+ return x;
+ }
+
+ /// Fixed point square root.
+ /// \tparam F number of fractional bits
+ /// \param r radicand in Q1.F fixed point format
+ /// \param exp exponent
+ /// \return square root as Q1.F/2
+ template<unsigned int F> uint32 sqrt(uint32 &r, int &exp)
+ {
+ int i = exp & 1;
+ r <<= i;
+ exp = (exp-i) / 2;
+ uint32 m = 0;
+ for(uint32 bit=static_cast<uint32>(1)<<F; bit; bit>>=2)
+ {
+ if(r < m+bit)
+ m >>= 1;
+ else
+ {
+ r -= m + bit;
+ m = (m>>1) + bit;
+ }
+ }
+ return m;
+ }
+
+ /// Fixed point binary exponential.
+ /// This uses the BKM algorithm in E-mode.
+ /// \param m exponent in [0,1) as Q0.31
+ /// \param n number of iterations (at most 32)
+ /// \return 2 ^ \a m as Q1.31
+ inline uint32 exp2(uint32 m, unsigned int n = 32)
+ {
+ static const uint32 logs[] = {
+ 0x80000000, 0x4AE00D1D, 0x2934F098, 0x15C01A3A, 0x0B31FB7D, 0x05AEB4DD, 0x02DCF2D1, 0x016FE50B,
+ 0x00B84E23, 0x005C3E10, 0x002E24CA, 0x001713D6, 0x000B8A47, 0x0005C53B, 0x0002E2A3, 0x00017153,
+ 0x0000B8AA, 0x00005C55, 0x00002E2B, 0x00001715, 0x00000B8B, 0x000005C5, 0x000002E3, 0x00000171,
+ 0x000000B9, 0x0000005C, 0x0000002E, 0x00000017, 0x0000000C, 0x00000006, 0x00000003, 0x00000001 };
+ if(!m)
+ return 0x80000000;
+ uint32 mx = 0x80000000, my = 0;
+ for(unsigned int i=1; i<n; ++i)
+ {
+ uint32 mz = my + logs[i];
+ if(mz <= m)
+ {
+ my = mz;
+ mx += mx >> i;
+ }
+ }
+ return mx;
+ }
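+ // Note on the table above: logs[i] holds log2(1 + 2^-i) in Q0.31 (logs[1] ~=
+ // 0.5850 == log2(1.5)), so the loop greedily multiplies factors (1 + 2^-i)
+ // into mx (via mx += mx>>i) while the matching logarithms accumulated in my
+ // stay <= m; on exit mx approximates 2^m in Q1.31. For instance exp2(0x40000000)
+ // (m == 0.5) converges towards sqrt(2) ~= 1.4142, roughly 0xB504F334 in Q1.31
+ // (the exact bits depend on the iteration count and are not claimed here).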
+
+ /// Fixed point binary logarithm.
+ /// This uses the BKM algorithm in L-mode.
+ /// \param m mantissa in [1,2) as Q1.30
+ /// \param n number of iterations (at most 32)
+ /// \return log2(\a m) as Q0.31
+ inline uint32 log2(uint32 m, unsigned int n = 32)
+ {
+ static const uint32 logs[] = {
+ 0x80000000, 0x4AE00D1D, 0x2934F098, 0x15C01A3A, 0x0B31FB7D, 0x05AEB4DD, 0x02DCF2D1, 0x016FE50B,
+ 0x00B84E23, 0x005C3E10, 0x002E24CA, 0x001713D6, 0x000B8A47, 0x0005C53B, 0x0002E2A3, 0x00017153,
+ 0x0000B8AA, 0x00005C55, 0x00002E2B, 0x00001715, 0x00000B8B, 0x000005C5, 0x000002E3, 0x00000171,
+ 0x000000B9, 0x0000005C, 0x0000002E, 0x00000017, 0x0000000C, 0x00000006, 0x00000003, 0x00000001 };
+ if(m == 0x40000000)
+ return 0;
+ uint32 mx = 0x40000000, my = 0;
+ for(unsigned int i=1; i<n; ++i)
+ {
+ uint32 mz = mx + (mx>>i);
+ if(mz <= m)
+ {
+ mx = mz;
+ my += logs[i];
+ }
+ }
+ return my;
+ }
+
+ /// Fixed point sine and cosine.
+ /// This uses the CORDIC algorithm in rotation mode.
+ /// \param mz angle in [-pi/2,pi/2] as Q1.30
+ /// \param n number of iterations (at most 31)
+ /// \return sine and cosine of \a mz as Q1.30
+ inline std::pair<uint32,uint32> sincos(uint32 mz, unsigned int n = 31)
+ {
+ static const uint32 angles[] = {
+ 0x3243F6A9, 0x1DAC6705, 0x0FADBAFD, 0x07F56EA7, 0x03FEAB77, 0x01FFD55C, 0x00FFFAAB, 0x007FFF55,
+ 0x003FFFEB, 0x001FFFFD, 0x00100000, 0x00080000, 0x00040000, 0x00020000, 0x00010000, 0x00008000,
+ 0x00004000, 0x00002000, 0x00001000, 0x00000800, 0x00000400, 0x00000200, 0x00000100, 0x00000080,
+ 0x00000040, 0x00000020, 0x00000010, 0x00000008, 0x00000004, 0x00000002, 0x00000001 };
+ uint32 mx = 0x26DD3B6A, my = 0;
+ for(unsigned int i=0; i<n; ++i)
+ {
+ uint32 sign = sign_mask(mz);
+ uint32 tx = mx - (arithmetic_shift(my, i)^sign) + sign;
+ uint32 ty = my + (arithmetic_shift(mx, i)^sign) - sign;
+ mx = tx; my = ty; mz -= (angles[i]^sign) - sign;
+ }
+ return std::make_pair(my, mx);
+ }
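+ // Note on the CORDIC constants above: angles[i] is atan(2^-i) in Q1.30
+ // (angles[0] == 0x3243F6A9 ~= pi/4) and the start value mx == 0x26DD3B6A is
+ // the reciprocal CORDIC gain 1/K ~= 0.60725 in Q1.30, so after the rotations
+ // no separate division by the gain is needed and (my, mx) approximate
+ // (sin(mz), cos(mz)) directly in Q1.30.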
+
+ /// Fixed point arc tangent.
+ /// This uses the CORDIC algorithm in vectoring mode.
+ /// \param my y coordinate as Q0.30
+ /// \param mx x coordinate as Q0.30
+ /// \param n number of iterations (at most 31)
+ /// \return arc tangent of \a my / \a mx as Q1.30
+ inline uint32 atan2(uint32 my, uint32 mx, unsigned int n = 31)
+ {
+ static const uint32 angles[] = {
+ 0x3243F6A9, 0x1DAC6705, 0x0FADBAFD, 0x07F56EA7, 0x03FEAB77, 0x01FFD55C, 0x00FFFAAB, 0x007FFF55,
+ 0x003FFFEB, 0x001FFFFD, 0x00100000, 0x00080000, 0x00040000, 0x00020000, 0x00010000, 0x00008000,
+ 0x00004000, 0x00002000, 0x00001000, 0x00000800, 0x00000400, 0x00000200, 0x00000100, 0x00000080,
+ 0x00000040, 0x00000020, 0x00000010, 0x00000008, 0x00000004, 0x00000002, 0x00000001 };
+ uint32 mz = 0;
+ for(unsigned int i=0; i<n; ++i)
+ {
+ uint32 sign = sign_mask(my);
+ uint32 tx = mx + (arithmetic_shift(my, i)^sign) - sign;
+ uint32 ty = my - (arithmetic_shift(mx, i)^sign) + sign;
+ mx = tx; my = ty; mz += (angles[i]^sign) - sign;
+ }
+ return mz;
+ }
+
+ /// Reduce argument for trigonometric functions.
+ /// \param abs half-precision floating-point value
+ /// \param k variable to take the quarter period
+ /// \return \a abs reduced to [-pi/4,pi/4] as Q0.30
+ inline uint32 angle_arg(unsigned int abs, int &k)
+ {
+ uint32 m = (abs&0x3FF) | ((abs>0x3FF)<<10);
+ int exp = (abs>>10) + (abs<=0x3FF) - 15;
+ if(abs < 0x3A48)
+ return k = 0, m << (exp+20);
+ #if HALF_ENABLE_CPP11_LONG_LONG
+ unsigned long long y = m * 0xA2F9836E4E442, mask = (1ULL<<(62-exp)) - 1, yi = (y+(mask>>1)) & ~mask, f = y - yi;
+ uint32 sign = -static_cast<uint32>(f>>63);
+ k = static_cast<int>(yi>>(62-exp));
+ return (multiply64(static_cast<uint32>((sign ? -f : f)>>(31-exp)), 0xC90FDAA2)^sign) - sign;
+ #else
+ uint32 yh = m*0xA2F98 + mulhi<std::round_toward_zero>(m, 0x36E4E442), yl = (m*0x36E4E442) & 0xFFFFFFFF;
+ uint32 mask = (static_cast<uint32>(1)<<(30-exp)) - 1, yi = (yh+(mask>>1)) & ~mask, sign = -static_cast<uint32>(yi>yh);
+ k = static_cast<int>(yi>>(30-exp));
+ uint32 fh = (yh^sign) + (yi^~sign) - ~sign, fl = (yl^sign) - sign;
+ return (multiply64((exp>-1) ? (((fh<<(1+exp))&0xFFFFFFFF)|((fl&0xFFFFFFFF)>>(31-exp))) : fh, 0xC90FDAA2)^sign) - sign;
+ #endif
+ }
+
+ /// Get arguments for atan2 function.
+ /// \param abs half-precision floating-point value
+ /// \return \a abs and sqrt(1 - \a abs^2) as Q0.30
+ inline std::pair<uint32,uint32> atan2_args(unsigned int abs)
+ {
+ int exp = -15;
+ for(; abs<0x400; abs<<=1,--exp) ;
+ exp += abs >> 10;
+ uint32 my = ((abs&0x3FF)|0x400) << 5, r = my * my;
+ int rexp = 2 * exp;
+ r = 0x40000000 - ((rexp>-31) ? ((r>>-rexp)|((r&((static_cast<uint32>(1)<<-rexp)-1))!=0)) : 1);
+ for(rexp=0; r<0x40000000; r<<=1,--rexp) ;
+ uint32 mx = sqrt<30>(r, rexp);
+ int d = exp - rexp;
+ if(d < 0)
+ return std::make_pair((d<-14) ? ((my>>(-d-14))+((my>>(-d-15))&1)) : (my<<(14+d)), (mx<<14)+(r<<13)/mx);
+ if(d > 0)
+ return std::make_pair(my<<14, (d>14) ? ((mx>>(d-14))+((mx>>(d-15))&1)) : ((d==14) ? mx : ((mx<<(14-d))+(r<<(13-d))/mx)));
+ return std::make_pair(my<<13, (mx<<13)+(r<<12)/mx);
+ }
+
+ /// Get exponentials for hyperbolic computation.
+ /// \param abs half-precision floating-point value
+ /// \param exp variable to take unbiased exponent of larger result
+ /// \param n number of BKM iterations (at most 32)
+ /// \return exp(abs) and exp(-\a abs) as Q1.31 with same exponent
+ inline std::pair<uint32,uint32> hyperbolic_args(unsigned int abs, int &exp, unsigned int n = 32)
+ {
+ uint32 mx = detail::multiply64(static_cast<uint32>((abs&0x3FF)+((abs>0x3FF)<<10))<<21, 0xB8AA3B29), my;
+ int e = (abs>>10) + (abs<=0x3FF);
+ if(e < 14)
+ {
+ exp = 0;
+ mx >>= 14 - e;
+ }
+ else
+ {
+ exp = mx >> (45-e);
+ mx = (mx<<(e-14)) & 0x7FFFFFFF;
+ }
+ mx = exp2(mx, n);
+ int d = exp << 1, s;
+ if(mx > 0x80000000)
+ {
+ my = divide64(0x80000000, mx, s);
+ my |= s;
+ ++d;
+ }
+ else
+ my = mx;
+ return std::make_pair(mx, (d<31) ? ((my>>d)|((my&((static_cast<uint32>(1)<<d)-1))!=0)) : 1);
+ }
+
+ /// Postprocessing for binary exponential.
+ /// \tparam R rounding mode to use
+ /// \param m fractional part of exponent as Q0.31
+ /// \param exp absolute value of unbiased exponent
+ /// \param esign sign of actual exponent
+ /// \param sign sign bit of result
+ /// \param n number of BKM iterations (at most 32)
+ /// \return value converted to half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded
+ template<std::float_round_style R> unsigned int exp2_post(uint32 m, int exp, bool esign, unsigned int sign = 0, unsigned int n = 32)
+ {
+ if(esign)
+ {
+ exp = -exp - (m!=0);
+ if(exp < -25)
+ return underflow<R>(sign);
+ else if(exp == -25)
+ return rounded<R,false>(sign, 1, m!=0);
+ }
+ else if(exp > 15)
+ return overflow<R>(sign);
+ if(!m)
+ return sign | (((exp+=15)>0) ? (exp<<10) : check_underflow(0x200>>-exp));
+ m = exp2(m, n);
+ int s = 0;
+ if(esign)
+ m = divide64(0x80000000, m, s);
+ return fixed2half<R,31,false,false,true>(m, exp+14, sign, s);
+ }
+
+ /// Postprocessing for binary logarithm.
+ /// \tparam R rounding mode to use
+ /// \tparam L logarithm for base transformation as Q1.31
+ /// \param m fractional part of logarithm as Q0.31
+ /// \param ilog signed integer part of logarithm
+ /// \param exp biased exponent of result
+ /// \param sign sign bit of result
+ /// \return value base-transformed and converted to half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if no other exception occurred
+ template<std::float_round_style R,uint32 L> unsigned int log2_post(uint32 m, int ilog, int exp, unsigned int sign = 0)
+ {
+ uint32 msign = sign_mask(ilog);
+ m = (((static_cast<uint32>(ilog)<<27)+(m>>4))^msign) - msign;
+ if(!m)
+ return 0;
+ for(; m<0x80000000; m<<=1,--exp) ;
+ int i = m >= L, s;
+ exp += i;
+ m >>= 1 + i;
+ sign ^= msign & 0x8000;
+ if(exp < -11)
+ return underflow<R>(sign);
+ m = divide64(m, L, s);
+ return fixed2half<R,30,false,false,true>(m, exp, sign, 1);
+ }
+
+ /// Hypotenuse square root and postprocessing.
+ /// \tparam R rounding mode to use
+ /// \param r mantissa as Q2.30
+ /// \param exp biased exponent
+ /// \return square root converted to half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if value had to be rounded
+ template<std::float_round_style R> unsigned int hypot_post(uint32 r, int exp)
+ {
+ int i = r >> 31;
+ if((exp+=i) > 46)
+ return overflow<R>();
+ if(exp < -34)
+ return underflow<R>();
+ r = (r>>i) | (r&i);
+ uint32 m = sqrt<30>(r, exp+=15);
+ return fixed2half<R,15,false,false,false>(m, exp-1, 0, r!=0);
+ }
+
+ /// Division and postprocessing for tangents.
+ /// \tparam R rounding mode to use
+ /// \param my dividend as Q1.31
+ /// \param mx divisor as Q1.31
+ /// \param exp biased exponent of result
+ /// \param sign sign bit of result
+ /// \return quotient converted to half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if no other exception occurred
+ template<std::float_round_style R> unsigned int tangent_post(uint32 my, uint32 mx, int exp, unsigned int sign = 0)
+ {
+ int i = my >= mx, s;
+ exp += i;
+ if(exp > 29)
+ return overflow<R>(sign);
+ if(exp < -11)
+ return underflow<R>(sign);
+ uint32 m = divide64(my>>(i+1), mx, s);
+ return fixed2half<R,30,false,false,true>(m, exp, sign, s);
+ }
+
+ /// Area function and postprocessing.
+ /// This computes the value directly in Q2.30 using the representation `asinh|acosh(x) = log(x+sqrt(x^2±1))`.
+ /// \tparam R rounding mode to use
+ /// \tparam S `true` for asinh, `false` for acosh
+ /// \param arg half-precision argument
+ /// \return asinh|acosh(\a arg) converted to half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if no other exception occurred
+ template<std::float_round_style R,bool S> unsigned int area(unsigned int arg)
+ {
+ int abs = arg & 0x7FFF, expx = (abs>>10) + (abs<=0x3FF) - 15, expy = -15, ilog, i;
+ uint32 mx = static_cast<uint32>((abs&0x3FF)|((abs>0x3FF)<<10)) << 20, my, r;
+ for(; abs<0x400; abs<<=1,--expy) ;
+ expy += abs >> 10;
+ r = ((abs&0x3FF)|0x400) << 5;
+ r *= r;
+ i = r >> 31;
+ expy = 2*expy + i;
+ r >>= i;
+ if(S)
+ {
+ if(expy < 0)
+ {
+ r = 0x40000000 + ((expy>-30) ? ((r>>-expy)|((r&((static_cast<uint32>(1)<<-expy)-1))!=0)) : 1);
+ expy = 0;
+ }
+ else
+ {
+ r += 0x40000000 >> expy;
+ i = r >> 31;
+ r = (r>>i) | (r&i);
+ expy += i;
+ }
+ }
+ else
+ {
+ r -= 0x40000000 >> expy;
+ for(; r<0x40000000; r<<=1,--expy) ;
+ }
+ my = sqrt<30>(r, expy);
+ my = (my<<15) + (r<<14)/my;
+ if(S)
+ {
+ mx >>= expy - expx;
+ ilog = expy;
+ }
+ else
+ {
+ my >>= expx - expy;
+ ilog = expx;
+ }
+ my += mx;
+ i = my >> 31;
+ static const int G = S && (R==std::round_to_nearest);
+ return log2_post<R,0xB8AA3B2A>(log2(my>>i, 26+S+G)+(G<<3), ilog+i, 17, arg&(static_cast<unsigned>(S)<<15));
+ }
+
+ /// Class for 1.31 unsigned floating-point computation.
+ struct f31
+ {
+ /// Constructor.
+ /// \param mant mantissa as 1.31
+ /// \param e exponent
+ HALF_CONSTEXPR f31(uint32 mant, int e) : m(mant), exp(e) {}
+
+ /// Constructor.
+ /// \param abs unsigned half-precision value
+ f31(unsigned int abs) : exp(-15)
+ {
+ for(; abs<0x400; abs<<=1,--exp) ;
+ m = static_cast<uint32>((abs&0x3FF)|0x400) << 21;
+ exp += (abs>>10);
+ }
+
+ /// Addition operator.
+ /// \param a first operand
+ /// \param b second operand
+ /// \return \a a + \a b
+ friend f31 operator+(f31 a, f31 b)
+ {
+ if(b.exp > a.exp)
+ std::swap(a, b);
+ int d = a.exp - b.exp;
+ uint32 m = a.m + ((d<32) ? (b.m>>d) : 0);
+ int i = (m&0xFFFFFFFF) < a.m;
+ return f31(((m+i)>>i)|0x80000000, a.exp+i);
+ }
+
+ /// Subtraction operator.
+ /// \param a first operand
+ /// \param b second operand
+ /// \return \a a - \a b
+ friend f31 operator-(f31 a, f31 b)
+ {
+ int d = a.exp - b.exp, exp = a.exp;
+ uint32 m = a.m - ((d<32) ? (b.m>>d) : 0);
+ if(!m)
+ return f31(0, -32);
+ for(; m<0x80000000; m<<=1,--exp) ;
+ return f31(m, exp);
+ }
+
+ /// Multiplication operator.
+ /// \param a first operand
+ /// \param b second operand
+ /// \return \a a * \a b
+ friend f31 operator*(f31 a, f31 b)
+ {
+ uint32 m = multiply64(a.m, b.m);
+ int i = m >> 31;
+ return f31(m<<(1-i), a.exp + b.exp + i);
+ }
+
+ /// Division operator.
+ /// \param a first operand
+ /// \param b second operand
+ /// \return \a a / \a b
+ friend f31 operator/(f31 a, f31 b)
+ {
+ int i = a.m >= b.m, s;
+ uint32 m = divide64((a.m+i)>>i, b.m, s);
+ return f31(m, a.exp - b.exp + i - 1);
+ }
+
+ uint32 m; ///< mantissa as 1.31.
+ int exp; ///< exponent.
+ };
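+ // A small sketch of the f31 representation (for illustration only): an f31
+ // holds the value (m / 2^31) * 2^exp with m normalized into [0x80000000,
+ // 0xFFFFFFFF], so constructing it from the half-precision bit pattern 0x3C00
+ // (1.0) yields m == 0x80000000 and exp == 0, and
+ // f31(0x80000000, 0) * f31(0x80000000, 1) gives m == 0x80000000, exp == 1,
+ // i.e. 2.0.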
+
+ /// Error function and postprocessing.
+ /// This computes the value directly in Q1.31 using the approximations given
+ /// [here](https://en.wikipedia.org/wiki/Error_function#Approximation_with_elementary_functions).
+ /// \tparam R rounding mode to use
+ /// \tparam C `true` for complementary error function, `false` else
+ /// \param arg half-precision function argument
+ /// \return approximated value of error function in half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if no other exception occurred
+ template<std::float_round_style R,bool C> unsigned int erf(unsigned int arg)
+ {
+ unsigned int abs = arg & 0x7FFF, sign = arg & 0x8000;
+ f31 x(abs), x2 = x * x * f31(0xB8AA3B29, 0), t = f31(0x80000000, 0) / (f31(0x80000000, 0)+f31(0xA7BA054A, -2)*x), t2 = t * t;
+ f31 e = ((f31(0x87DC2213, 0)*t2+f31(0xB5F0E2AE, 0))*t2+f31(0x82790637, -2)-(f31(0xBA00E2B8, 0)*t2+f31(0x91A98E62, -2))*t) * t /
+ ((x2.exp<0) ? f31(exp2((x2.exp>-32) ? (x2.m>>-x2.exp) : 0, 30), 0) : f31(exp2((x2.m<<x2.exp)&0x7FFFFFFF, 22), x2.m>>(31-x2.exp)));
+ return (!C || sign) ? fixed2half<R,31,false,true,true>(0x80000000-(e.m>>(C-e.exp)), 14+C, sign&(C-1U)) :
+ (e.exp<-25) ? underflow<R>() : fixed2half<R,30,false,false,true>(e.m>>1, e.exp+14, 0, e.m&1);
+ }
+
+ /// Gamma function and postprocessing.
+ /// This approximates the value of either the gamma function or its logarithm directly in Q1.31.
+ /// \tparam R rounding mode to use
+ /// \tparam L `true` for logarithm of gamma function, `false` for gamma function
+ /// \param arg half-precision floating-point value
+ /// \return lgamma/tgamma(\a arg) in half-precision
+ /// \exception FE_OVERFLOW on overflows
+ /// \exception FE_UNDERFLOW on underflows
+ /// \exception FE_INEXACT if \a arg is not a positive integer
+ template<std::float_round_style R,bool L> unsigned int gamma(unsigned int arg)
+ {
+/* static const double p[] ={ 2.50662827563479526904, 225.525584619175212544, -268.295973841304927459, 80.9030806934622512966, -5.00757863970517583837, 0.0114684895434781459556 };
+ double t = arg + 4.65, s = p[0];
+ for(unsigned int i=0; i<5; ++i)
+ s += p[i+1] / (arg+i);
+ return std::log(s) + (arg-0.5)*std::log(t) - t;
+*/ static const f31 pi(0xC90FDAA2, 1), lbe(0xB8AA3B29, 0);
+ unsigned int abs = arg & 0x7FFF, sign = arg & 0x8000;
+ bool bsign = sign != 0;
+ f31 z(abs), x = sign ? (z+f31(0x80000000, 0)) : z, t = x + f31(0x94CCCCCD, 2), s =
+ f31(0xA06C9901, 1) + f31(0xBBE654E2, -7)/(x+f31(0x80000000, 2)) + f31(0xA1CE6098, 6)/(x+f31(0x80000000, 1))
+ + f31(0xE1868CB7, 7)/x - f31(0x8625E279, 8)/(x+f31(0x80000000, 0)) - f31(0xA03E158F, 2)/(x+f31(0xC0000000, 1));
+ int i = (s.exp>=2) + (s.exp>=4) + (s.exp>=8) + (s.exp>=16);
+ s = f31((static_cast<uint32>(s.exp)<<(31-i))+(log2(s.m>>1, 28)>>i), i) / lbe;
+ if(x.exp != -1 || x.m != 0x80000000)
+ {
+ i = (t.exp>=2) + (t.exp>=4) + (t.exp>=8);
+ f31 l = f31((static_cast<uint32>(t.exp)<<(31-i))+(log2(t.m>>1, 30)>>i), i) / lbe;
+ s = (x.exp<-1) ? (s-(f31(0x80000000, -1)-x)*l) : (s+(x-f31(0x80000000, -1))*l);
+ }
+ s = x.exp ? (s-t) : (t-s);
+ if(bsign)
+ {
+ if(z.exp >= 0)
+ {
+ sign &= (L|((z.m>>(31-z.exp))&1)) - 1;
+ for(z=f31((z.m<<(1+z.exp))&0xFFFFFFFF, -1); z.m<0x80000000; z.m<<=1,--z.exp) ;
+ }
+ if(z.exp == -1)
+ z = f31(0x80000000, 0) - z;
+ if(z.exp < -1)
+ {
+ z = z * pi;
+ z.m = sincos(z.m>>(1-z.exp), 30).first;
+ for(z.exp=1; z.m<0x80000000; z.m<<=1,--z.exp) ;
+ }
+ else
+ z = f31(0x80000000, 0);
+ }
+ if(L)
+ {
+ if(bsign)
+ {
+ f31 l(0x92868247, 0);
+ if(z.exp < 0)
+ {
+ uint32 m = log2((z.m+1)>>1, 27);
+ z = f31(-((static_cast<uint32>(z.exp)<<26)+(m>>5)), 5);
+ for(; z.m<0x80000000; z.m<<=1,--z.exp) ;
+ l = l + z / lbe;
+ }
+ sign = static_cast<unsigned>(x.exp&&(l.exp<s.exp||(l.exp==s.exp&&l.m<s.m))) << 15;
+ s = sign ? (s-l) : x.exp ? (l-s) : (l+s);
+ }
+ else
+ {
+ sign = static_cast<unsigned>(x.exp==0) << 15;
+ if(s.exp < -24)
+ return underflow<R>(sign);
+ if(s.exp > 15)
+ return overflow<R>(sign);
+ }
+ }
+ else
+ {
+ s = s * lbe;
+ uint32 m;
+ if(s.exp < 0)
+ {
+ m = s.m >> -s.exp;
+ s.exp = 0;
+ }
+ else
+ {
+ m = (s.m<<s.exp) & 0x7FFFFFFF;
+ s.exp = (s.m>>(31-s.exp));
+ }
+ s.m = exp2(m, 27);
+ if(!x.exp)
+ s = f31(0x80000000, 0) / s;
+ if(bsign)
+ {
+ if(z.exp < 0)
+ s = s * z;
+ s = pi / s;
+ if(s.exp < -24)
+ return underflow<R>(sign);
+ }
+ else if(z.exp > 0 && !(z.m&((1<<(31-z.exp))-1)))
+ return ((s.exp+14)<<10) + (s.m>>21);
+ if(s.exp > 15)
+ return overflow<R>(sign);
+ }
+ return fixed2half<R,31,false,false,true>(s.m, s.exp+14, sign);
+ }
+ /// \}
+
+ template<typename,typename,std::float_round_style> struct half_caster;
+ }
+
+ /// Half-precision floating-point type.
+ /// This class implements an IEEE-conformant half-precision floating-point type with the usual arithmetic
+ /// operators and conversions. It is implicitly convertible to single-precision floating-point, which makes arithmetic
+ /// expressions and functions with mixed-type operands be of the most precise operand type.
+ ///
+ /// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and
+ /// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which
+ /// means it can be standard-conformantly copied using raw binary copies. In this context a few more words about the
+ /// actual size of the type are in order. Although the half type represents an IEEE 16-bit format, it does not necessarily
+ /// have to be exactly 16 bits in size. But on any reasonable implementation the actual binary representation of this type
+ /// will most probably not involve any additional "magic" or padding beyond the simple binary representation of the
+ /// underlying 16-bit IEEE number, even if not strictly guaranteed by the standard. Even then it only has an actual size
+ /// of 16 bits if your C++ implementation supports an unsigned integer type of exactly 16 bits' width, but this should be
+ /// the case on nearly any reasonable platform.
+ ///
+ /// So if your C++ implementation is not totally exotic and does not impose special alignment requirements, it is a
+ /// reasonable assumption that the data of a half just comprises the 2 bytes of the underlying IEEE representation.
+ class half
+ {
+ public:
+ /// \name Construction and assignment
+ /// \{
+
+ /// Default constructor.
+ /// This initializes the half to 0. Although this does not match the builtin types' default-initialization semantics
+ /// and may be less efficient than no initialization, it is needed to provide proper value-initialization semantics.
+ HALF_CONSTEXPR half() HALF_NOEXCEPT : data_() {}
+
+ /// Conversion constructor.
+ /// \param rhs float to convert
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ explicit half(float rhs) : data_(static_cast<detail::uint16>(detail::float2half<round_style>(rhs))) {}
+
+ /// Conversion to single-precision.
+ /// \return single precision value representing expression value
+ operator float() const { return detail::half2float<float>(data_); }
+
+ /// Assignment operator.
+ /// \param rhs single-precision value to copy from
+ /// \return reference to this half
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ half& operator=(float rhs) { data_ = static_cast<detail::uint16>(detail::float2half<round_style>(rhs)); return *this; }
+
+ /// \}
+ /// \name Arithmetic updates
+ /// \{
+
+ /// Arithmetic assignment.
+ /// \param rhs half expression to add
+ /// \return reference to this half
+ /// \exception FE_... according to operator+(half,half)
+ half& operator+=(half rhs) { return *this = *this + rhs; }
+
+ /// Arithmetic assignment.
+ /// \param rhs half expression to subtract
+ /// \return reference to this half
+ /// \exception FE_... according to operator-(half,half)
+ half& operator-=(half rhs) { return *this = *this - rhs; }
+
+ /// Arithmetic assignment.
+ /// \param rhs half expression to multiply with
+ /// \return reference to this half
+ /// \exception FE_... according to operator*(half,half)
+ half& operator*=(half rhs) { return *this = *this * rhs; }
+
+ /// Arithmetic assignment.
+ /// \param rhs half expression to divide by
+ /// \return reference to this half
+ /// \exception FE_... according to operator/(half,half)
+ half& operator/=(half rhs) { return *this = *this / rhs; }
+
+ /// Arithmetic assignment.
+ /// \param rhs single-precision value to add
+ /// \return reference to this half
+ /// \exception FE_... according to operator=()
+ half& operator+=(float rhs) { return *this = *this + rhs; }
+
+ /// Arithmetic assignment.
+ /// \param rhs single-precision value to subtract
+ /// \return reference to this half
+ /// \exception FE_... according to operator=()
+ half& operator-=(float rhs) { return *this = *this - rhs; }
+
+ /// Arithmetic assignment.
+ /// \param rhs single-precision value to multiply with
+ /// \return reference to this half
+ /// \exception FE_... according to operator=()
+ half& operator*=(float rhs) { return *this = *this * rhs; }
+
+ /// Arithmetic assignment.
+ /// \param rhs single-precision value to divide by
+ /// \return reference to this half
+ /// \exception FE_... according to operator=()
+ half& operator/=(float rhs) { return *this = *this / rhs; }
+
+ /// \}
+ /// \name Increment and decrement
+ /// \{
+
+ /// Prefix increment.
+ /// \return incremented half value
+ /// \exception FE_... according to operator+(half,half)
+ half& operator++() { return *this = *this + half(detail::binary, 0x3C00); }
+
+ /// Prefix decrement.
+ /// \return decremented half value
+ /// \exception FE_... according to operator-(half,half)
+ half& operator--() { return *this = *this + half(detail::binary, 0xBC00); }
+
+ /// Postfix increment.
+ /// \return non-incremented half value
+ /// \exception FE_... according to operator+(half,half)
+ half operator++(int) { half out(*this); ++*this; return out; }
+
+ /// Postfix decrement.
+ /// \return non-decremented half value
+ /// \exception FE_... according to operator-(half,half)
+ half operator--(int) { half out(*this); --*this; return out; }
+ /// \}
+
+ private:
+ /// Rounding mode to use
+ static const std::float_round_style round_style = (std::float_round_style)(HALF_ROUND_STYLE);
+
+ /// Constructor.
+ /// \param bits binary representation to set half to
+ HALF_CONSTEXPR half(detail::binary_t, unsigned int bits) HALF_NOEXCEPT : data_(static_cast<detail::uint16>(bits)) {}
+
+ /// Internal binary representation
+ detail::uint16 data_;
+
+ #ifndef HALF_DOXYGEN_ONLY
+ friend HALF_CONSTEXPR_NOERR bool operator==(half, half);
+ friend HALF_CONSTEXPR_NOERR bool operator!=(half, half);
+ friend HALF_CONSTEXPR_NOERR bool operator<(half, half);
+ friend HALF_CONSTEXPR_NOERR bool operator>(half, half);
+ friend HALF_CONSTEXPR_NOERR bool operator<=(half, half);
+ friend HALF_CONSTEXPR_NOERR bool operator>=(half, half);
+ friend HALF_CONSTEXPR half operator-(half);
+ friend half operator+(half, half);
+ friend half operator-(half, half);
+ friend half operator*(half, half);
+ friend half operator/(half, half);
+ template<typename charT,typename traits> friend std::basic_ostream<charT,traits>& operator<<(std::basic_ostream<charT,traits>&, half);
+ template<typename charT,typename traits> friend std::basic_istream<charT,traits>& operator>>(std::basic_istream<charT,traits>&, half&);
+ friend HALF_CONSTEXPR half fabs(half);
+ friend half fmod(half, half);
+ friend half remainder(half, half);
+ friend half remquo(half, half, int*);
+ friend half fma(half, half, half);
+ friend HALF_CONSTEXPR_NOERR half fmax(half, half);
+ friend HALF_CONSTEXPR_NOERR half fmin(half, half);
+ friend half fdim(half, half);
+ friend half nanh(const char*);
+ friend half exp(half);
+ friend half exp2(half);
+ friend half expm1(half);
+ friend half log(half);
+ friend half log10(half);
+ friend half log2(half);
+ friend half log1p(half);
+ friend half sqrt(half);
+ friend half rsqrt(half);
+ friend half cbrt(half);
+ friend half hypot(half, half);
+ friend half hypot(half, half, half);
+ friend half pow(half, half);
+ friend void sincos(half, half*, half*);
+ friend half sin(half);
+ friend half cos(half);
+ friend half tan(half);
+ friend half asin(half);
+ friend half acos(half);
+ friend half atan(half);
+ friend half atan2(half, half);
+ friend half sinh(half);
+ friend half cosh(half);
+ friend half tanh(half);
+ friend half asinh(half);
+ friend half acosh(half);
+ friend half atanh(half);
+ friend half erf(half);
+ friend half erfc(half);
+ friend half lgamma(half);
+ friend half tgamma(half);
+ friend half ceil(half);
+ friend half floor(half);
+ friend half trunc(half);
+ friend half round(half);
+ friend long lround(half);
+ friend half rint(half);
+ friend long lrint(half);
+ friend half nearbyint(half);
+ #ifdef HALF_ENABLE_CPP11_LONG_LONG
+ friend long long llround(half);
+ friend long long llrint(half);
+ #endif
+ friend half frexp(half, int*);
+ friend half scalbln(half, long);
+ friend half modf(half, half*);
+ friend int ilogb(half);
+ friend half logb(half);
+ friend half nextafter(half, half);
+ friend half nexttoward(half, long double);
+ friend HALF_CONSTEXPR half copysign(half, half);
+ friend HALF_CONSTEXPR int fpclassify(half);
+ friend HALF_CONSTEXPR bool isfinite(half);
+ friend HALF_CONSTEXPR bool isinf(half);
+ friend HALF_CONSTEXPR bool isnan(half);
+ friend HALF_CONSTEXPR bool isnormal(half);
+ friend HALF_CONSTEXPR bool signbit(half);
+ friend HALF_CONSTEXPR bool isgreater(half, half);
+ friend HALF_CONSTEXPR bool isgreaterequal(half, half);
+ friend HALF_CONSTEXPR bool isless(half, half);
+ friend HALF_CONSTEXPR bool islessequal(half, half);
+ friend HALF_CONSTEXPR bool islessgreater(half, half);
+ template<typename,typename,std::float_round_style> friend struct detail::half_caster;
+ friend class std::numeric_limits<half>;
+ #if HALF_ENABLE_CPP11_HASH
+ friend struct std::hash<half>;
+ #endif
+ #if HALF_ENABLE_CPP11_USER_LITERALS
+ friend half literal::operator "" _h(long double);
+ #endif
+ #endif
+ };
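+ // A minimal usage sketch for the class above (illustrative only; it assumes
+ // the vendored third_party/half/include directory is on the include path):
+ //
+ //   #include <iostream>
+ //   #include "half.hpp"
+ //
+ //   int main()
+ //   {
+ //       half_float::half a(3.5f), b(1.25f);
+ //       a += b;                   // arithmetic carried out in half precision
+ //       float f = a;              // implicit conversion back to single precision
+ //       std::cout << f << '\n';   // prints 4.75
+ //       return 0;
+ //   }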
+
+#if HALF_ENABLE_CPP11_USER_LITERALS
+ namespace literal
+ {
+ /// Half literal.
+ /// While this returns a properly rounded half-precision value, half literals can unfortunately not be constant
+ /// expressions due to rather involved conversions. So don't expect this to be a literal literal without involving
+ /// conversion operations at runtime. It is a convenience feature, not a performance optimization.
+ /// \param value literal value
+ /// \return half with the given value (possibly rounded)
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half operator "" _h(long double value) { return half(detail::binary, detail::float2half<half::round_style>(value)); }
+ }
+#endif
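+ // When user-defined literals are enabled, the suffix defined above can be used
+ // as follows (a sketch, assuming HALF_ENABLE_CPP11_USER_LITERALS is set):
+ //
+ //   using namespace half_float::literal;
+ //   half_float::half x = 1.5_h;   // rounded conversion performed at runtime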
+
+ namespace detail
+ {
+ /// Helper class for half casts.
+ /// This class template has to be specialized for all valid cast arguments to define an appropriate static
+ /// `cast` member function and a corresponding `type` member denoting its return type.
+ /// \tparam T destination type
+ /// \tparam U source type
+ /// \tparam R rounding mode to use
+ template<typename T,typename U,std::float_round_style R=(std::float_round_style)(HALF_ROUND_STYLE)> struct half_caster {};
+ template<typename U,std::float_round_style R> struct half_caster<half,U,R>
+ {
+ #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS
+ static_assert(std::is_arithmetic<U>::value, "half_cast from non-arithmetic type unsupported");
+ #endif
+
+ static half cast(U arg) { return cast_impl(arg, is_float<U>()); };
+
+ private:
+ static half cast_impl(U arg, true_type) { return half(binary, float2half<R>(arg)); }
+ static half cast_impl(U arg, false_type) { return half(binary, int2half<R>(arg)); }
+ };
+ template<typename T,std::float_round_style R> struct half_caster<T,half,R>
+ {
+ #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS
+ static_assert(std::is_arithmetic<T>::value, "half_cast to non-arithmetic type unsupported");
+ #endif
+
+ static T cast(half arg) { return cast_impl(arg, is_float<T>()); }
+
+ private:
+ static T cast_impl(half arg, true_type) { return half2float<T>(arg.data_); }
+ static T cast_impl(half arg, false_type) { return half2int<R,true,true,T>(arg.data_); }
+ };
+ template<std::float_round_style R> struct half_caster<half,half,R>
+ {
+ static half cast(half arg) { return arg; }
+ };
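+ // These specializations back the library's half_cast<>() front end (not shown
+ // in this hunk); a usage sketch, assuming that front end keeps its usual two
+ // forms (with and without an explicit rounding mode):
+ //
+ //   half_float::half h = half_float::half_cast<half_float::half>(3.7);
+ //   int i = half_float::half_cast<int,std::round_toward_zero>(h);   // 3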
+ }
+}
+
+/// Extensions to the C++ standard library.
+namespace std
+{
+ /// Numeric limits for half-precision floats.
+ /// **See also:** Documentation for [std::numeric_limits](https://en.cppreference.com/w/cpp/types/numeric_limits)
+ template<> class numeric_limits<half_float::half>
+ {
+ public:
+ /// Is template specialization.
+ static HALF_CONSTEXPR_CONST bool is_specialized = true;
+
+ /// Supports signed values.
+ static HALF_CONSTEXPR_CONST bool is_signed = true;
+
+ /// Is not an integer type.
+ static HALF_CONSTEXPR_CONST bool is_integer = false;
+
+ /// Is not exact.
+ static HALF_CONSTEXPR_CONST bool is_exact = false;
+
+ /// Doesn't provide modulo arithmetic.
+ static HALF_CONSTEXPR_CONST bool is_modulo = false;
+
+ /// Has a finite set of values.
+ static HALF_CONSTEXPR_CONST bool is_bounded = true;
+
+ /// IEEE conformant.
+ static HALF_CONSTEXPR_CONST bool is_iec559 = true;
+
+ /// Supports infinity.
+ static HALF_CONSTEXPR_CONST bool has_infinity = true;
+
+ /// Supports quiet NaNs.
+ static HALF_CONSTEXPR_CONST bool has_quiet_NaN = true;
+
+ /// Supports signaling NaNs.
+ static HALF_CONSTEXPR_CONST bool has_signaling_NaN = true;
+
+ /// Supports subnormal values.
+ static HALF_CONSTEXPR_CONST float_denorm_style has_denorm = denorm_present;
+
+ /// Does not detect loss of accuracy as denormalization loss.
+ static HALF_CONSTEXPR_CONST bool has_denorm_loss = false;
+
+ #if HALF_ERRHANDLING_THROWS
+ static HALF_CONSTEXPR_CONST bool traps = true;
+ #else
+ /// Traps only if [HALF_ERRHANDLING_THROW_...](\ref HALF_ERRHANDLING_THROW_INVALID) is activated.
+ static HALF_CONSTEXPR_CONST bool traps = false;
+ #endif
+
+ /// Does not support pre-rounding underflow detection.
+ static HALF_CONSTEXPR_CONST bool tinyness_before = false;
+
+ /// Rounding mode.
+ static HALF_CONSTEXPR_CONST float_round_style round_style = half_float::half::round_style;
+
+ /// Significant digits.
+ static HALF_CONSTEXPR_CONST int digits = 11;
+
+ /// Significant decimal digits.
+ static HALF_CONSTEXPR_CONST int digits10 = 3;
+
+ /// Required decimal digits to represent all possible values.
+ static HALF_CONSTEXPR_CONST int max_digits10 = 5;
+
+ /// Number base.
+ static HALF_CONSTEXPR_CONST int radix = 2;
+
+ /// One more than smallest exponent.
+ static HALF_CONSTEXPR_CONST int min_exponent = -13;
+
+ /// Smallest normalized representable power of 10.
+ static HALF_CONSTEXPR_CONST int min_exponent10 = -4;
+
+ /// One more than largest exponent.
+ static HALF_CONSTEXPR_CONST int max_exponent = 16;
+
+ /// Largest finitely representable power of 10.
+ static HALF_CONSTEXPR_CONST int max_exponent10 = 4;
+
+ /// Smallest positive normal value.
+ static HALF_CONSTEXPR half_float::half min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0400); }
+
+ /// Smallest finite value.
+ static HALF_CONSTEXPR half_float::half lowest() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0xFBFF); }
+
+ /// Largest finite value.
+ static HALF_CONSTEXPR half_float::half max() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7BFF); }
+
+ /// Difference between 1 and next representable value.
+ static HALF_CONSTEXPR half_float::half epsilon() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x1400); }
+
+ /// Maximum rounding error in ULP (units in the last place).
+ static HALF_CONSTEXPR half_float::half round_error() HALF_NOTHROW
+ { return half_float::half(half_float::detail::binary, (round_style==std::round_to_nearest) ? 0x3800 : 0x3C00); }
+
+ /// Positive infinity.
+ static HALF_CONSTEXPR half_float::half infinity() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7C00); }
+
+ /// Quiet NaN.
+ static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7FFF); }
+
+ /// Signaling NaN.
+ static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7DFF); }
+
+ /// Smallest positive subnormal value.
+ static HALF_CONSTEXPR half_float::half denorm_min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0001); }
+ };
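+ // Concrete values behind this specialization, handy as a sanity check:
+ // digits == 11 (10 stored mantissa bits plus the implicit leading bit),
+ // epsilon() == 2^-10 == 0.0009765625, min() == 2^-14 ~= 6.10e-5,
+ // denorm_min() == 2^-24 ~= 5.96e-8 and max() == 65504, e.g.:
+ //
+ //   static_assert(std::numeric_limits<half_float::half>::digits == 11, "");
+ //   float m = std::numeric_limits<half_float::half>::max();   // 65504.0f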
+
+#if HALF_ENABLE_CPP11_HASH
+ /// Hash function for half-precision floats.
+ /// This is only defined if C++11 `std::hash` is supported and enabled.
+ ///
+ /// **See also:** Documentation for [std::hash](https://en.cppreference.com/w/cpp/utility/hash)
+ template<> struct hash<half_float::half>
+ {
+ /// Type of function argument.
+ typedef half_float::half argument_type;
+
+ /// Function return type.
+ typedef size_t result_type;
+
+ /// Compute hash function.
+ /// \param arg half to hash
+ /// \return hash value
+ result_type operator()(argument_type arg) const { return hash<half_float::detail::uint16>()(arg.data_&-static_cast<unsigned>(arg.data_!=0x8000)); }
+ };
+#endif
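+ // With the hash specialization above enabled, half can be used directly as a
+ // key in unordered containers; a sketch (assumes HALF_ENABLE_CPP11_HASH):
+ //
+ //   #include <unordered_set>
+ //   std::unordered_set<half_float::half> seen;
+ //   seen.insert(half_float::half(1.5f));
+ //
+ // Note the masking against 0x8000 in operator() above: it makes -0.0 and +0.0
+ // hash identically, matching the fact that they compare equal.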
+}
+
+namespace half_float
+{
+ /// \anchor compop
+ /// \name Comparison operators
+ /// \{
+
+ /// Comparison for equality.
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if operands equal
+ /// \retval false else
+ /// \exception FE_INVALID if \a x or \a y is NaN
+ inline HALF_CONSTEXPR_NOERR bool operator==(half x, half y)
+ {
+ return !detail::compsignal(x.data_, y.data_) && (x.data_==y.data_ || !((x.data_|y.data_)&0x7FFF));
+ }
+
+ /// Comparison for inequality.
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if operands not equal
+ /// \retval false else
+ /// \exception FE_INVALID if \a x or \a y is NaN
+ inline HALF_CONSTEXPR_NOERR bool operator!=(half x, half y)
+ {
+ return detail::compsignal(x.data_, y.data_) || (x.data_!=y.data_ && ((x.data_|y.data_)&0x7FFF));
+ }
+
+ /// Comparison for less than.
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x less than \a y
+ /// \retval false else
+ /// \exception FE_INVALID if \a x or \a y is NaN
+ inline HALF_CONSTEXPR_NOERR bool operator<(half x, half y)
+ {
+ return !detail::compsignal(x.data_, y.data_) &&
+ ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) < ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));
+ }
+
+ /// Comparison for greater than.
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x greater than \a y
+ /// \retval false else
+ /// \exception FE_INVALID if \a x or \a y is NaN
+ inline HALF_CONSTEXPR_NOERR bool operator>(half x, half y)
+ {
+ return !detail::compsignal(x.data_, y.data_) &&
+ ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) > ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));
+ }
+
+ /// Comparison for less equal.
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x less equal \a y
+ /// \retval false else
+ /// \exception FE_INVALID if \a x or \a y is NaN
+ inline HALF_CONSTEXPR_NOERR bool operator<=(half x, half y)
+ {
+ return !detail::compsignal(x.data_, y.data_) &&
+ ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) <= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));
+ }
+
+ /// Comparison for greater equal.
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x greater equal \a y
+ /// \retval false else
+ /// \exception FE_INVALID if \a x or \a y is NaN
+ inline HALF_CONSTEXPR_NOERR bool operator>=(half x, half y)
+ {
+ return !detail::compsignal(x.data_, y.data_) &&
+ ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) >= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15));
+ }
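+
+ // Illustrative usage sketch (not a normative part of the library): as with the built-in
+ // floating-point types, NaN operands compare unordered, so every comparison involving a NaN
+ // yields false except operator!=, which yields true:
+ //
+ //   using half_float::half;
+ //   half nan = std::numeric_limits<half>::quiet_NaN();
+ //   bool eq = (nan == nan);   // false
+ //   bool ne = (nan != nan);   // true
+ //   bool lt = (nan <  nan);   // false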
+
+ /// \}
+ /// \anchor arithmetics
+ /// \name Arithmetic operators
+ /// \{
+
+ /// Identity.
+ /// \param arg operand
+ /// \return unchanged operand
+ inline HALF_CONSTEXPR half operator+(half arg) { return arg; }
+
+ /// Negation.
+ /// \param arg operand
+ /// \return negated operand
+ inline HALF_CONSTEXPR half operator-(half arg) { return half(detail::binary, arg.data_^0x8000); }
+
+ /// Addition.
+ /// This operation is exact to rounding for all rounding modes.
+ /// \param x left operand
+ /// \param y right operand
+ /// \return sum of half expressions
+ /// \exception FE_INVALID if \a x and \a y are infinities with different signs or signaling NaNs
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half operator+(half x, half y)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)+detail::half2float<detail::internal_t>(y.data_)));
+ #else
+ int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF;
+ bool sub = ((x.data_^y.data_)&0x8000) != 0;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) : (absy!=0x7C00) ? x.data_ :
+ (sub && absx==0x7C00) ? detail::invalid() : y.data_);
+ if(!absx)
+ return absy ? y : half(detail::binary, (half::round_style==std::round_toward_neg_infinity) ? (x.data_|y.data_) : (x.data_&y.data_));
+ if(!absy)
+ return x;
+ unsigned int sign = ((sub && absy>absx) ? y.data_ : x.data_) & 0x8000;
+ if(absy > absx)
+ std::swap(absx, absy);
+ int exp = (absx>>10) + (absx<=0x3FF), d = exp - (absy>>10) - (absy<=0x3FF), mx = ((absx&0x3FF)|((absx>0x3FF)<<10)) << 3, my;
+ if(d < 13)
+ {
+ my = ((absy&0x3FF)|((absy>0x3FF)<<10)) << 3;
+ my = (my>>d) | ((my&((1<<d)-1))!=0);
+ }
+ else
+ my = 1;
+ if(sub)
+ {
+ if(!(mx-=my))
+ return half(detail::binary, static_cast<unsigned>(half::round_style==std::round_toward_neg_infinity)<<15);
+ for(; mx<0x2000 && exp>1; mx<<=1,--exp) ;
+ }
+ else
+ {
+ mx += my;
+ int i = mx >> 14;
+ if((exp+=i) > 30)
+ return half(detail::binary, detail::overflow<half::round_style>(sign));
+ mx = (mx>>i) | (mx&i);
+ }
+ return half(detail::binary, detail::rounded<half::round_style,false>(sign+((exp-1)<<10)+(mx>>3), (mx>>2)&1, (mx&0x3)!=0));
+ #endif
+ }
+
+ /// Subtraction.
+ /// This operation is exact to rounding for all rounding modes.
+ /// \param x left operand
+ /// \param y right operand
+ /// \return difference of half expressions
+ /// \exception FE_INVALID if \a x and \a y are infinities with equal signs or signaling NaNs
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half operator-(half x, half y)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)-detail::half2float<detail::internal_t>(y.data_)));
+ #else
+ return x + -y;
+ #endif
+ }
+
+ /// Multiplication.
+ /// This operation is exact to rounding for all rounding modes.
+ /// \param x left operand
+ /// \param y right operand
+ /// \return product of half expressions
+ /// \exception FE_INVALID if multiplying 0 with infinity or if \a x or \a y is signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half operator*(half x, half y)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)*detail::half2float<detail::internal_t>(y.data_)));
+ #else
+ int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, exp = -16;
+ unsigned int sign = (x.data_^y.data_) & 0x8000;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :
+ ((absx==0x7C00 && !absy)||(absy==0x7C00 && !absx)) ? detail::invalid() : (sign|0x7C00));
+ if(!absx || !absy)
+ return half(detail::binary, sign);
+ for(; absx<0x400; absx<<=1,--exp) ;
+ for(; absy<0x400; absy<<=1,--exp) ;
+ detail::uint32 m = static_cast<detail::uint32>((absx&0x3FF)|0x400) * static_cast<detail::uint32>((absy&0x3FF)|0x400);
+ int i = m >> 21, s = m & i;
+ exp += (absx>>10) + (absy>>10) + i;
+ if(exp > 29)
+ return half(detail::binary, detail::overflow<half::round_style>(sign));
+ else if(exp < -11)
+ return half(detail::binary, detail::underflow<half::round_style>(sign));
+ return half(detail::binary, detail::fixed2half<half::round_style,20,false,false,false>(m>>i, exp, sign, s));
+ #endif
+ }
+
+ /// Division.
+ /// This operation is exact to rounding for all rounding modes.
+ /// \param x left operand
+ /// \param y right operand
+ /// \return quotient of half expressions
+ /// \exception FE_INVALID if dividing 0s or infinities with each other or if \a x or \a y is signaling NaN
+ /// \exception FE_DIVBYZERO if dividing finite value by 0
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half operator/(half x, half y)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(detail::half2float<detail::internal_t>(x.data_)/detail::half2float<detail::internal_t>(y.data_)));
+ #else
+ int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, exp = 14;
+ unsigned int sign = (x.data_^y.data_) & 0x8000;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :
+ (absx==absy) ? detail::invalid() : (sign|((absx==0x7C00) ? 0x7C00 : 0)));
+ if(!absx)
+ return half(detail::binary, absy ? sign : detail::invalid());
+ if(!absy)
+ return half(detail::binary, detail::pole(sign));
+ for(; absx<0x400; absx<<=1,--exp) ;
+ for(; absy<0x400; absy<<=1,++exp) ;
+ detail::uint32 mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400;
+ int i = mx < my;
+ exp += (absx>>10) - (absy>>10) - i;
+ if(exp > 29)
+ return half(detail::binary, detail::overflow<half::round_style>(sign));
+ else if(exp < -11)
+ return half(detail::binary, detail::underflow<half::round_style>(sign));
+ mx <<= 12 + i;
+ my <<= 1;
+ return half(detail::binary, detail::fixed2half<half::round_style,11,false,false,false>(mx/my, exp, sign, mx%my!=0));
+ #endif
+ }
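+
+ // Illustrative usage sketch (not a normative part of the library): each operator above
+ // computes the exact result and rounds it once according to half::round_style, so a compound
+ // expression accumulates one rounding per operation:
+ //
+ //   half_float::half a(0.1f), b(0.2f);
+ //   half_float::half s = a + b;         // exact sum of the two half values, rounded once
+ //   half_float::half q = (a * b) / s;   // two further roundings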
+
+ /// \}
+ /// \anchor streaming
+ /// \name Input and output
+ /// \{
+
+ /// Output operator.
+ /// This uses the built-in functionality for streaming out floating-point numbers.
+ /// \param out output stream to write into
+ /// \param arg half expression to write
+ /// \return reference to output stream
+ template<typename charT,typename traits> std::basic_ostream<charT,traits>& operator<<(std::basic_ostream<charT,traits> &out, half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return out << detail::half2float<detail::internal_t>(arg.data_);
+ #else
+ return out << detail::half2float<float>(arg.data_);
+ #endif
+ }
+
+ /// Input operator.
+ /// This uses the built-in functionality for streaming in floating-point numbers, specifically double precision floating
+ /// point numbers (unless overridden with [HALF_ARITHMETIC_TYPE](\ref HALF_ARITHMETIC_TYPE)). So the input string is first
+ /// rounded to double precision using the underlying platform's current floating-point rounding mode before being rounded
+ /// to half-precision using the library's half-precision rounding mode.
+ /// \param in input stream to read from
+ /// \param arg half to read into
+ /// \return reference to input stream
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ template<typename charT,typename traits> std::basic_istream<charT,traits>& operator>>(std::basic_istream<charT,traits> &in, half &arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ detail::internal_t f;
+ #else
+ double f;
+ #endif
+ if(in >> f)
+ arg.data_ = detail::float2half<half::round_style>(f);
+ return in;
+ }
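+
+ // Illustrative usage sketch (not a normative part of the library), assuming the user includes
+ // <sstream>; by default input is parsed as double and then rounded to half-precision:
+ //
+ //   std::istringstream in("0.25");
+ //   half_float::half h;
+ //   in >> h;                            // h == 0.25
+ //   std::ostringstream out;
+ //   out << h;                           // out.str() == "0.25"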
+
+ /// \}
+ /// \anchor basic
+ /// \name Basic mathematical operations
+ /// \{
+
+ /// Absolute value.
+ /// **See also:** Documentation for [std::fabs](https://en.cppreference.com/w/cpp/numeric/math/fabs).
+ /// \param arg operand
+ /// \return absolute value of \a arg
+ inline HALF_CONSTEXPR half fabs(half arg) { return half(detail::binary, arg.data_&0x7FFF); }
+
+ /// Absolute value.
+ /// **See also:** Documentation for [std::abs](https://en.cppreference.com/w/cpp/numeric/math/fabs).
+ /// \param arg operand
+ /// \return absolute value of \a arg
+ inline HALF_CONSTEXPR half abs(half arg) { return fabs(arg); }
+
+ /// Remainder of division.
+ /// **See also:** Documentation for [std::fmod](https://en.cppreference.com/w/cpp/numeric/math/fmod).
+ /// \param x first operand
+ /// \param y second operand
+ /// \return remainder of floating-point division.
+ /// \exception FE_INVALID if \a x is infinite or \a y is 0 or if \a x or \a y is signaling NaN
+ inline half fmod(half x, half y)
+ {
+ unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, sign = x.data_ & 0x8000;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :
+ (absx==0x7C00) ? detail::invalid() : x.data_);
+ if(!absy)
+ return half(detail::binary, detail::invalid());
+ if(!absx)
+ return x;
+ if(absx == absy)
+ return half(detail::binary, sign);
+ return half(detail::binary, sign|detail::mod<false,false>(absx, absy));
+ }
+
+ /// Remainder of division.
+ /// **See also:** Documentation for [std::remainder](https://en.cppreference.com/w/cpp/numeric/math/remainder).
+ /// \param x first operand
+ /// \param y second operand
+ /// \return remainder of floating-point division.
+ /// \exception FE_INVALID if \a x is infinite or \a y is 0 or if \a x or \a y is signaling NaN
+ inline half remainder(half x, half y)
+ {
+ unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, sign = x.data_ & 0x8000;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :
+ (absx==0x7C00) ? detail::invalid() : x.data_);
+ if(!absy)
+ return half(detail::binary, detail::invalid());
+ if(absx == absy)
+ return half(detail::binary, sign);
+ return half(detail::binary, sign^detail::mod<false,true>(absx, absy));
+ }
+
+ /// Remainder of division.
+ /// **See also:** Documentation for [std::remquo](https://en.cppreference.com/w/cpp/numeric/math/remquo).
+ /// \param x first operand
+ /// \param y second operand
+ /// \param quo address to store some bits of quotient at
+ /// \return remainder of floating-point division.
+ /// \exception FE_INVALID if \a x is infinite or \a y is 0 or if \a x or \a y is signaling NaN
+ inline half remquo(half x, half y, int *quo)
+ {
+ unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, value = x.data_ & 0x8000;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :
+ (absx==0x7C00) ? detail::invalid() : (*quo = 0, x.data_));
+ if(!absy)
+ return half(detail::binary, detail::invalid());
+ bool qsign = ((value^y.data_)&0x8000) != 0;
+ int q = 1;
+ if(absx != absy)
+ value ^= detail::mod<true, true>(absx, absy, &q);
+ return *quo = qsign ? -q : q, half(detail::binary, value);
+ }
+
+ /// Fused multiply add.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::fma](https://en.cppreference.com/w/cpp/numeric/math/fma).
+ /// \param x first operand
+ /// \param y second operand
+ /// \param z third operand
+ /// \return ( \a x * \a y ) + \a z rounded as one operation.
+ /// \exception FE_INVALID according to operator*() and operator+() unless any argument is a quiet NaN and no argument is a signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding the final addition
+ inline half fma(half x, half y, half z)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ detail::internal_t fx = detail::half2float<detail::internal_t>(x.data_), fy = detail::half2float<detail::internal_t>(y.data_), fz = detail::half2float<detail::internal_t>(z.data_);
+ #if HALF_ENABLE_CPP11_CMATH && FP_FAST_FMA
+ return half(detail::binary, detail::float2half<half::round_style>(std::fma(fx, fy, fz)));
+ #else
+ return half(detail::binary, detail::float2half<half::round_style>(fx*fy+fz));
+ #endif
+ #else
+ int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, absz = z.data_ & 0x7FFF, exp = -15;
+ unsigned int sign = (x.data_^y.data_) & 0x8000;
+ bool sub = ((sign^z.data_)&0x8000) != 0;
+ if(absx >= 0x7C00 || absy >= 0x7C00 || absz >= 0x7C00)
+ return (absx>0x7C00 || absy>0x7C00 || absz>0x7C00) ? half(detail::binary, detail::signal(x.data_, y.data_, z.data_)) :
+ (absx==0x7C00) ? half(detail::binary, (!absy || (sub && absz==0x7C00)) ? detail::invalid() : (sign|0x7C00)) :
+ (absy==0x7C00) ? half(detail::binary, (!absx || (sub && absz==0x7C00)) ? detail::invalid() : (sign|0x7C00)) : z;
+ if(!absx || !absy)
+ return absz ? z : half(detail::binary, (half::round_style==std::round_toward_neg_infinity) ? (z.data_|sign) : (z.data_&sign));
+ for(; absx<0x400; absx<<=1,--exp) ;
+ for(; absy<0x400; absy<<=1,--exp) ;
+ detail::uint32 m = static_cast<detail::uint32>((absx&0x3FF)|0x400) * static_cast<detail::uint32>((absy&0x3FF)|0x400);
+ int i = m >> 21;
+ exp += (absx>>10) + (absy>>10) + i;
+ m <<= 3 - i;
+ if(absz)
+ {
+ int expz = 0;
+ for(; absz<0x400; absz<<=1,--expz) ;
+ expz += absz >> 10;
+ detail::uint32 mz = static_cast<detail::uint32>((absz&0x3FF)|0x400) << 13;
+ if(expz > exp || (expz == exp && mz > m))
+ {
+ std::swap(m, mz);
+ std::swap(exp, expz);
+ if(sub)
+ sign = z.data_ & 0x8000;
+ }
+ int d = exp - expz;
+ mz = (d<23) ? ((mz>>d)|((mz&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;
+ if(sub)
+ {
+ m = m - mz;
+ if(!m)
+ return half(detail::binary, static_cast<unsigned>(half::round_style==std::round_toward_neg_infinity)<<15);
+ for(; m<0x800000; m<<=1,--exp) ;
+ }
+ else
+ {
+ m += mz;
+ i = m >> 24;
+ m = (m>>i) | (m&i);
+ exp += i;
+ }
+ }
+ if(exp > 30)
+ return half(detail::binary, detail::overflow<half::round_style>(sign));
+ else if(exp < -10)
+ return half(detail::binary, detail::underflow<half::round_style>(sign));
+ return half(detail::binary, detail::fixed2half<half::round_style,23,false,false,false>(m, exp-1, sign));
+ #endif
+ }
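+
+ // Illustrative usage sketch (not a normative part of the library): fma() rounds x*y+z as a
+ // single operation, which can differ from the separately rounded expression; with the default
+ // round-to-nearest style:
+ //
+ //   half_float::half x(1.0009765625f), y(1.0009765625f), z(-1.001953125f);
+ //   half_float::half fused    = fma(x, y, z);   // 2^-20, the exact result
+ //   half_float::half separate = x * y + z;      // 0, the product was rounded before the addition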
+
+ /// Maximum of half expressions.
+ /// **See also:** Documentation for [std::fmax](https://en.cppreference.com/w/cpp/numeric/math/fmax).
+ /// \param x first operand
+ /// \param y second operand
+ /// \return maximum of operands, ignoring quiet NaNs
+ /// \exception FE_INVALID if \a x or \a y is signaling NaN
+ inline HALF_CONSTEXPR_NOERR half fmax(half x, half y)
+ {
+ return half(detail::binary, (!isnan(y) && (isnan(x) || (x.data_^(0x8000|(0x8000-(x.data_>>15)))) <
+ (y.data_^(0x8000|(0x8000-(y.data_>>15)))))) ? detail::select(y.data_, x.data_) : detail::select(x.data_, y.data_));
+ }
+
+ /// Minimum of half expressions.
+ /// **See also:** Documentation for [std::fmin](https://en.cppreference.com/w/cpp/numeric/math/fmin).
+ /// \param x first operand
+ /// \param y second operand
+ /// \return minimum of operands, ignoring quiet NaNs
+ /// \exception FE_INVALID if \a x or \a y is signaling NaN
+ inline HALF_CONSTEXPR_NOERR half fmin(half x, half y)
+ {
+ return half(detail::binary, (!isnan(y) && (isnan(x) || (x.data_^(0x8000|(0x8000-(x.data_>>15)))) >
+ (y.data_^(0x8000|(0x8000-(y.data_>>15)))))) ? detail::select(y.data_, x.data_) : detail::select(x.data_, y.data_));
+ }
+
+ /// Positive difference.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::fdim](https://en.cppreference.com/w/cpp/numeric/math/fdim).
+ /// \param x first operand
+ /// \param y second operand
+ /// \return \a x - \a y or 0 if difference negative
+ /// \exception FE_... according to operator-(half,half)
+ inline half fdim(half x, half y)
+ {
+ if(isnan(x) || isnan(y))
+ return half(detail::binary, detail::signal(x.data_, y.data_));
+ return (x.data_^(0x8000|(0x8000-(x.data_>>15)))) <= (y.data_^(0x8000|(0x8000-(y.data_>>15)))) ? half(detail::binary, 0) : (x-y);
+ }
+
+ /// Get NaN value.
+ /// **See also:** Documentation for [std::nan](https://en.cppreference.com/w/cpp/numeric/math/nan).
+ /// \param arg string code
+ /// \return quiet NaN
+ inline half nanh(const char *arg)
+ {
+ unsigned int value = 0x7FFF;
+ while(*arg)
+ value ^= static_cast<unsigned>(*arg++) & 0xFF;
+ return half(detail::binary, value);
+ }
+
+ /// \}
+ /// \anchor exponential
+ /// \name Exponential functions
+ /// \{
+
+ /// Exponential function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::exp](https://en.cppreference.com/w/cpp/numeric/math/exp).
+ /// \param arg function argument
+ /// \return e raised to \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half exp(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::exp(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, e = (abs>>10) + (abs<=0x3FF), exp;
+ if(!abs)
+ return half(detail::binary, 0x3C00);
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? (0x7C00&((arg.data_>>15)-1U)) : detail::signal(arg.data_));
+ if(abs >= 0x4C80)
+ return half(detail::binary, (arg.data_&0x8000) ? detail::underflow<half::round_style>() : detail::overflow<half::round_style>());
+ detail::uint32 m = detail::multiply64(static_cast<detail::uint32>((abs&0x3FF)+((abs>0x3FF)<<10))<<21, 0xB8AA3B29);
+ if(e < 14)
+ {
+ exp = 0;
+ m >>= 14 - e;
+ }
+ else
+ {
+ exp = m >> (45-e);
+ m = (m<<(e-14)) & 0x7FFFFFFF;
+ }
+ return half(detail::binary, detail::exp2_post<half::round_style>(m, exp, (arg.data_&0x8000)!=0, 0, 26));
+ #endif
+ }
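+
+ // Illustrative usage sketch (not a normative part of the library):
+ //
+ //   half_float::half e = exp(half_float::half(1.0f));   // ~2.718, correctly rounded
+ //   half_float::half l = log(e);                        // ~1.0, up to the two roundings involved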
+
+ /// Binary exponential.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::exp2](https://en.cppreference.com/w/cpp/numeric/math/exp2).
+ /// \param arg function argument
+ /// \return 2 raised to \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half exp2(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::exp2(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, e = (abs>>10) + (abs<=0x3FF), exp = (abs&0x3FF) + ((abs>0x3FF)<<10);
+ if(!abs)
+ return half(detail::binary, 0x3C00);
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? (0x7C00&((arg.data_>>15)-1U)) : detail::signal(arg.data_));
+ if(abs >= 0x4E40)
+ return half(detail::binary, (arg.data_&0x8000) ? detail::underflow<half::round_style>() : detail::overflow<half::round_style>());
+ return half(detail::binary, detail::exp2_post<half::round_style>(
+ (static_cast<detail::uint32>(exp)<<(6+e))&0x7FFFFFFF, exp>>(25-e), (arg.data_&0x8000)!=0, 0, 28));
+ #endif
+ }
+
+ /// Exponential minus one.
+ /// This function may be 1 ULP off the correctly rounded exact result in <0.05% of inputs for `std::round_to_nearest`
+ /// and in <1% of inputs for any other rounding mode.
+ ///
+ /// **See also:** Documentation for [std::expm1](https://en.cppreference.com/w/cpp/numeric/math/expm1).
+ /// \param arg function argument
+ /// \return e raised to \a arg and subtracted by 1
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half expm1(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::expm1(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000, e = (abs>>10) + (abs<=0x3FF), exp;
+ if(!abs)
+ return arg;
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? (0x7C00+(sign>>1)) : detail::signal(arg.data_));
+ if(abs >= 0x4A00)
+ return half(detail::binary, (arg.data_&0x8000) ? detail::rounded<half::round_style,true>(0xBBFF, 1, 1) : detail::overflow<half::round_style>());
+ detail::uint32 m = detail::multiply64(static_cast<detail::uint32>((abs&0x3FF)+((abs>0x3FF)<<10))<<21, 0xB8AA3B29);
+ if(e < 14)
+ {
+ exp = 0;
+ m >>= 14 - e;
+ }
+ else
+ {
+ exp = m >> (45-e);
+ m = (m<<(e-14)) & 0x7FFFFFFF;
+ }
+ m = detail::exp2(m);
+ if(sign)
+ {
+ int s = 0;
+ if(m > 0x80000000)
+ {
+ ++exp;
+ m = detail::divide64(0x80000000, m, s);
+ }
+ m = 0x80000000 - ((m>>exp)|((m&((static_cast<detail::uint32>(1)<<exp)-1))!=0)|s);
+ exp = 0;
+ }
+ else
+ m -= (exp<31) ? (0x80000000>>exp) : 1;
+ for(exp+=14; m<0x80000000 && exp; m<<=1,--exp) ;
+ if(exp > 29)
+ return half(detail::binary, detail::overflow<half::round_style>());
+ return half(detail::binary, detail::rounded<half::round_style,true>(sign+(exp<<10)+(m>>21), (m>>20)&1, (m&0xFFFFF)!=0));
+ #endif
+ }
+
+ /// Natural logarithm.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::log](https://en.cppreference.com/w/cpp/numeric/math/log).
+ /// \param arg function argument
+ /// \return logarithm of \a arg to base e
+ /// \exception FE_INVALID for signaling NaN or negative argument
+ /// \exception FE_DIVBYZERO for 0
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half log(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::log(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp = -15;
+ if(!abs)
+ return half(detail::binary, detail::pole(0x8000));
+ if(arg.data_ & 0x8000)
+ return half(detail::binary, (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs >= 0x7C00)
+ return (abs==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));
+ for(; abs<0x400; abs<<=1,--exp) ;
+ exp += abs >> 10;
+ return half(detail::binary, detail::log2_post<half::round_style,0xB8AA3B2A>(
+ detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 27)+8, exp, 17));
+ #endif
+ }
+
+ /// Common logarithm.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::log10](https://en.cppreference.com/w/cpp/numeric/math/log10).
+ /// \param arg function argument
+ /// \return logarithm of \a arg to base 10
+ /// \exception FE_INVALID for signaling NaN or negative argument
+ /// \exception FE_DIVBYZERO for 0
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half log10(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::log10(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp = -15;
+ if(!abs)
+ return half(detail::binary, detail::pole(0x8000));
+ if(arg.data_ & 0x8000)
+ return half(detail::binary, (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs >= 0x7C00)
+ return (abs==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));
+ switch(abs)
+ {
+ case 0x4900: return half(detail::binary, 0x3C00);
+ case 0x5640: return half(detail::binary, 0x4000);
+ case 0x63D0: return half(detail::binary, 0x4200);
+ case 0x70E2: return half(detail::binary, 0x4400);
+ }
+ for(; abs<0x400; abs<<=1,--exp) ;
+ exp += abs >> 10;
+ return half(detail::binary, detail::log2_post<half::round_style,0xD49A784C>(
+ detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 27)+8, exp, 16));
+ #endif
+ }
+
+ /// Binary logarithm.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::log2](https://en.cppreference.com/w/cpp/numeric/math/log2).
+ /// \param arg function argument
+ /// \return logarithm of \a arg to base 2
+ /// \exception FE_INVALID for signaling NaN or negative argument
+ /// \exception FE_DIVBYZERO for 0
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half log2(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::log2(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp = -15, s = 0;
+ if(!abs)
+ return half(detail::binary, detail::pole(0x8000));
+ if(arg.data_ & 0x8000)
+ return half(detail::binary, (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs >= 0x7C00)
+ return (abs==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));
+ if(abs == 0x3C00)
+ return half(detail::binary, 0);
+ for(; abs<0x400; abs<<=1,--exp) ;
+ exp += (abs>>10);
+ if(!(abs&0x3FF))
+ {
+ unsigned int value = static_cast<unsigned>(exp<0) << 15, m = std::abs(exp) << 6;
+ for(exp=18; m<0x400; m<<=1,--exp) ;
+ return half(detail::binary, value+(exp<<10)+m);
+ }
+ detail::uint32 ilog = exp, sign = detail::sign_mask(ilog), m =
+ (((ilog<<27)+(detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 28)>>4))^sign) - sign;
+ if(!m)
+ return half(detail::binary, 0);
+ for(exp=14; m<0x8000000 && exp; m<<=1,--exp) ;
+ for(; m>0xFFFFFFF; m>>=1,++exp)
+ s |= m & 1;
+ return half(detail::binary, detail::fixed2half<half::round_style,27,false,false,true>(m, exp, sign&0x8000, s));
+ #endif
+ }
+
+ /// Natural logarithm plus one.
+ /// This function may be 1 ULP off the correctly rounded exact result in <0.05% of inputs for `std::round_to_nearest`
+ /// and in ~1% of inputs for any other rounding mode.
+ ///
+ /// **See also:** Documentation for [std::log1p](https://en.cppreference.com/w/cpp/numeric/math/log1p).
+ /// \param arg function argument
+ /// \return logarithm of \a arg plus 1 to base e
+ /// \exception FE_INVALID for signaling NaN or argument <-1
+ /// \exception FE_DIVBYZERO for -1
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half log1p(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::log1p(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ if(arg.data_ >= 0xBC00)
+ return half(detail::binary, (arg.data_==0xBC00) ? detail::pole(0x8000) : (arg.data_<=0xFC00) ? detail::invalid() : detail::signal(arg.data_));
+ int abs = arg.data_ & 0x7FFF, exp = -15;
+ if(!abs || abs >= 0x7C00)
+ return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;
+ for(; abs<0x400; abs<<=1,--exp) ;
+ exp += abs >> 10;
+ detail::uint32 m = static_cast<detail::uint32>((abs&0x3FF)|0x400) << 20;
+ if(arg.data_ & 0x8000)
+ {
+ m = 0x40000000 - (m>>-exp);
+ for(exp=0; m<0x40000000; m<<=1,--exp) ;
+ }
+ else
+ {
+ if(exp < 0)
+ {
+ m = 0x40000000 + (m>>-exp);
+ exp = 0;
+ }
+ else
+ {
+ m += 0x40000000 >> exp;
+ int i = m >> 31;
+ m >>= i;
+ exp += i;
+ }
+ }
+ return half(detail::binary, detail::log2_post<half::round_style,0xB8AA3B2A>(detail::log2(m), exp, 17));
+ #endif
+ }
+
+ /// \}
+ /// \anchor power
+ /// \name Power functions
+ /// \{
+
+ /// Square root.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::sqrt](https://en.cppreference.com/w/cpp/numeric/math/sqrt).
+ /// \param arg function argument
+ /// \return square root of \a arg
+ /// \exception FE_INVALID for signaling NaN and negative arguments
+ /// \exception FE_INEXACT according to rounding
+ inline half sqrt(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::sqrt(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp = 15;
+ if(!abs || arg.data_ >= 0x7C00)
+ return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (arg.data_>0x8000) ? detail::invalid() : arg.data_);
+ for(; abs<0x400; abs<<=1,--exp) ;
+ detail::uint32 r = static_cast<detail::uint32>((abs&0x3FF)|0x400) << 10, m = detail::sqrt<20>(r, exp+=abs>>10);
+ return half(detail::binary, detail::rounded<half::round_style,false>((exp<<10)+(m&0x3FF), r>m, r!=0));
+ #endif
+ }
+
+ /// Inverse square root.
+ /// This function is exact to rounding for all rounding modes and thus generally more accurate than directly computing
+ /// 1 / sqrt(\a arg) in half-precision, as well as faster.
+ /// \param arg function argument
+ /// \return reciprocal of square root of \a arg
+ /// \exception FE_INVALID for signaling NaN and negative arguments
+ /// \exception FE_INEXACT according to rounding
+ inline half rsqrt(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(detail::internal_t(1)/std::sqrt(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF, bias = 0x4000;
+ if(!abs || arg.data_ >= 0x7C00)
+ return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (arg.data_>0x8000) ?
+ detail::invalid() : !abs ? detail::pole(arg.data_&0x8000) : 0);
+ for(; abs<0x400; abs<<=1,bias-=0x400) ;
+ unsigned int frac = (abs+=bias) & 0x7FF;
+ if(frac == 0x400)
+ return half(detail::binary, 0x7A00-(abs>>1));
+ if((half::round_style == std::round_to_nearest && (frac == 0x3FE || frac == 0x76C)) ||
+ (half::round_style != std::round_to_nearest && (frac == 0x15A || frac == 0x3FC || frac == 0x401 || frac == 0x402 || frac == 0x67B)))
+ return pow(arg, half(detail::binary, 0xB800));
+ detail::uint32 f = 0x17376 - abs, mx = (abs&0x3FF) | 0x400, my = ((f>>1)&0x3FF) | 0x400, mz = my * my;
+ int expy = (f>>11) - 31, expx = 32 - (abs>>10), i = mz >> 21;
+ for(mz=0x60000000-(((mz>>i)*mx)>>(expx-2*expy-i)); mz<0x40000000; mz<<=1,--expy) ;
+ i = (my*=mz>>10) >> 31;
+ expy += i;
+ my = (my>>(20+i)) + 1;
+ i = (mz=my*my) >> 21;
+ for(mz=0x60000000-(((mz>>i)*mx)>>(expx-2*expy-i)); mz<0x40000000; mz<<=1,--expy) ;
+ i = (my*=(mz>>10)+1) >> 31;
+ return half(detail::binary, detail::fixed2half<half::round_style,30,false,false,true>(my>>i, expy+i+14));
+ #endif
+ }
+
+ /// Cubic root.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::cbrt](https://en.cppreference.com/w/cpp/numeric/math/cbrt).
+ /// \param arg function argument
+ /// \return cubic root of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_INEXACT according to rounding
+ inline half cbrt(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::cbrt(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp = -15;
+ if(!abs || abs == 0x3C00 || abs >= 0x7C00)
+ return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;
+ for(; abs<0x400; abs<<=1, --exp);
+ detail::uint32 ilog = exp + (abs>>10), sign = detail::sign_mask(ilog), f, m =
+ (((ilog<<27)+(detail::log2(static_cast<detail::uint32>((abs&0x3FF)|0x400)<<20, 24)>>4))^sign) - sign;
+ for(exp=2; m<0x80000000; m<<=1,--exp) ;
+ m = detail::multiply64(m, 0xAAAAAAAB);
+ int i = m >> 31, s;
+ exp += i;
+ m <<= 1 - i;
+ if(exp < 0)
+ {
+ f = m >> -exp;
+ exp = 0;
+ }
+ else
+ {
+ f = (m<<exp) & 0x7FFFFFFF;
+ exp = m >> (31-exp);
+ }
+ m = detail::exp2(f, (half::round_style==std::round_to_nearest) ? 29 : 26);
+ if(sign)
+ {
+ if(m > 0x80000000)
+ {
+ m = detail::divide64(0x80000000, m, s);
+ ++exp;
+ }
+ exp = -exp;
+ }
+ return half(detail::binary, (half::round_style==std::round_to_nearest) ?
+ detail::fixed2half<half::round_style,31,false,false,false>(m, exp+14, arg.data_&0x8000) :
+ detail::fixed2half<half::round_style,23,false,false,false>((m+0x80)>>8, exp+14, arg.data_&0x8000));
+ #endif
+ }
+
+ /// Hypotenuse function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::hypot](https://en.cppreference.com/w/cpp/numeric/math/hypot).
+ /// \param x first argument
+ /// \param y second argument
+ /// \return square root of sum of squares without internal over- or underflows
+ /// \exception FE_INVALID if \a x or \a y is signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding of the final square root
+ inline half hypot(half x, half y)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ detail::internal_t fx = detail::half2float<detail::internal_t>(x.data_), fy = detail::half2float<detail::internal_t>(y.data_);
+ #if HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::hypot(fx, fy)));
+ #else
+ return half(detail::binary, detail::float2half<half::round_style>(std::sqrt(fx*fx+fy*fy)));
+ #endif
+ #else
+ int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, expx = 0, expy = 0;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx==0x7C00) ? detail::select(0x7C00, y.data_) :
+ (absy==0x7C00) ? detail::select(0x7C00, x.data_) : detail::signal(x.data_, y.data_));
+ if(!absx)
+ return half(detail::binary, absy ? detail::check_underflow(absy) : 0);
+ if(!absy)
+ return half(detail::binary, detail::check_underflow(absx));
+ if(absy > absx)
+ std::swap(absx, absy);
+ for(; absx<0x400; absx<<=1,--expx) ;
+ for(; absy<0x400; absy<<=1,--expy) ;
+ detail::uint32 mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400;
+ mx *= mx;
+ my *= my;
+ int ix = mx >> 21, iy = my >> 21;
+ expx = 2*(expx+(absx>>10)) - 15 + ix;
+ expy = 2*(expy+(absy>>10)) - 15 + iy;
+ mx <<= 10 - ix;
+ my <<= 10 - iy;
+ int d = expx - expy;
+ my = (d<30) ? ((my>>d)|((my&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;
+ return half(detail::binary, detail::hypot_post<half::round_style>(mx+my, expx));
+ #endif
+ }
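+
+ // Illustrative usage sketch (not a normative part of the library): hypot() avoids the
+ // intermediate overflow that squaring the operands directly can cause:
+ //
+ //   half_float::half big(200.0f);
+ //   half_float::half naive = sqrt(big * big + big * big);   // big*big already overflows
+ //   half_float::half safe  = hypot(big, big);               // ~282.8, computed without overflow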
+
+ /// Hypotenuse function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::hypot](https://en.cppreference.com/w/cpp/numeric/math/hypot).
+ /// \param x first argument
+ /// \param y second argument
+ /// \param z third argument
+ /// \return square root of sum of squares without internal over- or underflows
+ /// \exception FE_INVALID if \a x, \a y or \a z is signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding of the final square root
+ inline half hypot(half x, half y, half z)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ detail::internal_t fx = detail::half2float<detail::internal_t>(x.data_), fy = detail::half2float<detail::internal_t>(y.data_), fz = detail::half2float<detail::internal_t>(z.data_);
+ return half(detail::binary, detail::float2half<half::round_style>(std::sqrt(fx*fx+fy*fy+fz*fz)));
+ #else
+ int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, absz = z.data_ & 0x7FFF, expx = 0, expy = 0, expz = 0;
+ if(!absx)
+ return hypot(y, z);
+ if(!absy)
+ return hypot(x, z);
+ if(!absz)
+ return hypot(x, y);
+ if(absx >= 0x7C00 || absy >= 0x7C00 || absz >= 0x7C00)
+ return half(detail::binary, (absx==0x7C00) ? detail::select(0x7C00, detail::select(y.data_, z.data_)) :
+ (absy==0x7C00) ? detail::select(0x7C00, detail::select(x.data_, z.data_)) :
+ (absz==0x7C00) ? detail::select(0x7C00, detail::select(x.data_, y.data_)) :
+ detail::signal(x.data_, y.data_, z.data_));
+ if(absz > absy)
+ std::swap(absy, absz);
+ if(absy > absx)
+ std::swap(absx, absy);
+ if(absz > absy)
+ std::swap(absy, absz);
+ for(; absx<0x400; absx<<=1,--expx) ;
+ for(; absy<0x400; absy<<=1,--expy) ;
+ for(; absz<0x400; absz<<=1,--expz) ;
+ detail::uint32 mx = (absx&0x3FF) | 0x400, my = (absy&0x3FF) | 0x400, mz = (absz&0x3FF) | 0x400;
+ mx *= mx;
+ my *= my;
+ mz *= mz;
+ int ix = mx >> 21, iy = my >> 21, iz = mz >> 21;
+ expx = 2*(expx+(absx>>10)) - 15 + ix;
+ expy = 2*(expy+(absy>>10)) - 15 + iy;
+ expz = 2*(expz+(absz>>10)) - 15 + iz;
+ mx <<= 10 - ix;
+ my <<= 10 - iy;
+ mz <<= 10 - iz;
+ int d = expy - expz;
+ mz = (d<30) ? ((mz>>d)|((mz&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;
+ my += mz;
+ if(my & 0x80000000)
+ {
+ my = (my>>1) | (my&1);
+ if(++expy > expx)
+ {
+ std::swap(mx, my);
+ std::swap(expx, expy);
+ }
+ }
+ d = expx - expy;
+ my = (d<30) ? ((my>>d)|((my&((static_cast<detail::uint32>(1)<<d)-1))!=0)) : 1;
+ return half(detail::binary, detail::hypot_post<half::round_style>(mx+my, expx));
+ #endif
+ }
+
+ /// Power function.
+ /// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in ~0.00025% of inputs.
+ ///
+ /// **See also:** Documentation for [std::pow](https://en.cppreference.com/w/cpp/numeric/math/pow).
+ /// \param x base
+ /// \param y exponent
+ /// \return \a x raised to \a y
+ /// \exception FE_INVALID if \a x or \a y is signaling NaN or if \a x is finite and negative and \a y is finite and not integral
+ /// \exception FE_DIVBYZERO if \a x is 0 and \a y is negative
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half pow(half x, half y)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::pow(detail::half2float<detail::internal_t>(x.data_), detail::half2float<detail::internal_t>(y.data_))));
+ #else
+ int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, exp = -15;
+ if(!absy || x.data_ == 0x3C00)
+ return half(detail::binary, detail::select(0x3C00, (x.data_==0x3C00) ? y.data_ : x.data_));
+ bool is_int = absy >= 0x6400 || (absy>=0x3C00 && !(absy&((1<<(25-(absy>>10)))-1)));
+ unsigned int sign = x.data_ & (static_cast<unsigned>((absy<0x6800)&&is_int&&((absy>>(25-(absy>>10)))&1))<<15);
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ return half(detail::binary, (absx>0x7C00 || absy>0x7C00) ? detail::signal(x.data_, y.data_) :
+ (absy==0x7C00) ? ((absx==0x3C00) ? 0x3C00 : (!absx && y.data_==0xFC00) ? detail::pole() :
+ (0x7C00&-((y.data_>>15)^(absx>0x3C00)))) : (sign|(0x7C00&((y.data_>>15)-1U))));
+ if(!absx)
+ return half(detail::binary, (y.data_&0x8000) ? detail::pole(sign) : sign);
+ if((x.data_&0x8000) && !is_int)
+ return half(detail::binary, detail::invalid());
+ if(x.data_ == 0xBC00)
+ return half(detail::binary, sign|0x3C00);
+ switch(y.data_)
+ {
+ case 0x3800: return sqrt(x);
+ case 0x3C00: return half(detail::binary, detail::check_underflow(x.data_));
+ case 0x4000: return x * x;
+ case 0xBC00: return half(detail::binary, 0x3C00) / x;
+ }
+ for(; absx<0x400; absx<<=1,--exp) ;
+ detail::uint32 ilog = exp + (absx>>10), msign = detail::sign_mask(ilog), f, m =
+ (((ilog<<27)+((detail::log2(static_cast<detail::uint32>((absx&0x3FF)|0x400)<<20)+8)>>4))^msign) - msign;
+ for(exp=-11; m<0x80000000; m<<=1,--exp) ;
+ for(; absy<0x400; absy<<=1,--exp) ;
+ m = detail::multiply64(m, static_cast<detail::uint32>((absy&0x3FF)|0x400)<<21);
+ int i = m >> 31;
+ exp += (absy>>10) + i;
+ m <<= 1 - i;
+ if(exp < 0)
+ {
+ f = m >> -exp;
+ exp = 0;
+ }
+ else
+ {
+ f = (m<<exp) & 0x7FFFFFFF;
+ exp = m >> (31-exp);
+ }
+ return half(detail::binary, detail::exp2_post<half::round_style>(f, exp, ((msign&1)^(y.data_>>15))!=0, sign));
+ #endif
+ }
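+
+ // Illustrative usage sketch (not a normative part of the library): a few exponents are handled
+ // by the switch above (0.5 via sqrt, 1, 2 and -1); other cases go through the exp2/log2 based
+ // path:
+ //
+ //   half_float::half r1 = pow(half_float::half(2.0f), half_float::half(0.5f));    // sqrt(2) ~ 1.414
+ //   half_float::half r2 = pow(half_float::half(2.0f), half_float::half(10.0f));   // 1024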
+
+ /// \}
+ /// \anchor trigonometric
+ /// \name Trigonometric functions
+ /// \{
+
+ /// Compute sine and cosine simultaneously.
+ /// This returns the same results as sin() and cos() but is faster than calling each function individually.
+ ///
+ /// This function is exact to rounding for all rounding modes.
+ /// \param arg function argument
+ /// \param sin variable to take sine of \a arg
+ /// \param cos variable to take cosine of \a arg
+ /// \exception FE_INVALID for signaling NaN or infinity
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline void sincos(half arg, half *sin, half *cos)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ detail::internal_t f = detail::half2float<detail::internal_t>(arg.data_);
+ *sin = half(detail::binary, detail::float2half<half::round_style>(std::sin(f)));
+ *cos = half(detail::binary, detail::float2half<half::round_style>(std::cos(f)));
+ #else
+ int abs = arg.data_ & 0x7FFF, sign = arg.data_ >> 15, k;
+ if(abs >= 0x7C00)
+ *sin = *cos = half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));
+ else if(!abs)
+ {
+ *sin = arg;
+ *cos = half(detail::binary, 0x3C00);
+ }
+ else if(abs < 0x2500)
+ {
+ *sin = half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));
+ *cos = half(detail::binary, detail::rounded<half::round_style,true>(0x3BFF, 1, 1));
+ }
+ else
+ {
+ if(half::round_style != std::round_to_nearest)
+ {
+ switch(abs)
+ {
+ case 0x48B7:
+ *sin = half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x1D07, 1, 1));
+ *cos = half(detail::binary, detail::rounded<half::round_style,true>(0xBBFF, 1, 1));
+ return;
+ case 0x598C:
+ *sin = half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x3BFF, 1, 1));
+ *cos = half(detail::binary, detail::rounded<half::round_style,true>(0x80FC, 1, 1));
+ return;
+ case 0x6A64:
+ *sin = half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x3BFE, 1, 1));
+ *cos = half(detail::binary, detail::rounded<half::round_style,true>(0x27FF, 1, 1));
+ return;
+ case 0x6D8C:
+ *sin = half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x0FE6, 1, 1));
+ *cos = half(detail::binary, detail::rounded<half::round_style,true>(0x3BFF, 1, 1));
+ return;
+ }
+ }
+ std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 28);
+ switch(k & 3)
+ {
+ case 1: sc = std::make_pair(sc.second, -sc.first); break;
+ case 2: sc = std::make_pair(-sc.first, -sc.second); break;
+ case 3: sc = std::make_pair(-sc.second, sc.first); break;
+ }
+ *sin = half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>((sc.first^-static_cast<detail::uint32>(sign))+sign));
+ *cos = half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>(sc.second));
+ }
+ #endif
+ }
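+
+ // Illustrative usage sketch (not a normative part of the library): when both values are needed,
+ // sincos() shares one argument reduction instead of repeating it in sin() and cos():
+ //
+ //   half_float::half s, c;
+ //   sincos(half_float::half(1.0f), &s, &c);   // s ~ 0.8415, c ~ 0.5403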
+
+ /// Sine function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::sin](https://en.cppreference.com/w/cpp/numeric/math/sin).
+ /// \param arg function argument
+ /// \return sine value of \a arg
+ /// \exception FE_INVALID for signaling NaN or infinity
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half sin(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::sin(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, k;
+ if(!abs)
+ return arg;
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs < 0x2900)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));
+ if(half::round_style != std::round_to_nearest)
+ switch(abs)
+ {
+ case 0x48B7: return half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x1D07, 1, 1));
+ case 0x6A64: return half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x3BFE, 1, 1));
+ case 0x6D8C: return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x0FE6, 1, 1));
+ }
+ std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 28);
+ detail::uint32 sign = -static_cast<detail::uint32>(((k>>1)&1)^(arg.data_>>15));
+ return half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>((((k&1) ? sc.second : sc.first)^sign) - sign));
+ #endif
+ }
+
+ /// Cosine function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::cos](https://en.cppreference.com/w/cpp/numeric/math/cos).
+ /// \param arg function argument
+ /// \return cosine value of \a arg
+ /// \exception FE_INVALID for signaling NaN or infinity
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half cos(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::cos(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, k;
+ if(!abs)
+ return half(detail::binary, 0x3C00);
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs < 0x2500)
+ return half(detail::binary, detail::rounded<half::round_style,true>(0x3BFF, 1, 1));
+ if(half::round_style != std::round_to_nearest && abs == 0x598C)
+ return half(detail::binary, detail::rounded<half::round_style,true>(0x80FC, 1, 1));
+ std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 28);
+ detail::uint32 sign = -static_cast<detail::uint32>(((k>>1)^k)&1);
+ return half(detail::binary, detail::fixed2half<half::round_style,30,true,true,true>((((k&1) ? sc.first : sc.second)^sign) - sign));
+ #endif
+ }
+
+ /// Tangent function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::tan](https://en.cppreference.com/w/cpp/numeric/math/tan).
+ /// \param arg function argument
+ /// \return tangent value of \a arg
+ /// \exception FE_INVALID for signaling NaN or infinity
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half tan(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::tan(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp = 13, k;
+ if(!abs)
+ return arg;
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs < 0x2700)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));
+ if(half::round_style != std::round_to_nearest)
+ switch(abs)
+ {
+ case 0x658C: return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x07E6, 1, 1));
+ case 0x7330: return half(detail::binary, detail::rounded<half::round_style,true>((~arg.data_&0x8000)|0x4B62, 1, 1));
+ }
+ std::pair<detail::uint32,detail::uint32> sc = detail::sincos(detail::angle_arg(abs, k), 30);
+ if(k & 1)
+ sc = std::make_pair(-sc.second, sc.first);
+ detail::uint32 signy = detail::sign_mask(sc.first), signx = detail::sign_mask(sc.second);
+ detail::uint32 my = (sc.first^signy) - signy, mx = (sc.second^signx) - signx;
+ for(; my<0x80000000; my<<=1,--exp) ;
+ for(; mx<0x80000000; mx<<=1,++exp) ;
+ return half(detail::binary, detail::tangent_post<half::round_style>(my, mx, exp, (signy^signx^arg.data_)&0x8000));
+ #endif
+ }
+
+ /// Arc sine.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::asin](https://en.cppreference.com/w/cpp/numeric/math/asin).
+ /// \param arg function argument
+ /// \return arc sine value of \a arg
+ /// \exception FE_INVALID for signaling NaN or if abs(\a arg) > 1
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half asin(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::asin(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;
+ if(!abs)
+ return arg;
+ if(abs >= 0x3C00)
+ return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (abs>0x3C00) ? detail::invalid() :
+ detail::rounded<half::round_style,true>(sign|0x3E48, 0, 1));
+ if(abs < 0x2900)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));
+ if(half::round_style != std::round_to_nearest && (abs == 0x2B44 || abs == 0x2DC3))
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_+1, 1, 1));
+ std::pair<detail::uint32,detail::uint32> sc = detail::atan2_args(abs);
+ detail::uint32 m = detail::atan2(sc.first, sc.second, (half::round_style==std::round_to_nearest) ? 27 : 26);
+ return half(detail::binary, detail::fixed2half<half::round_style,30,false,true,true>(m, 14, sign));
+ #endif
+ }
+
+ /// Arc cosine function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::acos](https://en.cppreference.com/w/cpp/numeric/math/acos).
+ /// \param arg function argument
+ /// \return arc cosine value of \a arg
+ /// \exception FE_INVALID for signaling NaN or if abs(\a arg) > 1
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half acos(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::acos(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ >> 15;
+ if(!abs)
+ return half(detail::binary, detail::rounded<half::round_style,true>(0x3E48, 0, 1));
+ if(abs >= 0x3C00)
+ return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (abs>0x3C00) ? detail::invalid() :
+ sign ? detail::rounded<half::round_style,true>(0x4248, 0, 1) : 0);
+ std::pair<detail::uint32,detail::uint32> cs = detail::atan2_args(abs);
+ detail::uint32 m = detail::atan2(cs.second, cs.first, 28);
+ return half(detail::binary, detail::fixed2half<half::round_style,31,false,true,true>(sign ? (0xC90FDAA2-m) : m, 15, 0, sign));
+ #endif
+ }
+
+ /// Arc tangent function.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::atan](https://en.cppreference.com/w/cpp/numeric/math/atan).
+ /// \param arg function argument
+ /// \return arc tangent value of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half atan(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::atan(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;
+ if(!abs)
+ return arg;
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? detail::rounded<half::round_style,true>(sign|0x3E48, 0, 1) : detail::signal(arg.data_));
+ if(abs <= 0x2700)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));
+ int exp = (abs>>10) + (abs<=0x3FF);
+ detail::uint32 my = (abs&0x3FF) | ((abs>0x3FF)<<10);
+ detail::uint32 m = (exp>15) ? detail::atan2(my<<19, 0x20000000>>(exp-15), (half::round_style==std::round_to_nearest) ? 26 : 24) :
+ detail::atan2(my<<(exp+4), 0x20000000, (half::round_style==std::round_to_nearest) ? 30 : 28);
+ return half(detail::binary, detail::fixed2half<half::round_style,30,false,true,true>(m, 14, sign));
+ #endif
+ }
+
+ /// Arc tangent function.
+ /// This function may be 1 ULP off the correctly rounded exact result in ~0.005% of inputs for `std::round_to_nearest`,
+ /// in ~0.1% of inputs for `std::round_toward_zero` and in ~0.02% of inputs for any other rounding mode.
+ ///
+ /// **See also:** Documentation for [std::atan2](https://en.cppreference.com/w/cpp/numeric/math/atan2).
+ /// \param y numerator
+ /// \param x denominator
+ /// \return arc tangent value
+ /// \exception FE_INVALID if \a x or \a y is signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half atan2(half y, half x)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::atan2(detail::half2float<detail::internal_t>(y.data_), detail::half2float<detail::internal_t>(x.data_))));
+ #else
+ unsigned int absx = x.data_ & 0x7FFF, absy = y.data_ & 0x7FFF, signx = x.data_ >> 15, signy = y.data_ & 0x8000;
+ if(absx >= 0x7C00 || absy >= 0x7C00)
+ {
+ if(absx > 0x7C00 || absy > 0x7C00)
+ return half(detail::binary, detail::signal(x.data_, y.data_));
+ if(absy == 0x7C00)
+ return half(detail::binary, (absx<0x7C00) ? detail::rounded<half::round_style,true>(signy|0x3E48, 0, 1) :
+ signx ? detail::rounded<half::round_style,true>(signy|0x40B6, 0, 1) :
+ detail::rounded<half::round_style,true>(signy|0x3A48, 0, 1));
+ return (x.data_==0x7C00) ? half(detail::binary, signy) : half(detail::binary, detail::rounded<half::round_style,true>(signy|0x4248, 0, 1));
+ }
+ if(!absy)
+ return signx ? half(detail::binary, detail::rounded<half::round_style,true>(signy|0x4248, 0, 1)) : y;
+ if(!absx)
+ return half(detail::binary, detail::rounded<half::round_style,true>(signy|0x3E48, 0, 1));
+ int d = (absy>>10) + (absy<=0x3FF) - (absx>>10) - (absx<=0x3FF);
+ if(d > (signx ? 18 : 12))
+ return half(detail::binary, detail::rounded<half::round_style,true>(signy|0x3E48, 0, 1));
+ if(signx && d < -11)
+ return half(detail::binary, detail::rounded<half::round_style,true>(signy|0x4248, 0, 1));
+ if(!signx && d < ((half::round_style==std::round_toward_zero) ? -15 : -9))
+ {
+ for(; absy<0x400; absy<<=1,--d) ;
+ detail::uint32 mx = ((absx<<1)&0x7FF) | 0x800, my = ((absy<<1)&0x7FF) | 0x800;
+ int i = my < mx;
+ d -= i;
+ if(d < -25)
+ return half(detail::binary, detail::underflow<half::round_style>(signy));
+ my <<= 11 + i;
+ return half(detail::binary, detail::fixed2half<half::round_style,11,false,false,true>(my/mx, d+14, signy, my%mx!=0));
+ }
+ detail::uint32 m = detail::atan2( ((absy&0x3FF)|((absy>0x3FF)<<10))<<(19+((d<0) ? d : (d>0) ? 0 : -1)),
+ ((absx&0x3FF)|((absx>0x3FF)<<10))<<(19-((d>0) ? d : (d<0) ? 0 : 1)));
+ return half(detail::binary, detail::fixed2half<half::round_style,31,false,true,true>(signx ? (0xC90FDAA2-m) : m, 15, signy, signx));
+ #endif
+ }
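+
+ // Illustrative usage sketch (not a normative part of the library): unlike atan(y/x), atan2()
+ // uses the signs of both operands to pick the correct quadrant:
+ //
+ //   half_float::half a = atan2(half_float::half(1.0f), half_float::half(-1.0f));   // ~2.356 (3*pi/4)
+ //   half_float::half b = atan(half_float::half(1.0f) / half_float::half(-1.0f));   // ~-0.785 (-pi/4)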
+
+ /// \}
+ /// \anchor hyperbolic
+ /// \name Hyperbolic functions
+ /// \{
+
+ /// Hyperbolic sine.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::sinh](https://en.cppreference.com/w/cpp/numeric/math/sinh).
+ /// \param arg function argument
+ /// \return hyperbolic sine value of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half sinh(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::sinh(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp;
+ if(!abs || abs >= 0x7C00)
+ return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;
+ if(abs <= 0x2900)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));
+ std::pair<detail::uint32,detail::uint32> mm = detail::hyperbolic_args(abs, exp, (half::round_style==std::round_to_nearest) ? 29 : 27);
+ detail::uint32 m = mm.first - mm.second;
+ for(exp+=13; m<0x80000000 && exp; m<<=1,--exp) ;
+ unsigned int sign = arg.data_ & 0x8000;
+ if(exp > 29)
+ return half(detail::binary, detail::overflow<half::round_style>(sign));
+ return half(detail::binary, detail::fixed2half<half::round_style,31,false,false,true>(m, exp, sign));
+ #endif
+ }
+
+ /// Hyperbolic cosine.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::cosh](https://en.cppreference.com/w/cpp/numeric/math/cosh).
+ /// \param arg function argument
+ /// \return hyperbolic cosine value of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half cosh(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::cosh(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp;
+ if(!abs)
+ return half(detail::binary, 0x3C00);
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : 0x7C00);
+ std::pair<detail::uint32,detail::uint32> mm = detail::hyperbolic_args(abs, exp, (half::round_style==std::round_to_nearest) ? 23 : 26);
+ detail::uint32 m = mm.first + mm.second, i = (~m&0xFFFFFFFF) >> 31;
+ m = (m>>i) | (m&i) | 0x80000000;
+ if((exp+=13+i) > 29)
+ return half(detail::binary, detail::overflow<half::round_style>());
+ return half(detail::binary, detail::fixed2half<half::round_style,31,false,false,true>(m, exp));
+ #endif
+ }
+
+ /// Hyperbolic tangent.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::tanh](https://en.cppreference.com/w/cpp/numeric/math/tanh).
+ /// \param arg function argument
+ /// \return hyperbolic tangent value of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half tanh(half arg)
+ {
+ #ifdef HALF_ARITHMETIC_TYPE
+ return half(detail::binary, detail::float2half<half::round_style>(std::tanh(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp;
+ if(!abs)
+ return arg;
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs>0x7C00) ? detail::signal(arg.data_) : (arg.data_-0x4000));
+ if(abs >= 0x4500)
+ return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x3BFF, 1, 1));
+ if(abs < 0x2700)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));
+ if(half::round_style != std::round_to_nearest && abs == 0x2D3F)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-3, 0, 1));
+ std::pair<detail::uint32,detail::uint32> mm = detail::hyperbolic_args(abs, exp, 27);
+ detail::uint32 my = mm.first - mm.second - (half::round_style!=std::round_to_nearest), mx = mm.first + mm.second, i = (~mx&0xFFFFFFFF) >> 31;
+ for(exp=13; my<0x80000000; my<<=1,--exp) ;
+ mx = (mx>>i) | 0x80000000;
+ return half(detail::binary, detail::tangent_post<half::round_style>(my, mx, exp-i, arg.data_&0x8000));
+ #endif
+ }
+
+ /// Hyperbolic area sine.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::asinh](https://en.cppreference.com/w/cpp/numeric/math/asinh).
+ /// \param arg function argument
+ /// \return area sine value of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half asinh(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::asinh(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF;
+ if(!abs || abs >= 0x7C00)
+ return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;
+ if(abs <= 0x2900)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-1, 1, 1));
+ if(half::round_style != std::round_to_nearest)
+ switch(abs)
+ {
+ case 0x32D4: return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-13, 1, 1));
+ case 0x3B5B: return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_-197, 1, 1));
+ }
+ return half(detail::binary, detail::area<half::round_style,true>(arg.data_));
+ #endif
+ }
+
+ /// Hyperbolic area cosine.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::acosh](https://en.cppreference.com/w/cpp/numeric/math/acosh).
+ /// \param arg function argument
+ /// \return area cosine value of \a arg
+ /// \exception FE_INVALID for signaling NaN or arguments <1
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half acosh(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::acosh(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF;
+ if((arg.data_&0x8000) || abs < 0x3C00)
+ return half(detail::binary, (abs<=0x7C00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs == 0x3C00)
+ return half(detail::binary, 0);
+ if(arg.data_ >= 0x7C00)
+ return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;
+ return half(detail::binary, detail::area<half::round_style,false>(arg.data_));
+ #endif
+ }
+
+ /// Hyperbolic area tangent.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::atanh](https://en.cppreference.com/w/cpp/numeric/math/atanh).
+ /// \param arg function argument
+ /// \return area tangent value of \a arg
+ /// \exception FE_INVALID for signaling NaN or if abs(\a arg) > 1
+ /// \exception FE_DIVBYZERO for +/-1
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half atanh(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::atanh(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF, exp = 0;
+ if(!abs)
+ return arg;
+ if(abs >= 0x3C00)
+ return half(detail::binary, (abs==0x3C00) ? detail::pole(arg.data_&0x8000) : (abs<=0x7C00) ? detail::invalid() : detail::signal(arg.data_));
+ if(abs < 0x2700)
+ return half(detail::binary, detail::rounded<half::round_style,true>(arg.data_, 0, 1));
+ detail::uint32 m = static_cast<detail::uint32>((abs&0x3FF)|((abs>0x3FF)<<10)) << ((abs>>10)+(abs<=0x3FF)+6), my = 0x80000000 + m, mx = 0x80000000 - m;
+ for(; mx<0x80000000; mx<<=1,++exp) ;
+ int i = my >= mx, s;
+ return half(detail::binary, detail::log2_post<half::round_style,0xB8AA3B2A>(detail::log2(
+ (detail::divide64(my>>i, mx, s)+1)>>1, 27)+0x10, exp+i-1, 16, arg.data_&0x8000));
+ #endif
+ }
+
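+	// Usage sketch (illustrative only): the hyperbolic functions above operate
+	// directly on half_float::half values and are found via ADL, assuming
+	// `#include "half.hpp"` and `using half_float::half;`:
+	//
+	//   half x(0.5f);
+	//   half s = sinh(x), c = cosh(x), t = tanh(x);
+	//   half y = asinh(s);   // recovers approximately x, up to half precision
+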
+ /// \}
+ /// \anchor special
+ /// \name Error and gamma functions
+ /// \{
+
+ /// Error function.
+ /// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in <0.5% of inputs.
+ ///
+ /// **See also:** Documentation for [std::erf](https://en.cppreference.com/w/cpp/numeric/math/erf).
+ /// \param arg function argument
+ /// \return error function value of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half erf(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::erf(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF;
+ if(!abs || abs >= 0x7C00)
+ return (abs>=0x7C00) ? half(detail::binary, (abs==0x7C00) ? (arg.data_-0x4000) : detail::signal(arg.data_)) : arg;
+ if(abs >= 0x4200)
+ return half(detail::binary, detail::rounded<half::round_style,true>((arg.data_&0x8000)|0x3BFF, 1, 1));
+ return half(detail::binary, detail::erf<half::round_style,false>(arg.data_));
+ #endif
+ }
+
+ /// Complementary error function.
+ /// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in <0.5% of inputs.
+ ///
+ /// **See also:** Documentation for [std::erfc](https://en.cppreference.com/w/cpp/numeric/math/erfc).
+ /// \param arg function argument
+ /// \return 1 minus error function value of \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half erfc(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::erfc(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;
+ if(abs >= 0x7C00)
+ return (abs>=0x7C00) ? half(detail::binary, (abs==0x7C00) ? (sign>>1) : detail::signal(arg.data_)) : arg;
+ if(!abs)
+ return half(detail::binary, 0x3C00);
+ if(abs >= 0x4400)
+ return half(detail::binary, detail::rounded<half::round_style,true>((sign>>1)-(sign>>15), sign>>15, 1));
+ return half(detail::binary, detail::erf<half::round_style,true>(arg.data_));
+ #endif
+ }
+
+ /// Natural logarithm of gamma function.
+ /// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in ~0.025% of inputs.
+ ///
+ /// **See also:** Documentation for [std::lgamma](https://en.cppreference.com/w/cpp/numeric/math/lgamma).
+ /// \param arg function argument
+ /// \return natural logarith of gamma function for \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_DIVBYZERO for 0 or negative integer arguments
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half lgamma(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::lgamma(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ int abs = arg.data_ & 0x7FFF;
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? 0x7C00 : detail::signal(arg.data_));
+ if(!abs || arg.data_ >= 0xE400 || (arg.data_ >= 0xBC00 && !(abs&((1<<(25-(abs>>10)))-1))))
+ return half(detail::binary, detail::pole());
+ if(arg.data_ == 0x3C00 || arg.data_ == 0x4000)
+ return half(detail::binary, 0);
+ return half(detail::binary, detail::gamma<half::round_style,true>(arg.data_));
+ #endif
+ }
+
+ /// Gamma function.
+ /// This function may be 1 ULP off the correctly rounded exact result for any rounding mode in <0.25% of inputs.
+ ///
+ /// **See also:** Documentation for [std::tgamma](https://en.cppreference.com/w/cpp/numeric/math/tgamma).
+ /// \param arg function argument
+ /// \return gamma function value of \a arg
+ /// \exception FE_INVALID for signaling NaN, negative infinity or negative integer arguments
+ /// \exception FE_DIVBYZERO for 0
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half tgamma(half arg)
+ {
+ #if defined(HALF_ARITHMETIC_TYPE) && HALF_ENABLE_CPP11_CMATH
+ return half(detail::binary, detail::float2half<half::round_style>(std::tgamma(detail::half2float<detail::internal_t>(arg.data_))));
+ #else
+ unsigned int abs = arg.data_ & 0x7FFF;
+ if(!abs)
+ return half(detail::binary, detail::pole(arg.data_));
+ if(abs >= 0x7C00)
+ return (arg.data_==0x7C00) ? arg : half(detail::binary, detail::signal(arg.data_));
+ if(arg.data_ >= 0xE400 || (arg.data_ >= 0xBC00 && !(abs&((1<<(25-(abs>>10)))-1))))
+ return half(detail::binary, detail::invalid());
+ if(arg.data_ >= 0xCA80)
+ return half(detail::binary, detail::underflow<half::round_style>((1-((abs>>(25-(abs>>10)))&1))<<15));
+ if(arg.data_ <= 0x100 || (arg.data_ >= 0x4900 && arg.data_ < 0x8000))
+ return half(detail::binary, detail::overflow<half::round_style>());
+ if(arg.data_ == 0x3C00)
+ return arg;
+ return half(detail::binary, detail::gamma<half::round_style,false>(arg.data_));
+ #endif
+ }
+
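+	// Usage sketch (illustrative only): erf/erfc and lgamma/tgamma work on half
+	// values directly, e.g. assuming `using half_float::half;`:
+	//
+	//   half p = erf(half(1.0f));       // ~0.8427 in half precision
+	//   half q = erfc(half(1.0f));      // ~1 - erf(1)
+	//   half g = tgamma(half(5.0f));    // gamma(5) = 4! = 24
+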
+ /// \}
+ /// \anchor rounding
+ /// \name Rounding
+ /// \{
+
+ /// Nearest integer not less than half value.
+ /// **See also:** Documentation for [std::ceil](https://en.cppreference.com/w/cpp/numeric/math/ceil).
+ /// \param arg half to round
+ /// \return nearest integer not less than \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_INEXACT if value had to be rounded
+ inline half ceil(half arg) { return half(detail::binary, detail::integral<std::round_toward_infinity,true,true>(arg.data_)); }
+
+ /// Nearest integer not greater than half value.
+ /// **See also:** Documentation for [std::floor](https://en.cppreference.com/w/cpp/numeric/math/floor).
+ /// \param arg half to round
+ /// \return nearest integer not greater than \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_INEXACT if value had to be rounded
+ inline half floor(half arg) { return half(detail::binary, detail::integral<std::round_toward_neg_infinity,true,true>(arg.data_)); }
+
+ /// Nearest integer not greater in magnitude than half value.
+ /// **See also:** Documentation for [std::trunc](https://en.cppreference.com/w/cpp/numeric/math/trunc).
+ /// \param arg half to round
+ /// \return nearest integer not greater in magnitude than \a arg
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_INEXACT if value had to be rounded
+ inline half trunc(half arg) { return half(detail::binary, detail::integral<std::round_toward_zero,true,true>(arg.data_)); }
+
+ /// Nearest integer.
+ /// **See also:** Documentation for [std::round](https://en.cppreference.com/w/cpp/numeric/math/round).
+ /// \param arg half to round
+ /// \return nearest integer, rounded away from zero in half-way cases
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_INEXACT if value had to be rounded
+ inline half round(half arg) { return half(detail::binary, detail::integral<std::round_to_nearest,false,true>(arg.data_)); }
+
+ /// Nearest integer.
+ /// **See also:** Documentation for [std::lround](https://en.cppreference.com/w/cpp/numeric/math/round).
+ /// \param arg half to round
+ /// \return nearest integer, rounded away from zero in half-way cases
+ /// \exception FE_INVALID if value is not representable as `long`
+ inline long lround(half arg) { return detail::half2int<std::round_to_nearest,false,false,long>(arg.data_); }
+
+ /// Nearest integer using half's internal rounding mode.
+ /// **See also:** Documentation for [std::rint](https://en.cppreference.com/w/cpp/numeric/math/rint).
+ /// \param arg half expression to round
+ /// \return nearest integer using default rounding mode
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_INEXACT if value had to be rounded
+ inline half rint(half arg) { return half(detail::binary, detail::integral<half::round_style,true,true>(arg.data_)); }
+
+ /// Nearest integer using half's internal rounding mode.
+ /// **See also:** Documentation for [std::lrint](https://en.cppreference.com/w/cpp/numeric/math/rint).
+ /// \param arg half expression to round
+ /// \return nearest integer using default rounding mode
+ /// \exception FE_INVALID if value is not representable as `long`
+ /// \exception FE_INEXACT if value had to be rounded
+ inline long lrint(half arg) { return detail::half2int<half::round_style,true,true,long>(arg.data_); }
+
+ /// Nearest integer using half's internal rounding mode.
+ /// **See also:** Documentation for [std::nearbyint](https://en.cppreference.com/w/cpp/numeric/math/nearbyint).
+ /// \param arg half expression to round
+ /// \return nearest integer using default rounding mode
+ /// \exception FE_INVALID for signaling NaN
+ inline half nearbyint(half arg) { return half(detail::binary, detail::integral<half::round_style,true,false>(arg.data_)); }
+#if HALF_ENABLE_CPP11_LONG_LONG
+ /// Nearest integer.
+ /// **See also:** Documentation for [std::llround](https://en.cppreference.com/w/cpp/numeric/math/round).
+ /// \param arg half to round
+ /// \return nearest integer, rounded away from zero in half-way cases
+ /// \exception FE_INVALID if value is not representable as `long long`
+ inline long long llround(half arg) { return detail::half2int<std::round_to_nearest,false,false,long long>(arg.data_); }
+
+ /// Nearest integer using half's internal rounding mode.
+ /// **See also:** Documentation for [std::llrint](https://en.cppreference.com/w/cpp/numeric/math/rint).
+ /// \param arg half expression to round
+ /// \return nearest integer using default rounding mode
+ /// \exception FE_INVALID if value is not representable as `long long`
+ /// \exception FE_INEXACT if value had to be rounded
+ inline long long llrint(half arg) { return detail::half2int<half::round_style,true,true,long long>(arg.data_); }
+#endif
+
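+	// Usage sketch (illustrative only): these rounding functions mirror their
+	// <cmath> counterparts; round/lround round half-way cases away from zero,
+	// while rint, lrint and nearbyint use half's internal rounding mode
+	// (half::round_style). Assuming `using half_float::half;`:
+	//
+	//   half x(2.5f);
+	//   half a = floor(x), b = ceil(x), c = trunc(x);   // 2, 3, 2
+	//   long n = lround(x);                             // 3 (half-way rounds away from zero)
+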
+ /// \}
+ /// \anchor float
+ /// \name Floating point manipulation
+ /// \{
+
+	/// Decompose floating-point number into significand and exponent.
+	/// **See also:** Documentation for [std::frexp](https://en.cppreference.com/w/cpp/numeric/math/frexp).
+	/// \param arg number to decompose
+ /// \param exp address to store exponent at
+	/// \return significand in range [0.5, 1)
+ /// \exception FE_INVALID for signaling NaN
+ inline half frexp(half arg, int *exp)
+ {
+ *exp = 0;
+ unsigned int abs = arg.data_ & 0x7FFF;
+ if(abs >= 0x7C00 || !abs)
+ return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;
+ for(; abs<0x400; abs<<=1,--*exp) ;
+ *exp += (abs>>10) - 14;
+ return half(detail::binary, (arg.data_&0x8000)|0x3800|(abs&0x3FF));
+ }
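+
+	// Usage sketch (illustrative only), assuming `using half_float::half;`:
+	//
+	//   int e = 0;
+	//   half m = frexp(half(6.0f), &e);   // m == 0.75, e == 3  (6 = 0.75 * 2^3)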
+
+ /// Multiply by power of two.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::scalbln](https://en.cppreference.com/w/cpp/numeric/math/scalbn).
+ /// \param arg number to modify
+ /// \param exp power of two to multiply with
+	/// \return \a arg multiplied by 2 raised to \a exp
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half scalbln(half arg, long exp)
+ {
+ unsigned int abs = arg.data_ & 0x7FFF, sign = arg.data_ & 0x8000;
+ if(abs >= 0x7C00 || !abs)
+ return (abs>0x7C00) ? half(detail::binary, detail::signal(arg.data_)) : arg;
+ for(; abs<0x400; abs<<=1,--exp) ;
+ exp += abs >> 10;
+ if(exp > 30)
+ return half(detail::binary, detail::overflow<half::round_style>(sign));
+ else if(exp < -10)
+ return half(detail::binary, detail::underflow<half::round_style>(sign));
+ else if(exp > 0)
+ return half(detail::binary, sign|(exp<<10)|(abs&0x3FF));
+ unsigned int m = (abs&0x3FF) | 0x400;
+ return half(detail::binary, detail::rounded<half::round_style,false>(sign|(m>>(1-exp)), (m>>-exp)&1, (m&((1<<-exp)-1))!=0));
+ }
+
+ /// Multiply by power of two.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::scalbn](https://en.cppreference.com/w/cpp/numeric/math/scalbn).
+ /// \param arg number to modify
+ /// \param exp power of two to multiply with
+	/// \return \a arg multiplied by 2 raised to \a exp
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half scalbn(half arg, int exp) { return scalbln(arg, exp); }
+
+ /// Multiply by power of two.
+ /// This function is exact to rounding for all rounding modes.
+ ///
+ /// **See also:** Documentation for [std::ldexp](https://en.cppreference.com/w/cpp/numeric/math/ldexp).
+ /// \param arg number to modify
+ /// \param exp power of two to multiply with
+	/// \return \a arg multiplied by 2 raised to \a exp
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ inline half ldexp(half arg, int exp) { return scalbln(arg, exp); }
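+
+	// Usage sketch (illustrative only): scalbln, scalbn and ldexp are equivalent
+	// here and undo a frexp decomposition, e.g. assuming `using half_float::half;`:
+	//
+	//   half v = ldexp(half(0.75f), 3);   // 0.75 * 2^3 == 6.0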
+
+ /// Extract integer and fractional parts.
+ /// **See also:** Documentation for [std::modf](https://en.cppreference.com/w/cpp/numeric/math/modf).
+	/// \param arg number to decompose into integer and fractional parts
+ /// \param iptr address to store integer part at
+ /// \return fractional part
+ /// \exception FE_INVALID for signaling NaN
+ inline half modf(half arg, half *iptr)
+ {
+ unsigned int abs = arg.data_ & 0x7FFF;
+ if(abs > 0x7C00)
+ {
+ arg = half(detail::binary, detail::signal(arg.data_));
+ return *iptr = arg, arg;
+ }
+ if(abs >= 0x6400)
+ return *iptr = arg, half(detail::binary, arg.data_&0x8000);
+ if(abs < 0x3C00)
+ return iptr->data_ = arg.data_ & 0x8000, arg;
+ unsigned int exp = abs >> 10, mask = (1<<(25-exp)) - 1, m = arg.data_ & mask;
+ iptr->data_ = arg.data_ & ~mask;
+ if(!m)
+ return half(detail::binary, arg.data_&0x8000);
+ for(; m<0x400; m<<=1,--exp) ;
+ return half(detail::binary, (arg.data_&0x8000)|(exp<<10)|(m&0x3FF));
+ }
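+
+	// Usage sketch (illustrative only), assuming `using half_float::half;`:
+	//
+	//   half ipart;
+	//   half frac = modf(half(3.25f), &ipart);   // ipart == 3.0, frac == 0.25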
+
+ /// Extract exponent.
+ /// **See also:** Documentation for [std::ilogb](https://en.cppreference.com/w/cpp/numeric/math/ilogb).
+ /// \param arg number to query
+ /// \return floating-point exponent
+ /// \retval FP_ILOGB0 for zero
+ /// \retval FP_ILOGBNAN for NaN
+ /// \retval INT_MAX for infinity
+	/// \exception FE_INVALID for 0, infinite or NaN values
+ inline int ilogb(half arg)
+ {
+ int abs = arg.data_ & 0x7FFF, exp;
+ if(!abs || abs >= 0x7C00)
+ {
+ detail::raise(FE_INVALID);
+ return !abs ? FP_ILOGB0 : (abs==0x7C00) ? INT_MAX : FP_ILOGBNAN;
+ }
+ for(exp=(abs>>10)-15; abs<0x200; abs<<=1,--exp) ;
+ return exp;
+ }
+
+ /// Extract exponent.
+ /// **See also:** Documentation for [std::logb](https://en.cppreference.com/w/cpp/numeric/math/logb).
+ /// \param arg number to query
+ /// \return floating-point exponent
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_DIVBYZERO for 0
+ inline half logb(half arg)
+ {
+ int abs = arg.data_ & 0x7FFF, exp;
+ if(!abs)
+ return half(detail::binary, detail::pole(0x8000));
+ if(abs >= 0x7C00)
+ return half(detail::binary, (abs==0x7C00) ? 0x7C00 : detail::signal(arg.data_));
+ for(exp=(abs>>10)-15; abs<0x200; abs<<=1,--exp) ;
+ unsigned int value = static_cast<unsigned>(exp<0) << 15;
+ if(exp)
+ {
+ unsigned int m = std::abs(exp) << 6;
+ for(exp=18; m<0x400; m<<=1,--exp) ;
+ value |= (exp<<10) + m;
+ }
+ return half(detail::binary, value);
+ }
+
+ /// Next representable value.
+ /// **See also:** Documentation for [std::nextafter](https://en.cppreference.com/w/cpp/numeric/math/nextafter).
+ /// \param from value to compute next representable value for
+ /// \param to direction towards which to compute next value
+ /// \return next representable value after \a from in direction towards \a to
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW for infinite result from finite argument
+ /// \exception FE_UNDERFLOW for subnormal result
+ inline half nextafter(half from, half to)
+ {
+ int fabs = from.data_ & 0x7FFF, tabs = to.data_ & 0x7FFF;
+ if(fabs > 0x7C00 || tabs > 0x7C00)
+ return half(detail::binary, detail::signal(from.data_, to.data_));
+ if(from.data_ == to.data_ || !(fabs|tabs))
+ return to;
+ if(!fabs)
+ {
+ detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT);
+ return half(detail::binary, (to.data_&0x8000)+1);
+ }
+ unsigned int out = from.data_ + (((from.data_>>15)^static_cast<unsigned>(
+ (from.data_^(0x8000|(0x8000-(from.data_>>15))))<(to.data_^(0x8000|(0x8000-(to.data_>>15))))))<<1) - 1;
+ detail::raise(FE_OVERFLOW, fabs<0x7C00 && (out&0x7C00)==0x7C00);
+ detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT && (out&0x7C00)<0x400);
+ return half(detail::binary, out);
+ }
+
+ /// Next representable value.
+ /// **See also:** Documentation for [std::nexttoward](https://en.cppreference.com/w/cpp/numeric/math/nexttoward).
+ /// \param from value to compute next representable value for
+ /// \param to direction towards which to compute next value
+ /// \return next representable value after \a from in direction towards \a to
+ /// \exception FE_INVALID for signaling NaN
+ /// \exception FE_OVERFLOW for infinite result from finite argument
+ /// \exception FE_UNDERFLOW for subnormal result
+ inline half nexttoward(half from, long double to)
+ {
+ int fabs = from.data_ & 0x7FFF;
+ if(fabs > 0x7C00)
+ return half(detail::binary, detail::signal(from.data_));
+ long double lfrom = static_cast<long double>(from);
+ if(detail::builtin_isnan(to) || lfrom == to)
+ return half(static_cast<float>(to));
+ if(!fabs)
+ {
+ detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT);
+ return half(detail::binary, (static_cast<unsigned>(detail::builtin_signbit(to))<<15)+1);
+ }
+ unsigned int out = from.data_ + (((from.data_>>15)^static_cast<unsigned>(lfrom<to))<<1) - 1;
+ detail::raise(FE_OVERFLOW, (out&0x7FFF)==0x7C00);
+ detail::raise(FE_UNDERFLOW, !HALF_ERRHANDLING_UNDERFLOW_TO_INEXACT && (out&0x7FFF)<0x400);
+ return half(detail::binary, out);
+ }
+
+ /// Take sign.
+ /// **See also:** Documentation for [std::copysign](https://en.cppreference.com/w/cpp/numeric/math/copysign).
+ /// \param x value to change sign for
+ /// \param y value to take sign from
+ /// \return value equal to \a x in magnitude and to \a y in sign
+ inline HALF_CONSTEXPR half copysign(half x, half y) { return half(detail::binary, x.data_^((x.data_^y.data_)&0x8000)); }
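+
+	// Usage sketch (illustrative only), assuming `using half_float::half;`:
+	//
+	//   half n = nextafter(half(1.0f), half(2.0f));   // smallest half greater than 1
+	//   half s = copysign(half(3.0f), half(-0.0f));   // -3.0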
+
+ /// \}
+ /// \anchor classification
+ /// \name Floating point classification
+ /// \{
+
+ /// Classify floating-point value.
+ /// **See also:** Documentation for [std::fpclassify](https://en.cppreference.com/w/cpp/numeric/math/fpclassify).
+ /// \param arg number to classify
+ /// \retval FP_ZERO for positive and negative zero
+ /// \retval FP_SUBNORMAL for subnormal numbers
+	/// \retval FP_INFINITE for positive and negative infinity
+ /// \retval FP_NAN for NaNs
+ /// \retval FP_NORMAL for all other (normal) values
+ inline HALF_CONSTEXPR int fpclassify(half arg)
+ {
+ return !(arg.data_&0x7FFF) ? FP_ZERO :
+ ((arg.data_&0x7FFF)<0x400) ? FP_SUBNORMAL :
+ ((arg.data_&0x7FFF)<0x7C00) ? FP_NORMAL :
+ ((arg.data_&0x7FFF)==0x7C00) ? FP_INFINITE :
+ FP_NAN;
+ }
+
+ /// Check if finite number.
+ /// **See also:** Documentation for [std::isfinite](https://en.cppreference.com/w/cpp/numeric/math/isfinite).
+ /// \param arg number to check
+ /// \retval true if neither infinity nor NaN
+ /// \retval false else
+ inline HALF_CONSTEXPR bool isfinite(half arg) { return (arg.data_&0x7C00) != 0x7C00; }
+
+ /// Check for infinity.
+ /// **See also:** Documentation for [std::isinf](https://en.cppreference.com/w/cpp/numeric/math/isinf).
+ /// \param arg number to check
+ /// \retval true for positive or negative infinity
+ /// \retval false else
+ inline HALF_CONSTEXPR bool isinf(half arg) { return (arg.data_&0x7FFF) == 0x7C00; }
+
+ /// Check for NaN.
+ /// **See also:** Documentation for [std::isnan](https://en.cppreference.com/w/cpp/numeric/math/isnan).
+ /// \param arg number to check
+ /// \retval true for NaNs
+ /// \retval false else
+ inline HALF_CONSTEXPR bool isnan(half arg) { return (arg.data_&0x7FFF) > 0x7C00; }
+
+ /// Check if normal number.
+ /// **See also:** Documentation for [std::isnormal](https://en.cppreference.com/w/cpp/numeric/math/isnormal).
+ /// \param arg number to check
+ /// \retval true if normal number
+ /// \retval false if either subnormal, zero, infinity or NaN
+ inline HALF_CONSTEXPR bool isnormal(half arg) { return ((arg.data_&0x7C00)!=0) & ((arg.data_&0x7C00)!=0x7C00); }
+
+ /// Check sign.
+ /// **See also:** Documentation for [std::signbit](https://en.cppreference.com/w/cpp/numeric/math/signbit).
+ /// \param arg number to check
+ /// \retval true for negative number
+ /// \retval false for positive number
+ inline HALF_CONSTEXPR bool signbit(half arg) { return (arg.data_&0x8000) != 0; }
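+
+	// Usage sketch (illustrative only): the classification helpers mirror <cmath>,
+	// e.g. assuming `using half_float::half;`:
+	//
+	//   half h(1.0f);
+	//   bool finite = isfinite(h);             // true
+	//   int  cls    = fpclassify(half(0.0f));  // FP_ZERO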
+
+ /// \}
+ /// \anchor compfunc
+ /// \name Comparison
+ /// \{
+
+ /// Quiet comparison for greater than.
+ /// **See also:** Documentation for [std::isgreater](https://en.cppreference.com/w/cpp/numeric/math/isgreater).
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x greater than \a y
+ /// \retval false else
+ inline HALF_CONSTEXPR bool isgreater(half x, half y)
+ {
+ return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) > ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);
+ }
+
+ /// Quiet comparison for greater equal.
+ /// **See also:** Documentation for [std::isgreaterequal](https://en.cppreference.com/w/cpp/numeric/math/isgreaterequal).
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x greater equal \a y
+ /// \retval false else
+ inline HALF_CONSTEXPR bool isgreaterequal(half x, half y)
+ {
+ return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) >= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);
+ }
+
+ /// Quiet comparison for less than.
+ /// **See also:** Documentation for [std::isless](https://en.cppreference.com/w/cpp/numeric/math/isless).
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x less than \a y
+ /// \retval false else
+ inline HALF_CONSTEXPR bool isless(half x, half y)
+ {
+ return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) < ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);
+ }
+
+ /// Quiet comparison for less equal.
+ /// **See also:** Documentation for [std::islessequal](https://en.cppreference.com/w/cpp/numeric/math/islessequal).
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if \a x less equal \a y
+ /// \retval false else
+ inline HALF_CONSTEXPR bool islessequal(half x, half y)
+ {
+ return ((x.data_^(0x8000|(0x8000-(x.data_>>15))))+(x.data_>>15)) <= ((y.data_^(0x8000|(0x8000-(y.data_>>15))))+(y.data_>>15)) && !isnan(x) && !isnan(y);
+ }
+
+	/// Quiet comparison for less or greater.
+ /// **See also:** Documentation for [std::islessgreater](https://en.cppreference.com/w/cpp/numeric/math/islessgreater).
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if either less or greater
+ /// \retval false else
+ inline HALF_CONSTEXPR bool islessgreater(half x, half y)
+ {
+ return x.data_!=y.data_ && ((x.data_|y.data_)&0x7FFF) && !isnan(x) && !isnan(y);
+ }
+
+ /// Quiet check if unordered.
+ /// **See also:** Documentation for [std::isunordered](https://en.cppreference.com/w/cpp/numeric/math/isunordered).
+ /// \param x first operand
+ /// \param y second operand
+ /// \retval true if unordered (one or two NaN operands)
+ /// \retval false else
+ inline HALF_CONSTEXPR bool isunordered(half x, half y) { return isnan(x) || isnan(y); }
+
+ /// \}
+ /// \anchor casting
+ /// \name Casting
+ /// \{
+
+ /// Cast to or from half-precision floating-point number.
+ /// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted
+ /// directly using the default rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do.
+ ///
+ /// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types
+ /// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler
+ /// error and casting between [half](\ref half_float::half)s returns the argument unmodified.
+ /// \tparam T destination type (half or built-in arithmetic type)
+ /// \tparam U source type (half or built-in arithmetic type)
+ /// \param arg value to cast
+ /// \return \a arg converted to destination type
+ /// \exception FE_INVALID if \a T is integer type and result is not representable as \a T
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ template<typename T,typename U> T half_cast(U arg) { return detail::half_caster<T,U>::cast(arg); }
+
+ /// Cast to or from half-precision floating-point number.
+ /// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted
+ /// directly using the specified rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do.
+ ///
+ /// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types
+ /// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler
+ /// error and casting between [half](\ref half_float::half)s returns the argument unmodified.
+ /// \tparam T destination type (half or built-in arithmetic type)
+ /// \tparam R rounding mode to use.
+ /// \tparam U source type (half or built-in arithmetic type)
+ /// \param arg value to cast
+ /// \return \a arg converted to destination type
+ /// \exception FE_INVALID if \a T is integer type and result is not representable as \a T
+ /// \exception FE_OVERFLOW, ...UNDERFLOW, ...INEXACT according to rounding
+ template<typename T,std::float_round_style R,typename U> T half_cast(U arg) { return detail::half_caster<T,U,R>::cast(arg); }
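+
+	// Usage sketch (illustrative only): half_cast converts without an intermediate
+	// round-trip over float and can take an explicit rounding mode, e.g. assuming
+	// `using half_float::half;`:
+	//
+	//   half  h = half_cast<half>(3.14159);                     // double -> half
+	//   int   i = half_cast<int, std::round_toward_zero>(h);    // 3
+	//   float f = half_cast<float>(h);
+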
+ /// \}
+
+ /// \}
+ /// \anchor errors
+ /// \name Error handling
+ /// \{
+
+ /// Clear exception flags.
+ /// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled,
+ /// but in that case manual flag management is the only way to raise flags.
+ ///
+ /// **See also:** Documentation for [std::feclearexcept](https://en.cppreference.com/w/cpp/numeric/fenv/feclearexcept).
+ /// \param excepts OR of exceptions to clear
+ /// \retval 0 all selected flags cleared successfully
+ inline int feclearexcept(int excepts) { detail::errflags() &= ~excepts; return 0; }
+
+ /// Test exception flags.
+ /// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled,
+ /// but in that case manual flag management is the only way to raise flags.
+ ///
+ /// **See also:** Documentation for [std::fetestexcept](https://en.cppreference.com/w/cpp/numeric/fenv/fetestexcept).
+ /// \param excepts OR of exceptions to test
+ /// \return OR of selected exceptions if raised
+ inline int fetestexcept(int excepts) { return detail::errflags() & excepts; }
+
+ /// Raise exception flags.
+ /// This raises the specified floating point exceptions and also invokes any additional automatic exception handling as
+	/// configured with the [HALF_ERRHANDLING_...](\ref HALF_ERRHANDLING_ERRNO) preprocessor symbols.
+ /// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled,
+ /// but in that case manual flag management is the only way to raise flags.
+ ///
+ /// **See also:** Documentation for [std::feraiseexcept](https://en.cppreference.com/w/cpp/numeric/fenv/feraiseexcept).
+ /// \param excepts OR of exceptions to raise
+ /// \retval 0 all selected exceptions raised successfully
+ inline int feraiseexcept(int excepts) { detail::errflags() |= excepts; detail::raise(excepts); return 0; }
+
+ /// Save exception flags.
+ /// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled,
+ /// but in that case manual flag management is the only way to raise flags.
+ ///
+ /// **See also:** Documentation for [std::fegetexceptflag](https://en.cppreference.com/w/cpp/numeric/fenv/feexceptflag).
+	/// \param flagp address to store flag state at
+ /// \param excepts OR of flags to save
+ /// \retval 0 for success
+ inline int fegetexceptflag(int *flagp, int excepts) { *flagp = detail::errflags() & excepts; return 0; }
+
+ /// Restore exception flags.
+ /// This only copies the specified exception state (including unset flags) without incurring any additional exception handling.
+ /// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled,
+ /// but in that case manual flag management is the only way to raise flags.
+ ///
+ /// **See also:** Documentation for [std::fesetexceptflag](https://en.cppreference.com/w/cpp/numeric/fenv/feexceptflag).
+	/// \param flagp address to take flag state from
+ /// \param excepts OR of flags to restore
+ /// \retval 0 for success
+ inline int fesetexceptflag(const int *flagp, int excepts) { detail::errflags() = (detail::errflags()|(*flagp&excepts)) & (*flagp|~excepts); return 0; }
+
+ /// Throw C++ exceptions based on set exception flags.
+ /// This function manually throws a corresponding C++ exception if one of the specified flags is set,
+ /// no matter if automatic throwing (via [HALF_ERRHANDLING_THROW_...](\ref HALF_ERRHANDLING_THROW_INVALID)) is enabled or not.
+ /// This function works even if [automatic exception flag handling](\ref HALF_ERRHANDLING_FLAGS) is disabled,
+ /// but in that case manual flag management is the only way to raise flags.
+ /// \param excepts OR of exceptions to test
+ /// \param msg error message to use for exception description
+ /// \throw std::domain_error if `FE_INVALID` or `FE_DIVBYZERO` is selected and set
+ /// \throw std::overflow_error if `FE_OVERFLOW` is selected and set
+ /// \throw std::underflow_error if `FE_UNDERFLOW` is selected and set
+ /// \throw std::range_error if `FE_INEXACT` is selected and set
+
+	#ifndef HALF_ENABLE_CPP11_NOEXCEPT
+ inline void fethrowexcept(int excepts, const char *msg = "")
+ {
+ excepts &= detail::errflags();
+ if(excepts & (FE_INVALID|FE_DIVBYZERO))
+ throw std::domain_error(msg);
+ if(excepts & FE_OVERFLOW)
+ throw std::overflow_error(msg);
+ if(excepts & FE_UNDERFLOW)
+ throw std::underflow_error(msg);
+ if(excepts & FE_INEXACT)
+ throw std::range_error(msg);
+ }
+ #endif //HALF_ENABLE_CPP11_NOEXCEPT
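+
+	// Usage sketch (illustrative only): these fe* functions manage the library's
+	// own half-precision exception flags rather than the hardware floating-point
+	// environment. With automatic flag handling (HALF_ERRHANDLING_FLAGS) enabled
+	// and `using namespace half_float;`:
+	//
+	//   feclearexcept(FE_OVERFLOW | FE_INEXACT);
+	//   half h = half(65504.0f) * half(2.0f);    // exceeds the half range
+	//   if(fetestexcept(FE_OVERFLOW))
+	//       feclearexcept(FE_OVERFLOW);
+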
+ /// \}
+}
+
+
+#undef HALF_UNUSED_NOERR
+#undef HALF_CONSTEXPR
+#undef HALF_CONSTEXPR_CONST
+#undef HALF_CONSTEXPR_NOERR
+#undef HALF_NOEXCEPT
+#undef HALF_NOTHROW
+#undef HALF_THREAD_LOCAL
+#undef HALF_TWOS_COMPLEMENT_INT
+#ifdef HALF_POP_WARNINGS
+ #pragma warning(pop)
+ #undef HALF_POP_WARNINGS
+#endif
+
+#endif