author     evacha01 <evan.chandler@arm.com>   2024-03-08 16:39:24 +0000
committer  Eric Kunze <eric.kunze@arm.com>    2024-04-16 16:02:16 +0000
commit     4a2051146f498cb9ec35d7213720540c5c3e81e2 (patch)
tree       543000b3ef22bd587c3c7702100742e4b94eb5fb
parent     5d0e9c7f3748e80d6f14a3eeaef858eeb912e1fd (diff)
download   reference_model-4a2051146f498cb9ec35d7213720540c5c3e81e2.tar.gz
SPECIAL data gen mode for FP16 and FP32
Signed-off-by: evacha01 <evan.chandler@arm.com>
Change-Id: I5a9a1c63345bd83ca04bc6c2a99b0ef3612971ee
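For illustration, the new generator is selected per tensor in the data-generation JSON; the fragment below is adapted from the unit tests and schema added in this change (op, shape and start_idx are example values):

"input0" : {
    "generator": "FP_SPECIAL",
    "data_type": "FP32",
    "input_type": "VARIABLE",
    "shape": [ 5, 6, 7 ],
    "input_pos": 0,
    "op": "MAXIMUM",
    "fp_special_info": { "start_idx": 0 }
}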
-rw-r--r--  reference_model/CMakeLists.txt                        |   4
-rw-r--r--  reference_model/src/generate/generate_entry.cc        |   5
-rw-r--r--  reference_model/src/generate/generate_fp_special.cc   | 180
-rw-r--r--  reference_model/src/generate/generate_fp_special.h    |  34
-rw-r--r--  reference_model/src/generate/generate_utils.cc        |  17
-rw-r--r--  reference_model/src/generate/generate_utils.h         |  11
-rw-r--r--  reference_model/src/verify/verifiers.h                |   8
-rw-r--r--  reference_model/src/verify/verify_entry.cc            |   3
-rw-r--r--  reference_model/src/verify/verify_fp_special.cc       | 106
-rw-r--r--  reference_model/test/generate_tests.cpp               |  97
-rw-r--r--  scripts/schemavalidation/datagen-config.schema.json   |  16
-rw-r--r--  verif/conformance/test_select.py                      |   6
-rw-r--r--  verif/generator/tosa_arg_gen.py                       |  35
-rw-r--r--  verif/generator/tosa_test_gen.py                      |  18
-rw-r--r--  verif/generator/tosa_utils.py                         |   2
-rw-r--r--  verif/tests/test_tosa_refmodel.py                     |  14
16 files changed, 529 insertions(+), 27 deletions(-)
diff --git a/reference_model/CMakeLists.txt b/reference_model/CMakeLists.txt
index b780781..cf1e9fe 100644
--- a/reference_model/CMakeLists.txt
+++ b/reference_model/CMakeLists.txt
@@ -75,6 +75,7 @@ set(CXX_SOURCE
src/generate/generate_pseudo_random.cc
src/generate/generate_fixed_data.cc
src/generate/generate_full_range.cc
+ src/generate/generate_fp_special.cc
src/generate/generate_entry.cc
src/generate/generate_utils.cc
src/verify/verify_abs_error.cc
@@ -84,6 +85,7 @@ set(CXX_SOURCE
src/verify/verify_reduce_product.cc
src/verify/verify_relative.cc
src/verify/verify_ulp.cc
+ src/verify/verify_fp_special.cc
src/verify/verify_utils.cc
src/ops/op_factory.cc
src/ops/tensor_ops.cc
@@ -161,6 +163,7 @@ add_library(tosa_reference_verify_lib SHARED
src/verify/verify_reduce_product.cc
src/verify/verify_relative.cc
src/verify/verify_ulp.cc
+ src/verify/verify_fp_special.cc
src/verify/verify_utils.cc
src/verify/verify_config.cc
src/func_debug.cc
@@ -179,6 +182,7 @@ add_library(tosa_reference_generate_lib SHARED
src/generate/generate_pseudo_random.cc
src/generate/generate_fixed_data.cc
src/generate/generate_full_range.cc
+ src/generate/generate_fp_special.cc
src/generate/generate_entry.cc
src/generate/generate_utils.cc
src/generate/generate_config.cc
diff --git a/reference_model/src/generate/generate_entry.cc b/reference_model/src/generate/generate_entry.cc
index 6f797b3..36d99ad 100644
--- a/reference_model/src/generate/generate_entry.cc
+++ b/reference_model/src/generate/generate_entry.cc
@@ -16,6 +16,7 @@
#include "generate_dot_product.h"
#include "generate_fixed_data.h"
+#include "generate_fp_special.h"
#include "generate_full_range.h"
#include "generate_pseudo_random.h"
#include "generate_utils.h"
@@ -46,6 +47,10 @@ bool generate(const GenerateConfig& cfg, void* data, size_t size)
return generateFullRange(cfg, data, size);
break;
}
+ case GeneratorType::FpSpecial: {
+ return generateFpSpecial(cfg, data, size);
+ break;
+ }
default: {
WARNING("[Generator] Unsupported generation mode.");
break;
diff --git a/reference_model/src/generate/generate_fp_special.cc b/reference_model/src/generate/generate_fp_special.cc
new file mode 100644
index 0000000..3602f51
--- /dev/null
+++ b/reference_model/src/generate/generate_fp_special.cc
@@ -0,0 +1,180 @@
+// Copyright (c) 2024, ARM Limited.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "generate_fp_special.h"
+#include "half.hpp"
+
+#include <map>
+
+namespace
+{
+
+class SpecialValue
+{
+public:
+ enum SpecialValsEnum
+ {
+ Zero,
+ Inf,
+ NaN,
+ Min,
+ Max,
+ One,
+ };
+
+ SpecialValue() = default;
+ SpecialValue(SpecialValsEnum v)
+ : value(v)
+ {}
+ operator SpecialValsEnum() const
+ {
+ return value;
+ }
+ SpecialValue& operator=(SpecialValsEnum v)
+ {
+ value = v;
+ return *this;
+ }
+ bool operator==(const SpecialValsEnum v) const
+ {
+ return value == v;
+ }
+ bool operator!=(const SpecialValsEnum v) const
+ {
+ return value != v;
+ }
+ SpecialValue operator-()
+ {
+ negative = !negative;
+ return *this;
+ }
+
+ template <typename DataType>
+ DataType evaluate()
+ {
+ switch (value)
+ {
+ case Zero:
+ return static_cast<DataType>(negative ? -0.0 : 0.0);
+ case Inf:
+ return negative ? -std::numeric_limits<DataType>::infinity()
+ : std::numeric_limits<DataType>::infinity();
+ case NaN:
+ return std::numeric_limits<DataType>::quiet_NaN();
+ case Min:
+ return negative ? -std::numeric_limits<DataType>::min() : std::numeric_limits<DataType>::min();
+ case Max:
+ return negative ? -std::numeric_limits<DataType>::max() : std::numeric_limits<DataType>::max();
+ case One:
+ return static_cast<DataType>(negative ? -1.0 : 1.0);
+ default:
+ WARNING("[Generator][FS] Uninitialised special value.");
+ return static_cast<DataType>(0.0);
+ }
+ }
+
+private:
+ SpecialValsEnum value;
+ bool negative = false;
+};
+
+/*
+Test vals format
+
+I: Number of inputs to an op - referenced by cfg.inputPos
+T: Number of test cases defined for the op
+
+vector of test inputs: {
+ vector of values for test 0: { valueForinputPos0, valueForinputPos1, ..., valueForinputPosI-1 },
+ vector of values for test 1: { valueForinputPos0, valueForinputPos1, ..., valueForinputPosI-1 },
+ ...
+ vector of values for test T-1: { valueForinputPos0, valueForinputPos1, ..., valueForinputPosI-1 },
+}
+*/
+using TestValues = std::vector<std::vector<SpecialValue>>;
+
+TestValues equalOpsTestVals{ { SpecialValue(SpecialValue::Zero), -SpecialValue(SpecialValue::Zero) },
+ { SpecialValue(SpecialValue::Inf), -SpecialValue(SpecialValue::Inf) } };
+
+TestValues addTestVals{ { SpecialValue(SpecialValue::Max), SpecialValue(SpecialValue::One) },
+ { SpecialValue(SpecialValue::Inf), -SpecialValue(SpecialValue::Inf) } };
+
+TestValues defaultTestVals{ { SpecialValue(SpecialValue::Zero) }, { -SpecialValue(SpecialValue::Zero) },
+ { SpecialValue(SpecialValue::Inf) }, { -SpecialValue(SpecialValue::Inf) },
+ { SpecialValue(SpecialValue::NaN) }, { SpecialValue(SpecialValue::Min) },
+ { SpecialValue(SpecialValue::Max) } };
+
+std::map<Op, TestValues> testValues = { { Op::Op_EQUAL, equalOpsTestVals },
+ { Op::Op_GREATER, equalOpsTestVals },
+ { Op::Op_GREATER_EQUAL, equalOpsTestVals },
+ { Op::Op_ADD, addTestVals } };
+
+template <typename DataType>
+bool generate(const TosaReference::GenerateConfig& cfg, DataType* data, size_t size)
+{
+ const TosaReference::FpSpecialInfo& fsinfo = cfg.fpSpecialInfo;
+ uint8_t startIndex = fsinfo.startIndex;
+
+ std::vector<DataType> values;
+ auto testValuesResult = testValues.find(cfg.opType);
+ TestValues opTestVals = defaultTestVals;
+ size_t inputIndex = 0;
+ if (testValuesResult != testValues.end())
+ {
+ // When an op has an entry in testValues we use its op specific special test values, otherwise default values are used
+ opTestVals = testValuesResult->second;
+ inputIndex = cfg.inputPos;
+ }
+
+ for (std::vector<SpecialValue> inputs : opTestVals)
+ {
+ values.push_back(inputs[inputIndex].evaluate<DataType>());
+ }
+
+ const auto T = TosaReference::numElementsFromShape(cfg.shape);
+ for (auto t = 0; t < T; ++t)
+ {
+ data[t] = values[(t + startIndex) % values.size()];
+ }
+ return true;
+}
+} // namespace
+
+namespace TosaReference
+{
+bool generateFpSpecial(const GenerateConfig& cfg, void* data, size_t size)
+{
+ // Check we support the operator
+ if (cfg.opType == Op::Op_UNKNOWN)
+ {
+ WARNING("[Generator][FS] Unknown operator.");
+ return false;
+ }
+
+ switch (cfg.dataType)
+ {
+ case DType::DType_FP16: {
+ half_float::half* outData = reinterpret_cast<half_float::half*>(data);
+ return generate(cfg, outData, size);
+ }
+ case DType::DType_FP32: {
+ float* outData = reinterpret_cast<float*>(data);
+ return generate(cfg, outData, size);
+ }
+ default:
+ WARNING("[Generator][FS] Unsupported type.");
+ return false;
+ }
+}
+} // namespace TosaReference
\ No newline at end of file
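A minimal Python sketch of the fill pattern implemented in generate_fp_special.cc above: each tensor is filled by cycling through the op's list of special values, offset by start_idx (the Min/Max constants shown are the float32 limits; names and values mirror the C++ defaults but are illustrative only).

import math

# Default special values, in the same order as defaultTestVals above:
# Zero, -Zero, Inf, -Inf, NaN, Min, Max (float32 limits used here).
default_vals = [0.0, -0.0, math.inf, -math.inf, math.nan,
                1.1754943508222875e-38, 3.4028234663852886e+38]

def fill(num_elements, start_idx=0, values=default_vals):
    # Mirrors: data[t] = values[(t + startIndex) % values.size()]
    return [values[(t + start_idx) % len(values)] for t in range(num_elements)]

# start_idx=100 with 7 values gives an offset of 100 % 7 == 2, so the data
# starts inf, -inf, nan - matching the MAXIMUM unit test further down.
print(fill(3, start_idx=100))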
diff --git a/reference_model/src/generate/generate_fp_special.h b/reference_model/src/generate/generate_fp_special.h
new file mode 100644
index 0000000..9a7315b
--- /dev/null
+++ b/reference_model/src/generate/generate_fp_special.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2024, ARM Limited.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef GENERATE_FP_SPECIAL_H_
+#define GENERATE_FP_SPECIAL_H_
+
+#include "generate_utils.h"
+
+namespace TosaReference
+{
+
+/// \brief Perform FP special data generation
+///
+/// \param cfg Generator related meta-data
+/// \param data Buffer to generate the data to
+/// \param size Size of the buffer
+///
+/// \return True on successful generation
+bool generateFpSpecial(const GenerateConfig& cfg, void* data, size_t size);
+
+}; // namespace TosaReference
+
+#endif // GENERATE_FP_SPECIAL_H_
\ No newline at end of file
diff --git a/reference_model/src/generate/generate_utils.cc b/reference_model/src/generate/generate_utils.cc
index f31b443..d62c247 100644
--- a/reference_model/src/generate/generate_utils.cc
+++ b/reference_model/src/generate/generate_utils.cc
@@ -107,7 +107,7 @@ NLOHMANN_JSON_SERIALIZE_ENUM(GeneratorType,
{ GeneratorType::DotProduct, "DOT_PRODUCT" },
{ GeneratorType::FullRange, "FULL_RANGE" },
{ GeneratorType::Boundary, "BOUNDARY" },
- { GeneratorType::Special, "SPECIAL" },
+ { GeneratorType::FpSpecial, "FP_SPECIAL" },
{ GeneratorType::FixedData, "FIXED_DATA" },
})
@@ -159,6 +159,14 @@ void from_json(const nlohmann::json& j, FullRangeInfo& fullRangeInfo)
}
}
+void from_json(const nlohmann::json& j, FpSpecialInfo& fpSpecialInfo)
+{
+ if (j.contains("start_idx"))
+ {
+ j.at("start_idx").get_to(fpSpecialInfo.startIndex);
+ }
+}
+
void from_json(const nlohmann::json& j, GenerateConfig& cfg)
{
j.at("data_type").get_to(cfg.dataType);
@@ -201,6 +209,13 @@ void from_json(const nlohmann::json& j, GenerateConfig& cfg)
{
j.at("full_range_info").get_to(cfg.fullRangeInfo);
}
+
+ //Set up defaults for fpSpecialInfo
+ cfg.fpSpecialInfo.startIndex = 0;
+ if (j.contains("fp_special_info"))
+ {
+ j.at("fp_special_info").get_to(cfg.fpSpecialInfo);
+ }
}
std::optional<GenerateConfig> parseGenerateConfig(const char* json, const char* tensorName)
diff --git a/reference_model/src/generate/generate_utils.h b/reference_model/src/generate/generate_utils.h
index 8ce9b0e..0428fd8 100644
--- a/reference_model/src/generate/generate_utils.h
+++ b/reference_model/src/generate/generate_utils.h
@@ -33,7 +33,7 @@ enum class GeneratorType
DotProduct,
FullRange,
Boundary,
- Special,
+ FpSpecial,
FixedData,
};
@@ -82,6 +82,14 @@ struct FullRangeInfo
uint16_t startVal;
};
+/// \brief Op specific generator meta-data
+struct FpSpecialInfo
+{
+ FpSpecialInfo() = default;
+
+ uint8_t startIndex;
+};
+
/// \brief Generator configuration
struct GenerateConfig
{
@@ -95,6 +103,7 @@ struct GenerateConfig
PseudoRandomInfo pseudoRandomInfo;
FixedDataInfo fixedDataInfo;
FullRangeInfo fullRangeInfo;
+ FpSpecialInfo fpSpecialInfo;
};
/// \brief Parse the generator config when given in JSON form
diff --git a/reference_model/src/verify/verifiers.h b/reference_model/src/verify/verifiers.h
index e5f9df1..48c971d 100644
--- a/reference_model/src/verify/verifiers.h
+++ b/reference_model/src/verify/verifiers.h
@@ -85,6 +85,14 @@ bool verifyRelative(const CTensor* referenceTensor,
const CTensor* implementationTensor,
const RelativeVerifyInfo& rInfo);
+/// \brief Perform FP special tests verification
+///
+/// \param referenceTensor Reference tensor
+/// \param implementationTensor Implementation resulting tensor
+///
+/// \return True if compliant else false
+bool verifyFpSpecial(const CTensor* referenceTensor, const CTensor* implementationTensor);
+
}; // namespace TosaReference
#endif // VERIFIERS_H_
diff --git a/reference_model/src/verify/verify_entry.cc b/reference_model/src/verify/verify_entry.cc
index 9702c36..a04c62e 100644
--- a/reference_model/src/verify/verify_entry.cc
+++ b/reference_model/src/verify/verify_entry.cc
@@ -46,6 +46,9 @@ bool verify(const CTensor* ref, const CTensor* refBnd, const CTensor* imp, const
case VerifyMode::Relative: {
return verifyRelative(ref, imp, cfg.relativeInfo);
}
+ case VerifyMode::FpSpecial: {
+ return verifyFpSpecial(ref, imp);
+ }
default: {
WARNING("[Verifier] Unsupported verification mode.");
break;
diff --git a/reference_model/src/verify/verify_fp_special.cc b/reference_model/src/verify/verify_fp_special.cc
new file mode 100644
index 0000000..94550fd
--- /dev/null
+++ b/reference_model/src/verify/verify_fp_special.cc
@@ -0,0 +1,106 @@
+// Copyright (c) 2024, ARM Limited.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "half.hpp"
+#include "verifiers.h"
+
+namespace
+{
+template <typename Datatype>
+bool compliant(const double& referenceValue, const Datatype& implementationValue)
+{
+ // Compliant when values are both nans OR when values are both finite/infinite and have the same sign
+ return (std::isnan(referenceValue) && std::isnan(implementationValue)) ||
+ (std::isnan(referenceValue) == std::isnan(implementationValue) &&
+ std::isfinite(referenceValue) == std::isfinite(implementationValue) &&
+ std::signbit(referenceValue) == std::signbit(implementationValue));
+}
+
+template <>
+bool compliant(const double& referenceValue, const half_float::half& implementationValue)
+{
+ // Compliant when values are both nans OR when values are both finite/infinite and have the same sign
+ return (std::isnan(referenceValue) && half_float::isnan(implementationValue)) ||
+ (std::isnan(referenceValue) == half_float::isnan(implementationValue) &&
+ std::isfinite(referenceValue) == half_float::isfinite(implementationValue) &&
+ std::signbit(referenceValue) == half_float::signbit(implementationValue));
+}
+
+template <typename Datatype>
+bool verify(const double* refData,
+ const double* refDataEnd,
+ Datatype* impData,
+ const int64_t elementCount,
+ const std::vector<int32_t> refShape)
+{
+ auto pair = std::mismatch(refData, refDataEnd, impData, std::next(impData, elementCount),
+ [](const double& referenceValue, const Datatype& implementationValue) {
+ return compliant(referenceValue, implementationValue);
+ });
+
+ if (std::get<0>(pair) == refDataEnd)
+ {
+ // No mismatch found
+ return true;
+ }
+ else
+ {
+ auto pos = TosaReference::indexToPosition(std::get<0>(pair) - refData, refShape);
+ WARNING("[Verfier][FS] Location %s", TosaReference::positionToString(pos).c_str());
+ return false;
+ }
+}
+} // namespace
+
+namespace TosaReference
+{
+
+bool verifyFpSpecial(const CTensor* referenceTensor, const CTensor* implementationTensor)
+{
+ // Validate that tensors are provided
+ TOSA_REF_REQUIRE(referenceTensor != nullptr, "[FS] Reference tensor is missing");
+ TOSA_REF_REQUIRE(implementationTensor != nullptr, "[FS] Implementation tensor is missing");
+
+ // Get number of elements
+ const std::vector<int32_t> refShape(referenceTensor->shape, referenceTensor->shape + referenceTensor->num_dims);
+ const auto elementCount = numElements(refShape);
+ TOSA_REF_REQUIRE(elementCount > 0, "[FS] Invalid shape for reference tensor");
+
+ TOSA_REF_REQUIRE(referenceTensor->data_type == tosa_datatype_fp64_t, "[FS] Reference tensor is not fp64");
+ const auto* refData = reinterpret_cast<const double*>(referenceTensor->data);
+ TOSA_REF_REQUIRE(refData != nullptr, "[FS] Missing data for reference");
+ const auto* refDataEnd = std::next(refData, elementCount);
+
+ switch (implementationTensor->data_type)
+ {
+ case tosa_datatype_fp32_t: {
+ const auto* impData = reinterpret_cast<const float*>(implementationTensor->data);
+ TOSA_REF_REQUIRE(impData != nullptr, "[FS] Missing data for implementation");
+
+ return verify(refData, refDataEnd, impData, elementCount, refShape);
+ }
+ case tosa_datatype_fp16_t: {
+ const auto* impData = reinterpret_cast<const half_float::half*>(implementationTensor->data);
+ TOSA_REF_REQUIRE(impData != nullptr, "[FS] Missing data for implementation");
+
+ return verify(refData, refDataEnd, impData, elementCount, refShape);
+ }
+ default:
+ WARNING("[Verifier][FS] Data-type not supported.");
+ break;
+ }
+
+ return false;
+}
+} // namespace TosaReference
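The verifier's element-wise rule compares only the NaN-ness, finiteness and sign of each value, never its magnitude; a small illustrative Python equivalent of the predicate:

import math

def compliant(ref, imp):
    # Both NaN, or same NaN/finite classification and same sign bit.
    if math.isnan(ref) and math.isnan(imp):
        return True
    return (math.isnan(ref) == math.isnan(imp)
            and math.isfinite(ref) == math.isfinite(imp)
            and math.copysign(1.0, ref) == math.copysign(1.0, imp))

assert compliant(math.inf, math.inf)
assert not compliant(math.inf, -math.inf)  # sign differs
assert not compliant(0.0, -0.0)            # sign bit differs
assert compliant(1.0, 2.5)                 # both finite and positive; magnitude is not checked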
diff --git a/reference_model/test/generate_tests.cpp b/reference_model/test/generate_tests.cpp
index 73db631..19b808b 100644
--- a/reference_model/test/generate_tests.cpp
+++ b/reference_model/test/generate_tests.cpp
@@ -1609,4 +1609,101 @@ TEST_CASE("positive - FP16 full range")
}
}
+void fp_special_test_FP32(const std::string tosaName,
+ const size_t tosaElements,
+ const std::string templateJsonCfg,
+ const std::string opStr,
+ const std::string startIndexStr,
+ const std::vector<uint32_t> expected)
+{
+ std::string jsonCfg = templateJsonCfg;
+ update_json_template(jsonCfg, "_OP_", opStr);
+ update_json_template(jsonCfg, "_START_", startIndexStr);
+
+ std::vector<float> buffer(tosaElements);
+ REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaName.c_str(), (void*)buffer.data(), tosaElements * 4));
+ check_output<float>(buffer, expected);
+}
+
+TEST_CASE("positive - FP32 FP Special")
+{
+ std::string templateJsonCfg = R"({
+ "tensors" : {
+ "input0" : {
+ "generator": "FP_SPECIAL",
+ "data_type": "FP32",
+ "input_type": "VARIABLE",
+ "shape" : [ 5, 6, 7 ],
+ "input_pos": 0,
+ "op" : "_OP_",
+ "fp_special_info": {
+ "start_idx": _START_
+ }
+ },
+ "input1" : {
+ "generator": "FP_SPECIAL",
+ "data_type": "FP32",
+ "input_type": "VARIABLE",
+ "shape" : [ 5, 6, 7 ],
+ "input_pos": 1,
+ "op" : "_OP_",
+ "fp_special_info": {
+ "start_idx": _START_
+ }
+ }
+ }
+ })";
+
+ const std::string tosaName0 = "input0";
+ const std::string tosaName1 = "input1";
+ const size_t tosaElements = 5 * 6 * 7;
+
+ SUBCASE("equal, input 0")
+ {
+ std::vector<uint32_t> expected = { 0x0, 0x7F800000, 0x0 };
+ fp_special_test_FP32(tosaName0, tosaElements, templateJsonCfg, "EQUAL", "0", expected);
+ }
+ SUBCASE("equal, input 1")
+ {
+ std::vector<uint32_t> expected = { 0x80000000, 0xFF800000, 0x80000000 };
+ fp_special_test_FP32(tosaName1, tosaElements, templateJsonCfg, "EQUAL", "0", expected);
+ }
+ SUBCASE("greater, input 0")
+ {
+ std::vector<uint32_t> expected = { 0x0, 0x7F800000, 0x0 };
+ fp_special_test_FP32(tosaName0, tosaElements, templateJsonCfg, "GREATER", "0", expected);
+ }
+ SUBCASE("greater, input 1")
+ {
+ std::vector<uint32_t> expected = { 0x80000000, 0xFF800000, 0x80000000 };
+ fp_special_test_FP32(tosaName1, tosaElements, templateJsonCfg, "GREATER", "0", expected);
+ }
+ SUBCASE("add, input 0")
+ {
+ std::vector<uint32_t> expected = { 0x7F7FFFFF, 0x7F800000, 0x7F7FFFFF };
+ fp_special_test_FP32(tosaName0, tosaElements, templateJsonCfg, "ADD", "0", expected);
+ }
+ SUBCASE("add, input 1")
+ {
+ std::vector<uint32_t> expected = { 0x3F800000, 0xFF800000, 0x3F800000 };
+ fp_special_test_FP32(tosaName1, tosaElements, templateJsonCfg, "ADD", "0", expected);
+ }
+ SUBCASE("maximum, input 0")
+ {
+ std::vector<uint32_t> expected = { 0x0, 0x80000000, 0x7F800000 };
+ fp_special_test_FP32(tosaName0, tosaElements, templateJsonCfg, "MAXIMUM", "0", expected);
+ }
+ SUBCASE("maximum, input 1")
+ {
+ std::vector<uint32_t> expected = { 0x0, 0x80000000, 0x7F800000 };
+ fp_special_test_FP32(tosaName1, tosaElements, templateJsonCfg, "MAXIMUM", "0", expected);
+ }
+ SUBCASE("maximum, startIndex 100")
+ {
+ // A startIndex of 100 creates an offset of 2 into the MAXIMUM op's test data (size 7): 100 % 7 = 2
+ std::vector<uint32_t> expected = { 0x7F800000, 0xFF800000, 0x7FC00000 };
+ fp_special_test_FP32(tosaName0, tosaElements, templateJsonCfg, "MAXIMUM", "100", expected);
+ }
+}
+
TEST_SUITE_END(); // generate
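The expected vectors in these tests are raw float32 bit patterns; a short, purely illustrative decoder for the constants used above:

import struct

for bits in (0x00000000, 0x80000000, 0x7F800000, 0xFF800000,
             0x7FC00000, 0x7F7FFFFF, 0x3F800000):
    value = struct.unpack("<f", struct.pack("<I", bits))[0]
    print(f"0x{bits:08X} -> {value}")
# 0x00000000 -> 0.0, 0x80000000 -> -0.0, 0x7F800000 -> inf, 0xFF800000 -> -inf,
# 0x7FC00000 -> nan, 0x7F7FFFFF -> float32 max, 0x3F800000 -> 1.0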
diff --git a/scripts/schemavalidation/datagen-config.schema.json b/scripts/schemavalidation/datagen-config.schema.json
index 19e8b62..6046873 100644
--- a/scripts/schemavalidation/datagen-config.schema.json
+++ b/scripts/schemavalidation/datagen-config.schema.json
@@ -22,7 +22,7 @@
"type": "object",
"properties": {
"generator": {
- "description": "data generator name - PSEUDO_RANDOM, DOT_PRODUCT, FULL_RANGE, BOUNDARY, or SPECIAL",
+ "description": "data generator name - PSEUDO_RANDOM, DOT_PRODUCT, FULL_RANGE, BOUNDARY, or FP_SPECIAL",
"type": "string"
},
"data_type": {
@@ -134,6 +134,20 @@
"additionalProperties": false,
"required": [ ]
},
+ "fp_special_info": {
+ "description": "info required for the FP_SPECIAL generator",
+ "type": "object",
+ "properties":
+ {
+ "start_idx": {
+ "description": "starting index for the test data",
+ "type": "integer",
+ "minimum": 0
+ }
+ },
+ "additionalProperties": false,
+ "required": [ ]
+ },
"fixed_data_info": {
"description": "info required for FIXED_DATA generator",
"type": "object",
diff --git a/verif/conformance/test_select.py b/verif/conformance/test_select.py
index e3a8ffb..e3f1738 100644
--- a/verif/conformance/test_select.py
+++ b/verif/conformance/test_select.py
@@ -259,9 +259,9 @@ class Operator:
negative and "ERRORIF" in str(path)
):
# Check for test set paths
- match = re.match(r"(.*)_(s[0-9]+|full)", path.name)
+ match = re.match(r"(.*)_(s[0-9]+|full|fs)", path.name)
if match:
- if match.group(2) in ["s0", "full"]:
+ if match.group(2) in ["s0", "full", "fs"]:
# Only return the truncated test name
# of the first test of a set, and for full tests
yield path.with_name(match.group(1))
@@ -317,7 +317,7 @@ class Operator:
def _get_extra_test_paths(path):
"""Expand a path to find extra tests."""
paths = []
- for suffix in ["full"]:
+ for suffix in ["full", "fs"]:
suffix_path = path.with_name(f"{path.name}_{suffix}")
if suffix_path.exists():
paths.append(suffix_path)
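The widened pattern now treats an "_fs" suffix like "_s0" and "_full" when selecting tests; an illustrative check (the directory names below are made up):

import re

pattern = re.compile(r"(.*)_(s[0-9]+|full|fs)")
for name in ["add_1x2x3_f32_s0", "exp_3_f16_full", "maximum_5x6x7_f32_fs"]:
    match = pattern.match(name)
    print(match.group(1), "->", match.group(2))  # truncated test name -> test-set suffix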
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 8d6c8d7..5957a33 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -264,6 +264,9 @@ class TosaTensorGen:
return [[]] * num_shapes
shape = testGen.makeShape(rng, rank)
+ # Do not broadcast for some tests
+ if error_name is None and rng.randInt(high=100) < 10:
+ return [shape] * num_shapes
shape_list = []
# Choose any one of the inputs to broadcast
@@ -785,6 +788,10 @@ class TosaTensorValuesGen:
"tensors": {},
}
dg_tens_meta = tens_data["tensors"]
+
+ fp_special_info = {}
+ fp_special_info["start_idx"] = int(rng.randInt())
+
for idx, shape in enumerate(shapeList):
tens_meta = {}
@@ -858,6 +865,8 @@ class TosaTensorValuesGen:
rng.randInt(0, gtu.DTYPE_ATTRIBUTES[dtypeList[idx]]["fullset"])
)
tens_meta["full_range_info"] = info
+ elif dg_type == gtu.DataGenType.FP_SPECIAL:
+ tens_meta["fp_special_info"] = fp_special_info
else:
# TODO - other data gen type
assert False, "TODO: support other data gen types"
@@ -1862,16 +1871,12 @@ class TosaArgGen:
for dg_type in dataGenTypesList:
for arg_str, args_dict in arg_list:
gen_args_dict = args_dict.copy()
+ # Only create one test by default - no sets of tests
+ num_test_sets = 0
+
if dg_type == gtu.DataGenType.PSEUDO_RANDOM:
if error_name is None:
- num_test_sets = (
- args_dict["num_test_sets"]
- if "num_test_sets" in args_dict
- else 0
- )
- else:
- # Add single test for pseudo random
- num_test_sets = 0
+ num_test_sets = args_dict.get("num_test_sets", 0)
elif dg_type == gtu.DataGenType.DOT_PRODUCT:
# Extra tests for each dot product test set
@@ -1900,13 +1905,23 @@ class TosaArgGen:
f"Skipping {opName}{shape_info} as tensor data size too small for full range of values {tensor_size} < {gtu.DTYPE_ATTRIBUTES[dtype]['fullset']}"
)
continue
- # Large enough tensor data size for full range, add a single test
- num_test_sets = 0
+ # Large enough tensor data size for full range, add full test
arg_str = f"{arg_str}_full" if arg_str else "full"
gen_args_dict["tags"] = args_dict.get("tags", []) + [
"non_finite_fp_data"
]
+ elif dg_type == gtu.DataGenType.FP_SPECIAL:
+ shapes_set = {tuple(x) for x in shapeList}
+ if len(shapes_set) != 1:
+ logger.info(
+ f"Changing {opName} input shapes {shapes_set} - broadcasting incompatable with special test"
+ )
+ shapeList = [np.int32(np.broadcast_shapes(*shapeList))] * len(
+ shapeList
+ )
+ arg_str = f"{arg_str}_fs" if arg_str else "fs"
+
gen_args_dict["dg_type"] = dg_type
if num_test_sets > 0:
for s in range(0, num_test_sets):
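For FP_SPECIAL tests, broadcasting inputs are replaced by one common shape before data generation; an illustrative reduction of the np.broadcast_shapes step above (the shapes are arbitrary examples):

import numpy as np

shape_list = [(5, 6, 7), (5, 1, 7)]
common = np.broadcast_shapes(*shape_list)          # -> (5, 6, 7)
shape_list = [np.int32(common)] * len(shape_list)  # every input now uses the broadcast shape
print(shape_list)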
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 38ab3f4..40788a2 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -268,7 +268,7 @@ class TosaTestGen:
if "ksb" in argsDict
else int(argsDict["ks"]),
}
- elif argsDict["dg_type"] == gtu.DataGenType.SPECIAL:
+ elif argsDict["dg_type"] == gtu.DataGenType.FP_SPECIAL:
mode = gtu.ComplianceMode.FP_SPECIAL
elif "compliance" in op and "ulp" in op["compliance"]:
mode = gtu.ComplianceMode.ULP
@@ -3352,7 +3352,11 @@ class TosaTestGen:
DType.FP32: (gtu.DataGenType.DOT_PRODUCT,),
}
EW_UNARY_DATAGEN = {
- DType.FP16: (gtu.DataGenType.PSEUDO_RANDOM, gtu.DataGenType.FULL_RANGE)
+ DType.FP16: (gtu.DataGenType.PSEUDO_RANDOM, gtu.DataGenType.FULL_RANGE),
+ }
+ PR_FS_DATAGEN = {
+ DType.FP16: (gtu.DataGenType.PSEUDO_RANDOM, gtu.DataGenType.FP_SPECIAL),
+ DType.FP32: (gtu.DataGenType.PSEUDO_RANDOM, gtu.DataGenType.FP_SPECIAL),
}
TOSA_OP_LIST = {
@@ -3716,7 +3720,7 @@ class TosaTestGen:
TosaErrorValidator.evDimensionMismatch,
TosaErrorValidator.evBroadcastShapesMismatch,
),
- "data_gen": PSEUDO_RANDOM_DATAGEN,
+ "data_gen": PR_FS_DATAGEN,
"compliance": {"ulp": 0.5},
},
"arithmetic_right_shift": {
@@ -3938,7 +3942,7 @@ class TosaTestGen:
TosaErrorValidator.evDimensionMismatch,
TosaErrorValidator.evBroadcastShapesMismatch,
),
- "data_gen": PSEUDO_RANDOM_DATAGEN,
+ "data_gen": PR_FS_DATAGEN,
},
"minimum": {
"op": Op.MINIMUM,
@@ -4330,7 +4334,7 @@ class TosaTestGen:
TosaErrorValidator.evDimensionMismatch,
TosaErrorValidator.evBroadcastShapesMismatch,
),
- "data_gen": PSEUDO_RANDOM_DATAGEN,
+ "data_gen": PR_FS_DATAGEN,
},
"greater_equal": {
"op": Op.GREATER_EQUAL,
@@ -4351,7 +4355,7 @@ class TosaTestGen:
TosaErrorValidator.evDimensionMismatch,
TosaErrorValidator.evBroadcastShapesMismatch,
),
- "data_gen": PSEUDO_RANDOM_DATAGEN,
+ "data_gen": PR_FS_DATAGEN,
},
"greater": {
"op": Op.GREATER,
@@ -4372,7 +4376,7 @@ class TosaTestGen:
TosaErrorValidator.evDimensionMismatch,
TosaErrorValidator.evBroadcastShapesMismatch,
),
- "data_gen": PSEUDO_RANDOM_DATAGEN,
+ "data_gen": PR_FS_DATAGEN,
},
# Reduction operators
"reduce_all": {
diff --git a/verif/generator/tosa_utils.py b/verif/generator/tosa_utils.py
index a8e321e..478190d 100644
--- a/verif/generator/tosa_utils.py
+++ b/verif/generator/tosa_utils.py
@@ -55,7 +55,7 @@ class DataGenType(IntEnum):
DOT_PRODUCT = 1
BOUNDARY = 2
FULL_RANGE = 3
- SPECIAL = 4
+ FP_SPECIAL = 4
FIXED_DATA = 5
diff --git a/verif/tests/test_tosa_refmodel.py b/verif/tests/test_tosa_refmodel.py
index 24ee9e2..bb52a86 100644
--- a/verif/tests/test_tosa_refmodel.py
+++ b/verif/tests/test_tosa_refmodel.py
@@ -1,5 +1,5 @@
"""Tests for tosa_reference_model."""
-# Copyright (c) 2022-2023, ARM Limited.
+# Copyright (c) 2022-2024, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import json
import re
@@ -134,9 +134,10 @@ class BuildTosaTest:
# Tests - op_name, ref_model_type, num_expected_tests
+# FP special datagen adds a second expected test to the FP16 and FP32 tests of ops it is enabled for
TEST_PARAMS = [
("add", "int32", 1),
- ("add", "fp32", 1),
+ ("add", "fp32", 2),
("abs", "int32", 1),
("abs", "fp32", 1),
("abs", "fp16", 1),
@@ -223,13 +224,20 @@ def test_refmodel_simple_op(tosaTest):
assert const_file.is_file()
consts.append(np.load(str(const_file)))
+ # Check if the data is from FP special datagen which can give invalid results
+ fp_special_data = test_dir.match("*_fs")
+
# Perform Numpy operation
if op_name == "abs":
assert len(tensors) == 1
result = np.abs(tensors[0])
elif op_name == "add":
assert len(tensors) == 2
- result = np.add(tensors[0], tensors[1])
+ if fp_special_data:
+ with np.errstate(invalid="ignore"):
+ result = np.add(tensors[0], tensors[1])
+ else:
+ result = np.add(tensors[0], tensors[1])
elif op_name == "concat":
assert len(consts) == 1
# Get axis from test directory name