From ee2566914d3476b8103b88915f3b81bda8490b44 Mon Sep 17 00:00:00 2001
From: James Ward
Date: Tue, 15 Nov 2022 11:36:47 +0000
Subject: FP16 improvements

* Update FP16 resize to newest spec version
* Correct casting to fp16 for graphs of >1 ops

Change-Id: Iedff9a71eb7f72948b3c00a635bb0fd07d414bcd
Signed-off-by: James Ward
---
 reference_model/src/tensor.cc | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/reference_model/src/tensor.cc b/reference_model/src/tensor.cc
index 4eaf21d..e9598c4 100644
--- a/reference_model/src/tensor.cc
+++ b/reference_model/src/tensor.cc
@@ -159,8 +159,7 @@ int TosaReference::Tensor::readFromNpyFile(const char* filename)
     switch (dtype)
     {
         case DType_FP16:
-            // Convert from fp16 to fp32
-            //TODO(jw): remove this once we cast to fp16 in register_fcn/eval
+            // Convert from fp16 to fp32 so that fp16 values can be manipulated as float
             for (uint32_t i=0; i < elements; i++) {
                 fdatabuf[i] = half_float::half_cast<float, half_float::half>(f16databuf[i]);
             }
@@ -277,7 +276,7 @@ int TosaReference::Tensor::writeToNpyFile(const char* filename) const
             free(f16databuf);
             return 1;
         }
-        // Convert fp32 to fp16
+        // Convert fp32 to fp16 so that output file contains valid fp16 data
        for (uint32_t i=0; i < elements; i++) {
            f16databuf[i] = half_float::half_cast<half_float::half, float>(fdatabuf[i]);
        }
-- 
cgit v1.2.1
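
Note (not part of the patch): a minimal, hypothetical sketch of the fp16 <-> fp32
round trip that the updated comments describe, using half_float::half_cast from the
third-party half.hpp header the reference model depends on. Buffer names mirror those
in tensor.cc, but this standalone program is illustrative only and assumes half.hpp
is on the include path.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    #include "half.hpp"

    int main()
    {
        // Simulated fp16 buffer, as readFromNpyFile would obtain it from an .npy file.
        std::vector<half_float::half> f16databuf = { half_float::half(1.5f),
                                                     half_float::half(-0.25f),
                                                     half_float::half(65504.0f) };

        // fp16 -> fp32: widen so the values can be manipulated as ordinary floats.
        std::vector<float> fdatabuf(f16databuf.size());
        for (uint32_t i = 0; i < f16databuf.size(); i++)
        {
            fdatabuf[i] = half_float::half_cast<float, half_float::half>(f16databuf[i]);
        }

        // fp32 -> fp16: narrow again so an output file would contain valid fp16 data.
        for (uint32_t i = 0; i < fdatabuf.size(); i++)
        {
            f16databuf[i] = half_float::half_cast<half_float::half, float>(fdatabuf[i]);
        }

        for (uint32_t i = 0; i < fdatabuf.size(); i++)
        {
            std::printf("fp32=%f  fp16 round-trip=%f\n", fdatabuf[i],
                        static_cast<float>(f16databuf[i]));
        }
        return 0;
    }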