Add support for NaN in Float32ToFloat16

This will be used to test that NaN is correctly sampled from float16
textures.

BUG=dawn:128

Change-Id: I6e3b79f438e9a48c3a167ab45baf9f9d019ce48b
Reviewed-on: https://dawn-review.googlesource.com/c/dawn/+/8361
Commit-Queue: Kai Ninomiya <kainino@chromium.org>
Reviewed-by: Kai Ninomiya <kainino@chromium.org>
Reviewed-by: Austin Eng <enga@chromium.org>
diff --git a/src/common/Math.cpp b/src/common/Math.cpp
index d9217c8..edb68f9 100644
--- a/src/common/Math.cpp
+++ b/src/common/Math.cpp
@@ -85,8 +85,10 @@
     uint32_t sign16 = (fp32i & 0x80000000) >> 16;
     uint32_t mantissaAndExponent = fp32i & 0x7FFFFFFF;
 
-    if (mantissaAndExponent > 0x47FFEFFF) {  // Infinity
-        return static_cast<uint16_t>(sign16 | 0x7FFF);
+    if (mantissaAndExponent > 0x7F800000) {  // NaN
+        return 0x7FFF;
+    } else if (mantissaAndExponent > 0x47FFEFFF) {  // Infinity
+        return static_cast<uint16_t>(sign16 | 0x7C00);
     } else if (mantissaAndExponent < 0x38800000) {  // Denormal
         uint32_t mantissa = (mantissaAndExponent & 0x007FFFFF) | 0x00800000;
         int32_t exponent = 113 - (mantissaAndExponent >> 23);
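
For reference, a minimal sketch (not part of this change) of how the new
branches could be exercised. It assumes Float32ToFloat16 is declared in
src/common/Math.h and included as "common/Math.h"; both are assumptions
here, not verified against the tree:

    #include <cassert>
    #include <cmath>

    #include "common/Math.h"  // assumed location of Float32ToFloat16

    int main() {
        // A float32 NaN (exponent all ones, non-zero mantissa) is expected to
        // map to the float16 NaN pattern 0x7FFF.
        assert(Float32ToFloat16(std::nanf("")) == 0x7FFF);

        // Infinity and values that overflow float16 should now map to the
        // float16 infinity pattern 0x7C00 with the sign preserved, instead of
        // the NaN pattern returned before this change.
        assert(Float32ToFloat16(INFINITY) == 0x7C00);
        assert(Float32ToFloat16(-INFINITY) == 0xFC00);
        return 0;
    }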