Fix q8 weights (use uint8 for q8; int8 produces poor results)
- onnx/model_int8.onnx +2 -2
- onnx/model_quantized.onnx +2 -2
- onnx/model_uint8.onnx +2 -2
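For context, the q8 weights in this commit were regenerated with unsigned 8-bit quantization rather than signed. A minimal sketch of how such a re-export could be done with onnxruntime's dynamic quantizer follows; the fp32 source path and the choice of quantize_dynamic are assumptions, since the actual export pipeline is not shown in this diff.

from onnxruntime.quantization import quantize_dynamic, QuantType

# Assumed paths; the source FP32 model is not part of this diff.
fp32_model = "onnx/model.onnx"
q8_model = "onnx/model_quantized.onnx"

# Quantize weights to uint8 (QUInt8). QInt8 is what this commit moves
# away from, since signed 8-bit weights produced poor results here.
quantize_dynamic(
    model_input=fp32_model,
    model_output=q8_model,
    weight_type=QuantType.QUInt8,
)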
onnx/model_int8.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f8eeead8e191939562a98af969f9d63dd404dc72a9a65b2c19cabb857de7c8d9
+size 1714133062
onnx/model_quantized.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6e038fd6fb27b41fbb62e6a7df9b60b57215db3958d14382221beaab78fbc1d4
+size 1714133130
onnx/model_uint8.onnx
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6e038fd6fb27b41fbb62e6a7df9b60b57215db3958d14382221beaab78fbc1d4
+size 1714133130