Commit a14d22f by Anthonyg5005 (parent: bdfff0c)

small bug fix

just changed some order to work better

auto-exl2-upload/exl2-quant.py CHANGED
@@ -146,9 +146,10 @@ if not glob.glob(f"models/{model}/*.safetensors"): #check if safetensors model e
     convertst = input("Couldn't find safetensors model, do you want to convert to safetensors? (y/n): ").lower()
     while convertst != 'y' and convertst != 'n':
         convertst = input("Please enter 'y' or 'n': ").lower()
-    convusebf16 = input("Would you like to use bf16 loading? Will reduce ram usage (y/n): ").lower()
-    while convusebf16 != 'y' and convusebf16 != 'n':
-        convusebf16 = input("Please enter 'y' or 'n': ").lower()
+    if convertst == 'y':
+        convusebf16 = input("Would you like to use bf16 loading? Will reduce ram usage (y/n): ").lower()
+        while convusebf16 != 'y' and convusebf16 != 'n':
+            convusebf16 = input("Please enter 'y' or 'n': ").lower()
     if convusebf16 == 'y':
         usingbf16 = "--bf16"
     else:
exl2-multi-quant-local/exl2-quant.py CHANGED
@@ -115,9 +115,10 @@ if not glob.glob(f"models/{model}/*.safetensors"): #check if safetensors model e
     convertst = input("Couldn't find safetensors model, do you want to convert to safetensors? (y/n): ").lower()
     while convertst != 'y' and convertst != 'n':
         convertst = input("Please enter 'y' or 'n': ").lower()
-    convusebf16 = input("Would you like to use bf16 loading? Will reduce ram usage (y/n): ").lower()
-    while convusebf16 != 'y' and convusebf16 != 'n':
-        convusebf16 = input("Please enter 'y' or 'n': ").lower()
+    if convertst == 'y':
+        convusebf16 = input("Would you like to use bf16 loading? Will reduce ram usage (y/n): ").lower()
+        while convusebf16 != 'y' and convusebf16 != 'n':
+            convusebf16 = input("Please enter 'y' or 'n': ").lower()
     if convusebf16 == 'y':
         usingbf16 = "--bf16"
     else:
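
Both script hunks apply the same reordering: the bf16 question is now asked only after the user has agreed to convert. Assembled without diff markers, the post-change flow reads roughly as below. This is a sketch, not the full script; the initial values of convusebf16 and usingbf16 are assumptions, since the hunks alone don't show how the surrounding code sets them.

# Sketch of the prompt flow after this commit.
# Assumption: convusebf16/usingbf16 are given defaults somewhere; not shown in the hunks.
convusebf16 = 'n'
usingbf16 = ""

convertst = input("Couldn't find safetensors model, do you want to convert to safetensors? (y/n): ").lower()
while convertst != 'y' and convertst != 'n':
    convertst = input("Please enter 'y' or 'n': ").lower()
if convertst == 'y':
    # the bf16 loading question is only relevant when a conversion will actually run
    convusebf16 = input("Would you like to use bf16 loading? Will reduce ram usage (y/n): ").lower()
    while convusebf16 != 'y' and convusebf16 != 'n':
        convusebf16 = input("Please enter 'y' or 'n': ").lower()
if convusebf16 == 'y':
    usingbf16 = "--bf16"
else:
    usingbf16 = ""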
ipynb/EXL2_Private_Quant_V1.ipynb CHANGED
@@ -105,8 +105,9 @@
     "!mkdir {model}-exl2-{BPW}bpw-WD\n",
     "!mkdir {model}-exl2-{BPW}bpw\n",
     "!cp models/{model}/config.json {model}-exl2-{BPW}bpw-WD\n",
-    "#@markdown Calibrate with dataset, may improve model output: (NOT WORKING YET)\n",
+    "#@markdown Calibrate with custom dataset, not recommended: (not finished on this version)\n",
     "Calibrate = False # @param {type:\"boolean\"}\n",
+    "Calibrate = False\n",
     "#@markdown Calibration dataset, check above (must be parquet file):\n",
     "dataset = \"wikitext\" # @param {type:\"string\"}\n",
     "if Calibrate == True:\n",
ipynb/EXL2_Private_Quant_V2.ipynb CHANGED
@@ -133,8 +133,9 @@
     "!mkdir {model}-exl2-{BPW}bpw-WD\n",
     "!mkdir {model}-exl2-{BPW}bpw\n",
     "!cp models/{model}/config.json {model}-exl2-{BPW}bpw-WD\n",
-    "#@markdown Calibrate with dataset, may improve model output (optional):\n",
-    "Calibrate = True # @param {type:\"boolean\"}\n",
+    "#@markdown Calibrate with custom dataset, not recommended (optional):\\\n",
+    "#@markdown **Do not mistake this with finetunining.** calibration is a different process that helps preserve model quality on smaller quants.\n",
+    "Calibrate = False # @param {type:\"boolean\"}\n",
     "#@markdown Calibration dataset, enable calibrate above (must be parquet file):\n",
     "if Calibrate == True:\n",
     " dataset_url = \"https://huggingface.co/datasets/wikitext/resolve/refs%2Fconvert%2Fparquet/wikitext-103-v1/test/0000.parquet?download=true\" # @param {type:\"string\"}\n",
ipynb/EXL2_Private_Quant_V3.ipynb CHANGED
@@ -147,7 +147,8 @@
     "!mkdir {model}-exl2-{BPW}bpw-WD\n",
     "!mkdir {model}-exl2-{BPW}bpw\n",
     "!cp models/{model}/config.json {model}-exl2-{BPW}bpw-WD\n",
-    "#@markdown Calibrate with dataset, may improve model output (optional):\n",
+    "#@markdown Calibrate with custom dataset, not recommended (optional):\\\n",
+    "#@markdown **Do not mistake this with finetunining.** calibration is a different process that helps preserve model quality on smaller quants.\n",
     "Calibrate = False # @param {type:\"boolean\"}\n",
     "#@markdown Calibration dataset, enable calibrate above (must be filled out if calibrating):\n",
     "if Calibrate == True:\n",