project-baize committed
Commit: 1abe38b
Parent: 1a3d9a9

Update app_modules/utils.py

Files changed (1):
  1. app_modules/utils.py  +22 -21
app_modules/utils.py CHANGED
@@ -102,8 +102,6 @@ def convert_mdtext(md_text):
         else:
             result.append(mdtex2html.convert(non_code, extensions=["tables"]))
         if code.strip():
-            # _, code = detect_language(code)  # temporarily removed the highlighting step: it misbehaves on large code blocks
-            # code = code.replace("\n\n", "\n")  # temporarily stopped stripping blank lines: it misbehaves on large code blocks
             code = f"\n```{code}\n\n```"
             code = markdown_to_html_with_syntax_highlight(code)
             result.append(code)
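
The surviving wrap-up line relies on the captured segment still starting with its language tag, which is what the f-string f"\n```{code}\n\n```" implies. A standalone sketch of what that line produces, with a made-up segment:

segment = "python\nprint('hi')"        # assumption: captured text begins with the fence's language tag
wrapped = f"\n```{segment}\n\n```"     # the same f-string as in convert_mdtext
assert wrapped == "\n```python\nprint('hi')\n\n```"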
@@ -152,10 +150,10 @@ def convert_to_markdown(text):
     for line in lines:
         if in_code_block is False and line.startswith("```"):
             in_code_block = True
-            markdown_text += "```\n"
+            markdown_text += f"{line}\n"
         elif in_code_block is True and line.startswith("```"):
             in_code_block = False
-            markdown_text += "```\n"
+            markdown_text += f"{line}\n"
         elif in_code_block:
             markdown_text += f"{line}\n"
         else:
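
Net effect of the two replaced lines: an opening fence such as ```python is now copied through verbatim instead of being collapsed to a bare ```, so language tags survive convert_to_markdown. A self-contained sketch of the new loop on a toy input (the trailing else branch for non-code lines sits outside this hunk and is omitted):

lines = ["```python", "print('hi')", "```"]
in_code_block = False
markdown_text = ""
for line in lines:
    if in_code_block is False and line.startswith("```"):
        in_code_block = True
        markdown_text += f"{line}\n"   # keeps "```python" rather than a bare "```"
    elif in_code_block is True and line.startswith("```"):
        in_code_block = False
        markdown_text += f"{line}\n"
    elif in_code_block:
        markdown_text += f"{line}\n"
assert markdown_text == "```python\nprint('hi')\n```\n"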
@@ -321,7 +319,7 @@ def is_stop_word_or_prefix(s: str, stop_words: list) -> bool:
 
 
 
-def load_tokenizer_and_model(base_model,adapter_model,load_8bit=False):
+def load_tokenizer_and_model(base_model,adapter_model=None,load_8bit=False):
     if torch.cuda.is_available():
         device = "cuda"
     else:
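
The hunk's context ends at the bare else:, but the elif device == "mps": branch further down implies the probe continues roughly as follows (a hedged sketch; the MPS check itself is not shown in this diff):

import torch

if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():   # assumed: an MPS probe, implied by the "mps" branch below
    device = "mps"
else:
    device = "cpu"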
@@ -340,32 +338,35 @@ def load_tokenizer_and_model(base_model,adapter_model,load_8bit=False):
             torch_dtype=torch.float16,
             device_map="auto",
         )
-        model = PeftModel.from_pretrained(
-            model,
-            adapter_model,
-            torch_dtype=torch.float16,
-        )
+        if adapter_model is not None:
+            model = PeftModel.from_pretrained(
+                model,
+                adapter_model,
+                torch_dtype=torch.float16,
+            )
     elif device == "mps":
         model = LlamaForCausalLM.from_pretrained(
             base_model,
             device_map={"": device},
             torch_dtype=torch.float16,
         )
-        model = PeftModel.from_pretrained(
-            model,
-            adapter_model,
-            device_map={"": device},
-            torch_dtype=torch.float16,
-        )
+        if adapter_model is not None:
+            model = PeftModel.from_pretrained(
+                model,
+                adapter_model,
+                device_map={"": device},
+                torch_dtype=torch.float16,
+            )
     else:
         model = LlamaForCausalLM.from_pretrained(
             base_model, device_map={"": device}, low_cpu_mem_usage=True
         )
-        model = PeftModel.from_pretrained(
-            model,
-            adapter_model,
-            device_map={"": device},
-        )
+        if adapter_model is not None:
+            model = PeftModel.from_pretrained(
+                model,
+                adapter_model,
+                device_map={"": device},
+            )
 
     if not load_8bit:
         model.half()  # seems to fix bugs for some users.
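
Taken together, the three guarded blocks make the adapter optional: PeftModel.from_pretrained only runs when adapter_model is passed, so the same loader now also serves a plain base checkpoint. A hedged usage sketch (the repo names are illustrative, and the tokenizer/model return pair is assumed from the function's name, not shown in this hunk):

# Base model only: every PeftModel wrapping step is skipped.
tokenizer, model = load_tokenizer_and_model("decapoda-research/llama-7b-hf")

# Base model plus LoRA adapter: PeftModel.from_pretrained is applied on top.
tokenizer, model = load_tokenizer_and_model(
    "decapoda-research/llama-7b-hf",
    adapter_model="project-baize/baize-lora-7B",
)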
 