ztime committed on
Commit: 7bb3f3e
Parent: 8f263fc

tokenize not add bos

Files changed (1): app.py (+6, -17)
app.py CHANGED
@@ -7,22 +7,11 @@ if not os.path.isfile("Yi-6B.q4_k_m.gguf"):
     os.system("wget -c https://huggingface.co/SamPurkis/Yi-6B-GGUF/resolve/main/Yi-6B.q4_k_m.gguf")
 
 DEFAULT_MODEL_PATH = model_file
-parser = argparse.ArgumentParser()
-parser.add_argument("-m", "--model", default=DEFAULT_MODEL_PATH, type=Path, help="model path")
-parser.add_argument("--mode", default="chat", type=str, choices=["chat", "generate"], help="inference mode")
-parser.add_argument("-l", "--max_length", default=512, type=int, help="max total length including prompt and output")
-parser.add_argument("-c", "--max_context_length", default=512, type=int, help="max context length")
-parser.add_argument("--top_k", default=0, type=int, help="top-k sampling")
-parser.add_argument("--top_p", default=0.7, type=float, help="top-p sampling")
-parser.add_argument("--temp", default=0.95, type=float, help="temperature")
-parser.add_argument("--repeat_penalty", default=1.1, type=float, help="penalize repeat sequence of tokens")
-parser.add_argument("-t", "--threads", default=0, type=int, help="number of threads for inference")
-parser.add_argument("--plain", action="store_true", help="display in plain text without markdown support")
-args = parser.parse_args()
 
 from llama_cpp import Llama
 llm = Llama(model_path=model_file)
-
+old_tokenize = llm._model.tokenize
+llm._model.tokenize = lambda text, add_bos, spec: old_tokenize(text, False, spec)
 
 
 def predict(input, chatbot, max_length, top_p, temperature, history):
@@ -50,7 +39,7 @@ def reset_state():
 
 
 with gr.Blocks() as demo:
-    gr.HTML("""<h1 align="center">Yi-6B-GGUF by llama.cpp</h1>""")
+    gr.HTML("""<h1 align="center">Yi-6B-GGUF by llama-cpp-python</h1>""")
 
     chatbot = gr.Chatbot()
     with gr.Row():
@@ -58,9 +47,9 @@ with gr.Blocks() as demo:
             user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=8)
             submitBtn = gr.Button("Submit", variant="primary")
         with gr.Column(scale=1):
-            max_length = gr.Slider(0, 32048, value=args.max_length, step=1.0, label="Maximum Length", interactive=True)
-            top_p = gr.Slider(0, 1, value=args.top_p, step=0.01, label="Top P", interactive=True)
-            temperature = gr.Slider(0, 1, value=args.temp, step=0.01, label="Temperature", interactive=True)
+            max_length = gr.Slider(0, 32048, value=2048, step=1.0, label="Maximum Length", interactive=True)
+            top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
+            temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
             emptyBtn = gr.Button("Clear History")
 
     history = gr.State([])
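The substantive change is the two `+` lines in the first hunk: they wrap llama-cpp-python's internal tokenizer so the BOS token is never prepended, whatever `add_bos` value a caller passes. Below is a minimal standalone sketch of the same monkey-patch, assuming the GGUF file fetched by the `wget` above sits in the working directory; note that `_model` is a private attribute of `Llama`, so this may break across llama-cpp-python versions.

```python
from llama_cpp import Llama

# Assumes the quantized model downloaded above is in the working directory.
llm = Llama(model_path="Yi-6B.q4_k_m.gguf")

# Keep a reference to the original low-level tokenizer, then swap in a
# wrapper that forces add_bos=False on every call. Higher-level helpers
# such as Llama.tokenize() delegate to llm._model.tokenize, so they pick
# up the patched behaviour automatically.
old_tokenize = llm._model.tokenize
llm._model.tokenize = lambda text, add_bos, special: old_tokenize(text, False, special)
```

Patching the private tokenizer is blunt but effective here, presumably because the tokenization happens inside completion helpers that do not expose an `add_bos` parameter of their own.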
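Continuing the sketch above, one quick way to confirm the patch took effect is to tokenize a short byte string and compare the first id against the model's BOS id; both `tokenize()` and `token_bos()` are public `Llama` methods, though the exact ids printed are model-specific.

```python
# With the patch in place, the first token of any encoding should be a
# regular text token rather than the BOS id.
ids = llm.tokenize(b"Hello")
assert ids[0] != llm.token_bos(), "BOS token was still prepended"
print(ids[:5])
```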