Xilixmeaty40 committed (verified)
Commit: f814fb7
Parent(s): 45e21ed

Update app.py

Files changed (1): app.py (+7 -15)
app.py CHANGED

@@ -8,16 +8,12 @@ ss_client = Client("https://xilixmeaty40-html-image-current-tabx.hf.space/")
 with open("models.txt", "r") as file:
     models = file.read().splitlines()
 
-clients = []
-for model in models:
-    try:
-        client = InferenceClient(model)
-        clients.append(client)
-    except Exception as e:
-        print(f"Failed to load model {model}: {e}")
+combined_model = "\n\n".join(models)
 
-if not clients:
-    raise Exception("All models failed to load or no models provided.")
+try:
+    client = InferenceClient(combined_model)
+except Exception as e:
+    raise Exception(f"Failed to load models: {e}")
 
 def load_models(inp):
     return gr.update(label=models[inp])
@@ -31,9 +27,8 @@ def format_prompt(message, history, cust_p):
     prompt += cust_p.replace("USER_INPUT", message)
     return prompt
 
-def chat_inf(system_prompt, prompt, history, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, cust_p):
+def chat_inf(system_prompt, prompt, history, memory, seed, temp, tokens, top_p, rep_p, chat_mem, cust_p):
     hist_len = 0
-    client = clients[int(client_choice) - 1]
     if not history:
         history = []
     if not memory:
@@ -106,9 +101,6 @@ with gr.Blocks() as app:
         with gr.Column(scale=1):
             stop_btn = gr.Button("Stop")
             clear_btn = gr.Button("Clear")
-        client_choice = gr.Dropdown(label="Models", type='index', choices=models, value=models[0], interactive=True)
-    with gr.Column(scale=1):
-        rand = gr.Checkbox(label="Random Seed", value=True)
         seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, step=1, value=rand_val)
         tokens = gr.Slider(label="Max new tokens", value=300000, minimum=0, maximum=800000, step=64)
         temp = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.49)
@@ -125,7 +117,7 @@ with gr.Blocks() as app:
     chatblock = gr.Dropdown(label="Chatblocks", choices=list(range(0, 21)), value=0, type="index")
     header = gr.Checkbox(label="Include header?", value=True)
     rand.change(check_rand, inputs=[rand, seed], outputs=seed)
-    btn.click(chat_inf, inputs=[sys_inp, inp, chat_b, memory, client_choice, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt], outputs=[chat_b, memory])
+    btn.click(chat_inf, inputs=[sys_inp, inp, chat_b, memory, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt], outputs=[chat_b, memory])
     stop_btn.click(lambda: None, [])
     clear_btn.click(clear_fn, [])
 
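For readers following the change: after this commit app.py builds a single InferenceClient from the joined contents of models.txt, and chat_inf no longer receives a client_choice index. Below is a minimal sketch of how the resulting client would be exercised, assuming the huggingface_hub InferenceClient API; the prompt text and generation settings are illustrative placeholders, not values taken from the commit.

from huggingface_hub import InferenceClient

# Post-commit setup: one client built from the joined model list.
# Caveat: InferenceClient expects a single model id, so the "\n\n" join
# only resolves cleanly when models.txt holds exactly one entry.
with open("models.txt", "r") as file:
    models = file.read().splitlines()

client = InferenceClient("\n\n".join(models))

# Illustrative generation call mirroring the sliders wired into chat_inf
# (seed, temperature, max new tokens, top_p, repetition penalty).
output = client.text_generation(
    "Hello, world.",          # placeholder prompt, not from the commit
    max_new_tokens=256,
    temperature=0.49,
    top_p=0.9,
    repetition_penalty=1.0,
    seed=1,
)
print(output)

One observation on the diff itself: the third hunk removes the rand checkbox, yet rand.change(check_rand, inputs=[rand, seed], outputs=seed) survives as context in the last hunk, so the post-commit app.py would raise a NameError at build time unless rand is defined elsewhere in the file.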