PierreBrunelle committed on
Commit
ba92978
·
verified ·
1 Parent(s): 57710db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -10
app.py CHANGED
@@ -39,7 +39,7 @@ def calculate_readability(text: str) -> float:
39
  return 206.835 - 1.015 * average_words_per_sentence
40
 
41
  # Function to run inference and analysis
42
- def run_inference_and_analysis(task, system_prompt, input_text, temperature, top_p, max_tokens, min_tokens, stop, random_seed, safe_prompt):
43
  # Initialize Pixeltable
44
  pxt.drop_table('mistral_prompts', ignore_errors=True)
45
  t = pxt.create_table('mistral_prompts', {
@@ -49,8 +49,7 @@ def run_inference_and_analysis(task, system_prompt, input_text, temperature, top
49
  'timestamp': pxt.Timestamp,
50
  'temperature': pxt.Float,
51
  'top_p': pxt.Float,
52
- 'max_tokens': pxt.Int,
53
- 'stop': pxt.String,
54
  'random_seed': pxt.Int,
55
  'safe_prompt': pxt.Bool
56
  })
@@ -64,7 +63,6 @@ def run_inference_and_analysis(task, system_prompt, input_text, temperature, top
64
  'temperature': temperature,
65
  'top_p': top_p,
66
  'max_tokens': max_tokens,
67
- 'stop': stop,
68
  'random_seed': random_seed,
69
  'safe_prompt': safe_prompt
70
  }])
@@ -80,7 +78,6 @@ def run_inference_and_analysis(task, system_prompt, input_text, temperature, top
80
  'temperature': temperature,
81
  'top_p': top_p,
82
  'max_tokens': max_tokens if max_tokens is not None else 300,
83
- 'stop': stop.split(',') if stop else None,
84
  'random_seed': random_seed,
85
  'safe_prompt': safe_prompt
86
  }
@@ -125,7 +122,6 @@ def run_inference_and_analysis(task, system_prompt, input_text, temperature, top
125
  t.temperature,
126
  t.top_p,
127
  t.max_tokens,
128
- t.stop,
129
  t.random_seed,
130
  t.safe_prompt
131
  ).order_by(t.timestamp, asc=False).collect().to_pandas()
@@ -202,7 +198,6 @@ def gradio_interface():
202
  temperature = gr.Slider(minimum=0, maximum=1, value=0.7, step=0.1, label="Temperature")
203
  top_p = gr.Slider(minimum=0, maximum=1, value=0.9, step=0.1, label="Top P")
204
  max_tokens = gr.Number(label="Max Tokens", value=300)
205
- stop = gr.Textbox(label="Stop Sequences (comma-separated)")
206
  random_seed = gr.Number(label="Random Seed", value=None)
207
  safe_prompt = gr.Checkbox(label="Safe Prompt", value=False)
208
 
@@ -243,7 +238,6 @@ def gradio_interface():
243
  "Top P",
244
  "Max Tokens",
245
  "Min Tokens",
246
- "Stop Sequences",
247
  "Random Seed",
248
  "Safe Prompt"
249
  ],
@@ -283,7 +277,7 @@ def gradio_interface():
283
 
284
  gr.Examples(
285
  examples=examples,
286
- inputs=[task, system_prompt, input_text, temperature, top_p, max_tokens, stop, random_seed, safe_prompt],
287
  outputs=[omn_response, ml_response, large_sentiment, open_sentiment, large_keywords, open_keywords, large_readability, open_readability],
288
  fn=run_inference_and_analysis,
289
  cache_examples=True,
@@ -297,7 +291,7 @@ def gradio_interface():
297
 
298
  submit_btn.click(
299
  run_inference_and_analysis,
300
- inputs=[task, system_prompt, input_text, temperature, top_p, max_tokens, stop, random_seed, safe_prompt],
301
  outputs=[omn_response, ml_response, large_sentiment, open_sentiment, large_keywords, open_keywords, large_readability, open_readability, history, responses, analysis, params]
302
  )
303
 
 
39
  return 206.835 - 1.015 * average_words_per_sentence
40
 
41
  # Function to run inference and analysis
42
+ def run_inference_and_analysis(task, system_prompt, input_text, temperature, top_p, max_tokens, min_tokens, random_seed, safe_prompt):
43
  # Initialize Pixeltable
44
  pxt.drop_table('mistral_prompts', ignore_errors=True)
45
  t = pxt.create_table('mistral_prompts', {
 
49
  'timestamp': pxt.Timestamp,
50
  'temperature': pxt.Float,
51
  'top_p': pxt.Float,
52
+ 'max_tokens': pxt.Int,
 
53
  'random_seed': pxt.Int,
54
  'safe_prompt': pxt.Bool
55
  })
 
63
  'temperature': temperature,
64
  'top_p': top_p,
65
  'max_tokens': max_tokens,
 
66
  'random_seed': random_seed,
67
  'safe_prompt': safe_prompt
68
  }])
 
78
  'temperature': temperature,
79
  'top_p': top_p,
80
  'max_tokens': max_tokens if max_tokens is not None else 300,
 
81
  'random_seed': random_seed,
82
  'safe_prompt': safe_prompt
83
  }
 
122
  t.temperature,
123
  t.top_p,
124
  t.max_tokens,
 
125
  t.random_seed,
126
  t.safe_prompt
127
  ).order_by(t.timestamp, asc=False).collect().to_pandas()
 
198
  temperature = gr.Slider(minimum=0, maximum=1, value=0.7, step=0.1, label="Temperature")
199
  top_p = gr.Slider(minimum=0, maximum=1, value=0.9, step=0.1, label="Top P")
200
  max_tokens = gr.Number(label="Max Tokens", value=300)
 
201
  random_seed = gr.Number(label="Random Seed", value=None)
202
  safe_prompt = gr.Checkbox(label="Safe Prompt", value=False)
203
 
 
238
  "Top P",
239
  "Max Tokens",
240
  "Min Tokens",
 
241
  "Random Seed",
242
  "Safe Prompt"
243
  ],
 
277
 
278
  gr.Examples(
279
  examples=examples,
280
+ inputs=[task, system_prompt, input_text, temperature, top_p, max_tokens, random_seed, safe_prompt],
281
  outputs=[omn_response, ml_response, large_sentiment, open_sentiment, large_keywords, open_keywords, large_readability, open_readability],
282
  fn=run_inference_and_analysis,
283
  cache_examples=True,
 
291
 
292
  submit_btn.click(
293
  run_inference_and_analysis,
294
+ inputs=[task, system_prompt, input_text, temperature, top_p, max_tokens, random_seed, safe_prompt],
295
  outputs=[omn_response, ml_response, large_sentiment, open_sentiment, large_keywords, open_keywords, large_readability, open_readability, history, responses, analysis, params]
296
  )
297