juewang committed
Commit e532db6
1 Parent(s): b2703de

Update app.py

Files changed (1): app.py +4 -3
app.py CHANGED
@@ -1,6 +1,7 @@
 import streamlit as st
 import requests
 import time
+from ast import literal_eval
 
 @st.cache
 def infer(prompt,
@@ -61,7 +62,7 @@ with col1:
 temperature = st.text_input('temperature', "0.0")
 top_p = st.text_input('top_p', "1.0")
 num_completions = st.text_input('num_completions (only the best one will be returend)', "1")
-stop = st.text_input('stop, split by;', "\n")
+stop = st.text_input('stop, split by;', repr('\n'))
 seed = st.text_input('seed', "42")
 
 with col2:
@@ -82,6 +83,6 @@ with col2:
 generated_area.markdown(prompt)
 report_text = infer(
 prompt, model_name=model_name, max_new_tokens=max_new_tokens, temperature=temperature, top_p=top_p,
-num_completions=num_completions, seed=seed, stop=stop,
+num_completions=num_completions, seed=seed, stop=literal_eval(stop),
 )
-generated_area.markdown(prompt + "**" + report_text + "**")
+generated_area.markdown(prompt + "_" + report_text + "_")
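The new default value and the literal_eval call work as a pair: st.text_input returns exactly the characters typed into the box, so a raw "\n" default would leave an invisible newline in the widget, while repr('\n') displays the escape sequence and literal_eval converts the typed text back into the real string before it reaches infer(). A minimal, standalone sketch of that round trip (plain Python, outside the Streamlit app):

from ast import literal_eval

# A default of "\n" would put an invisible real newline in the text box;
# repr('\n') shows the quoted escape sequence instead.
default = repr('\n')              # the 4-character string '\n'

# literal_eval parses the typed text as a Python string literal,
# recovering the actual newline before it is passed to infer().
stop = literal_eval(default)
assert stop == "\n"

# Any user-entered stop sequence round-trips the same way, e.g. "';'" -> ";".
assert literal_eval("';'") == ";"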