Asankhaya Sharma committed on
Commit
4108df0
β€’
1 Parent(s): f121b56
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -5,11 +5,11 @@ from streamlit_chat import message
5
 
6
  checkpoint = "."
7
 
8
- @st.cache_resource
 
9
  def get_model():
10
  model = AutoModelForCausalLM.from_pretrained(checkpoint)
11
- tokenizer = AutoTokenizer.from_pretrained(checkpoint)
12
- return model, tokenizer
13
 
14
  st.title("Chat with myGPT πŸ¦„")
15
  st.write("This is a LLM that was fine-tuned on a dataset of daily conversations.")
@@ -37,7 +37,7 @@ def submit():
37
  # prompt = "How long will it take for the poc to finish?"
38
  # inputs = tokenizer(prompt, return_tensors="pt")
39
 
40
- model, tokenizer = get_model()
41
  generation_config = GenerationConfig(max_new_tokens=32,
42
  num_beams=4,
43
  early_stopping=True,
 
5
 
6
  checkpoint = "."
7
 
8
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint)
9
+ @st.cache
10
  def get_model():
11
  model = AutoModelForCausalLM.from_pretrained(checkpoint)
12
+ return model
 
13
 
14
  st.title("Chat with myGPT πŸ¦„")
15
  st.write("This is a LLM that was fine-tuned on a dataset of daily conversations.")
 
37
  # prompt = "How long will it take for the poc to finish?"
38
  # inputs = tokenizer(prompt, return_tensors="pt")
39
 
40
+ model = get_model()
41
  generation_config = GenerationConfig(max_new_tokens=32,
42
  num_beams=4,
43
  early_stopping=True,