QLWD committed
Commit 66be2b0 (verified) · 1 parent: 5169fea

Update app.py

Files changed (1):
  1. app.py  +1 -1
app.py CHANGED
@@ -43,7 +43,7 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(MODELS)
 
-@spaces.GPU
+@spaces.GPU(duration=2)
 def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
     print(f'message is - {message}')
     print(f'history is - {history}')
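
For context, here is a minimal sketch of how the updated decorator is typically wired into a ZeroGPU Gradio Space. Only the `@spaces.GPU(duration=2)` line, the `stream_chat` signature, and the two print statements come from the diff; the model id, loading options, and generation body below are illustrative assumptions, not the Space's actual code. In the `spaces` package, `duration` caps the GPU allocation for a single call, in seconds.

# Minimal sketch, assuming a ZeroGPU Space; MODELS and the generation body
# are hypothetical placeholders for illustration.
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODELS = "some-org/some-causal-lm"  # hypothetical model id

model = AutoModelForCausalLM.from_pretrained(
    MODELS,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(MODELS)

# duration: maximum GPU time reserved for one call, in seconds.
@spaces.GPU(duration=2)
def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
    print(f'message is - {message}')
    print(f'history is - {history}')
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    output = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=penalty,
    )
    # Return only the newly generated tokens, decoded to text.
    return tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

On ZeroGPU, shorter durations tend to get higher queue priority, but the value still has to cover the full generation time of one call, so a very low setting like 2 seconds may cut off longer responses and can be raised later if needed.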