heegyu committed on
Commit
2b4ba4a
1 Parent(s): 924483b

Adjust top-p and temperature

Files changed (1):
  app.py  +3 -2
app.py CHANGED
@@ -16,7 +16,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 #     device="cuda:0" if torch.cuda.is_available() else 'cpu'
 # )
 
-def get_message(prompt, min_new_tokens=16, max_turn=4):
+def get_message(prompt, min_new_tokens=5, max_turn=4):
     prompt = prompt.strip()
     ids = tokenizer(prompt, return_tensors="pt").to(device)
     min_length = ids['input_ids'].shape[1] + min_new_tokens
@@ -28,7 +28,8 @@ def get_message(prompt, min_new_tokens=16, max_turn=4):
         max_new_tokens=128,
         min_length=min_length,
         do_sample=True,
-        top_p=0.7,
+        top_p=0.95,
+        temperature=1.35,
         early_stopping=True
     ) # [0]['generated_text']
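The net effect of the commit is to relax the sampling constraints: min_new_tokens drops from 16 to 5, top_p rises from 0.7 to 0.95, and temperature=1.35 is added, which widens the nucleus and flattens the token distribution for more varied replies. Below is a minimal, self-contained sketch of how these parameters fit into the surrounding generate() call; the model loading, the model_name placeholder, and the final decode step are assumptions, since the diff only shows the tokenizer line and the generation keyword arguments.

```python
# Minimal sketch, assuming a standard transformers causal-LM setup.
# The diff does not show how the model is loaded or which checkpoint is used.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "your-model-name"  # hypothetical placeholder; the actual checkpoint is not shown in this diff
device = "cuda:0" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

def get_message(prompt, min_new_tokens=5, max_turn=4):
    prompt = prompt.strip()
    ids = tokenizer(prompt, return_tensors="pt").to(device)
    # min_length counts the prompt tokens plus the minimum number of new tokens
    min_length = ids["input_ids"].shape[1] + min_new_tokens
    output = model.generate(
        **ids,
        max_new_tokens=128,
        min_length=min_length,
        do_sample=True,      # sample instead of greedy decoding
        top_p=0.95,          # nucleus sampling: wider candidate pool than the previous 0.7
        temperature=1.35,    # flatter distribution -> more varied outputs
        early_stopping=True,
    )
    # Decoding the first sequence is an assumption; the original code kept the
    # raw generate() output (see the "[0]['generated_text']" comment in the diff).
    return tokenizer.decode(output[0], skip_special_tokens=True)
```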