Text Generation
Transformers
PyTorch
Indonesian
English
llama
text-generation-inference
Inference Endpoints
Ichsan2895 commited on
Commit
5dcaa0f
1 Parent(s): 5e75a16

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -67,7 +67,7 @@ def generate_response(question: str) -> str:
67
  num_beams=2,
68
  temperature=0.3,
69
  repetition_penalty=1.2,
70
- max_length=200)
71
 
72
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
73
 
@@ -107,7 +107,7 @@ def generate_response(question: str) -> str:
107
  num_beams=2,
108
  temperature=0.3,
109
  repetition_penalty=1.2,
110
- max_length=200)
111
 
112
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
113
 
 
67
  num_beams=2,
68
  temperature=0.3,
69
  repetition_penalty=1.2,
70
+ max_new_tokens=2048)
71
 
72
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
73
 
 
107
  num_beams=2,
108
  temperature=0.3,
109
  repetition_penalty=1.2,
110
+ max_new_tokens=2048)
111
 
112
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
113