BeveledCube committed on
Commit
987e371
·
verified ·
1 Parent(s): 04ca331

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +3 -7
main.py CHANGED
@@ -22,7 +22,6 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
22
 
23
  class req(BaseModel):
24
  prompt: str
25
- history: list
26
 
27
  @app.get("/")
28
  def read_root():
@@ -31,18 +30,15 @@ def read_root():
31
  @app.post("/api")
32
  def read_root(data: req):
33
  print("Prompt:", data.prompt)
34
- print("History:", data.history)
35
 
36
- history_string = "\n".join(data.history)
37
-
38
  input_text = data.prompt
39
 
40
  # Tokenize the input text
41
- inputs = tokenizer.encode_plus(history_string, input_text, return_tensors="pt")
42
 
43
  # Generate output using the model
44
- outputs = model.generate(**inputs)
45
- generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
46
 
47
  answer_data = { "answer": generated_text }
48
  print("Answer:", generated_text)
 
22
 
23
  class req(BaseModel):
24
  prompt: str
 
25
 
26
  @app.get("/")
27
  def read_root():
 
30
  @app.post("/api")
31
  def read_root(data: req):
32
  print("Prompt:", data.prompt)
 
33
 
 
 
34
  input_text = data.prompt
35
 
36
  # Tokenize the input text
37
+ input_ids = tokenizer.encode(input_text, return_tensors="pt")
38
 
39
  # Generate output using the model
40
+ output_ids = model.generate(input_ids, num_beams=5, no_repeat_ngram_size=2)
41
+ generated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
42
 
43
  answer_data = { "answer": generated_text }
44
  print("Answer:", generated_text)