betajuned committed on
Commit
1e1f0b8
1 Parent(s): f1e53ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -19,18 +19,18 @@ def predict(input, history=[]):
19
  )
20
 
21
  # append the new user input tokens to the chat history
22
- bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
23
 
24
  # generate a response
25
  history = model.generate(
26
- bot_input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id
27
  ).tolist()
28
 
29
  # convert the tokens to text, and then split the responses into lines
30
  response = tokenizer.decode(history[0]).split("<|endoftext|>")
31
  # print('decoded_response-->>'+str(response))
32
  response = [
33
- (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
34
  ] # convert to tuples of list
35
  # print('response-->>'+str(response))
36
  return response, history
 
19
  )
20
 
21
  # append the new user input tokens to the chat history
22
+ bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids
23
 
24
  # generate a response
25
  history = model.generate(
26
+ bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
27
  ).tolist()
28
 
29
  # convert the tokens to text, and then split the responses into lines
30
  response = tokenizer.decode(history[0]).split("<|endoftext|>")
31
  # print('decoded_response-->>'+str(response))
32
  response = [
33
+ (response[i], response[i + 1]) for i in range(5, len(response) - 1, 2)
34
  ] # convert to tuples of list
35
  # print('response-->>'+str(response))
36
  return response, history