pedramyazdipoor committed on
Commit
141cca1
1 Parent(s): cfb1c97

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +5 -3
README.md CHANGED
@@ -91,11 +91,13 @@ encoding = tokenizer(text,question,add_special_tokens = True,
91
  truncation = 'only_first',
92
  max_length = 32)
93
  out = model(encoding['input_ids'].to(device),encoding['attention_mask'].to(device), encoding['token_type_ids'].to(device))
94
- start_index, end_index = generate_indexes(out['start_logits'][0][1:], out['end_logits'][0][1:], 5, 0)
95
- print(tokenizer.tokenize(text + question)[start_index:end_index+1])
 
96
  print(tokenizer.tokenize(text + question))
97
- >>> ['▁26']
98
  >>> ['▁سلام', '▁من', '▁پدر', 'ام', 'م', '▁26', '▁سالم', 'ه', 'نام', 'م', '▁چیست', '؟']
 
99
  ```
100
 
101
  ## Acknowledgments
 
91
  truncation = 'only_first',
92
  max_length = 32)
93
  out = model(encoding['input_ids'].to(device),encoding['attention_mask'].to(device), encoding['token_type_ids'].to(device))
94
+ # We had to change some pieces of code to make it compatible
95
+ # with generating one answer at a time
96
+ answer_start_index, answer_end_index = generate_indexes(out['start_logits'][0][1:], out['end_logits'][0][1:], 5, 0)
97
  print(tokenizer.tokenize(text + question))
98
+ print(tokenizer.tokenize(text + question)[answer_start_index : (answer_end_index + 1)])
99
  >>> ['▁سلام', '▁من', '▁پدر', 'ام', 'م', '▁26', '▁سالم', 'ه', 'نام', 'م', '▁چیست', '؟']
100
+ >>> ['▁26']
101
  ```
102
 
103
  ## Acknowledgments