Update README.md
Browse files
README.md
CHANGED
```diff
@@ -18,5 +18,17 @@ def get_global_attention(input_ids):
     global_attention_mask.index_put_(tuple(globs.t()), value)
     return global_attention_mask
 
-
+m1 = "In this paper we present the results of an experiment in <m> automatic concept and definition extraction </m> from written sources of law using relatively simple natural methods."
+m2 = "This task is important since many natural language processing (NLP) problems, such as <m> information extraction </m>, summarization and dialogue."
+
+inputs = m1 + " </s></s> " + m2
+
+tokens = tokenzier(inputs, return_tensors='pt')
+global_attention_mask = get_global_attention(tokens['input_ids'])
+
+with torch.no_grad():
+    output = model(tokens['input_ids'], tokens['attention_mask'], global_attention_mask)
+
+scores = torch.softmax(output.logits, dim=-1)
+
 ```
```