aehrm committed
Commit
155bb31
1 Parent(s): 48d9430

Update README

Files changed (1): README.md +2 -2
README.md CHANGED

@@ -47,7 +47,7 @@ tokenizer = AutoTokenizer.from_pretrained('aehrm/dtaec-type-normalizer')
 model = AutoModelForSeq2SeqLM.from_pretrained('aehrm/dtaec-type-normalizer')

 # Note: you CANNOT normalize full sentences, only word for word!
-model_in = tokenizer(['Freyheit', 'seyn', 'selbstthätig'], return_tensors='pt', padding=True)
+model_in = tokenizer(['Freyheit', 'seyn', 'ſelbstthätig'], return_tensors='pt', padding=True)
 model_out = model.generate(**model_in)

 print(tokenizer.batch_decode(model_out, skip_special_tokens=True))
@@ -60,7 +60,7 @@ Or, more compact using the huggingface `pipeline`:
 from transformers import pipeline

 pipe = pipeline(model="aehrm/dtaec-type-normalizer")
-out = pipe(['Freyheit', 'seyn', 'selbstthätig'])
+out = pipe(['Freyheit', 'seyn', 'ſelbstthätig'])

 print(out)
 # >>> [{'generated_text': 'Freiheit'}, {'generated_text': 'sein'}, {'generated_text': 'selbsttätig'}]
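
As the README's comment notes, the model normalizes single word forms (types), not full sentences. A minimal sketch of normalizing a sentence word for word with the `pipeline` shown above; the example sentence and the naive whitespace tokenization are assumptions for illustration only:

```python
from transformers import pipeline

pipe = pipeline(model="aehrm/dtaec-type-normalizer")

# Hypothetical historical-German toy sentence; splitting on whitespace is an
# assumption -- a real preprocessing step would also handle punctuation.
sentence = "Freyheit muß seyn"
tokens = sentence.split()

# Normalize each token individually, then rejoin the normalized forms.
out = pipe(tokens)
print(" ".join(o["generated_text"] for o in out))
```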