Update README.md
README.md
@@ -40,11 +40,11 @@ suffix = f"\nKorean:"
 prompt = prefix + line + suffix
 
 inputs = tokenizer(prompt, return_tensors="pt")
-outputs = model.generate(**inputs, max_new_tokens=
+outputs = model.generate(**inputs, max_new_tokens=8)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 
-# Base Model Output: "한국에 7년
-# VocADT Output: "저는 한국에 7년 동안 살았습니다." # Complete and good output within
+# Base Model Output: "한국에 7년" # This short incomplete phrase in Korean is 8 tokens for the base model.
+# VocADT Output: "저는 한국에 7년 동안 살았습니다." # Complete and good output within 8 tokens
 ```
 
 ## Reference
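The updated comments rest on a token-count argument: with `max_new_tokens=8`, the base model's tokenizer spends all 8 tokens on the fragment "한국에 7년", while VocADT's adapted vocabulary covers the full sentence "저는 한국에 7년 동안 살았습니다." ("I have lived in Korea for 7 years."). Below is a minimal sketch, assuming Hugging Face tokenizers, of how one could check that claim; the model IDs are placeholders, not taken from this repository.

```python
# Sketch only: count how many tokens each tokenizer needs for the two outputs.
# The repository IDs below are placeholders -- substitute the actual base model
# and VocADT-adapted checkpoint used in this README.
from transformers import AutoTokenizer

BASE_ID = "base-model-id"        # placeholder for the original pretrained model
VOCADT_ID = "vocadt-adapted-id"  # placeholder for the VocADT checkpoint

base_tok = AutoTokenizer.from_pretrained(BASE_ID)
vocadt_tok = AutoTokenizer.from_pretrained(VOCADT_ID)

for text in ["한국에 7년", "저는 한국에 7년 동안 살았습니다."]:
    n_base = len(base_tok(text, add_special_tokens=False)["input_ids"])
    n_vocadt = len(vocadt_tok(text, add_special_tokens=False)["input_ids"])
    print(f"{text!r}: base={n_base} tokens, VocADT={n_vocadt} tokens")
```

If the comments in the diff hold, the second sentence should come out at 8 tokens or fewer under the VocADT tokenizer, while the base tokenizer already needs about 8 tokens for the short fragment alone.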