Update README.md
README.md

This model is used for text generation.
If you want to use this model, try running the following code. I hope it helps you.

# Code

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "FigoSans/TextGeneration"  # replace with your own repo name if needed

# Load the tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Tokenize the prompt, generate a continuation, and decode it back to text
input_text = "Input Text"
inputs = tokenizer(input_text, return_tensors="pt")
outputs = model.generate(**inputs)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_text)
```
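
With no extra arguments, `generate` uses the model's default generation settings, which usually means greedy decoding and a fairly short output. The sketch below shows how common decoding options could be passed to `generate`; the specific values (`max_new_tokens=50`, `temperature=0.7`, `top_p=0.9`) are illustrative assumptions, not settings recommended for this particular model.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "FigoSans/TextGeneration"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

inputs = tokenizer("Input Text", return_tensors="pt")

# Illustrative decoding options; the values below are assumptions, not tuned settings
outputs = model.generate(
    **inputs,
    max_new_tokens=50,  # generate at most 50 new tokens after the prompt
    do_sample=True,     # sample instead of greedy decoding
    temperature=0.7,    # soften the next-token distribution
    top_p=0.9,          # nucleus sampling: keep the smallest token set covering 90% probability
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Sampling usually produces more varied text than greedy decoding; if you prefer deterministic output, drop `do_sample` and the sampling parameters.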