Update README.md
README.md CHANGED
@@ -33,9 +33,14 @@ The model is fine-tuned over gemma-2b using the PEFT (LoRA) method with rank 128
 # Usage
 ``` python
-# Load model directly
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 tokenizer = AutoTokenizer.from_pretrained("rudrashah/RLM-hinglish-translator")
 model = AutoModelForCausalLM.from_pretrained("rudrashah/RLM-hinglish-translator")
+
+template = "Hinglish:\n{hi_en}\n\nEnglish:\n{en}" #THIS IS MOST IMPORTANT, WITHOUT THIS IT WILL GIVE RANDOM OUTPUT
+input_text = tokenizer(template.format(hi_en="aapka name kya hai?",en=""),return_tensors="pt")
+
+output = model.generate(**input_text)
+print(tokenizer.decode(output[0]))
 ```
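
For readers trying the updated snippet, here is a minimal end-to-end sketch of the new usage. It follows the README's code; `max_new_tokens` and `skip_special_tokens` are illustrative additions for readability, not part of the commit, and the generation settings are assumptions rather than the model card's recommendation.

```python
# Minimal sketch of the usage the diff adds. The template string comes from
# the README; max_new_tokens and skip_special_tokens are assumed extras.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("rudrashah/RLM-hinglish-translator")
model = AutoModelForCausalLM.from_pretrained("rudrashah/RLM-hinglish-translator")

# The README stresses this prompt format; leaving en="" asks the model to
# fill in the English side of the template.
template = "Hinglish:\n{hi_en}\n\nEnglish:\n{en}"
inputs = tokenizer(template.format(hi_en="aapka name kya hai?", en=""), return_tensors="pt")

output = model.generate(**inputs, max_new_tokens=64)  # assumed length cap
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

The template presumably matters because the LoRA fine-tuning data used this exact `Hinglish:`/`English:` layout, which would explain the README's warning that prompts in any other shape produce random output.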