"""Generate a continuation of a Tamil poetry prompt with a local causal LM.

Loads the tokenizer and model from the current working directory and prints
the greedy-decoded continuation of the prompt.
"""
from transformers import AutoTokenizer, AutoModelForCausalLM

# Model and tokenizer files are expected alongside this script ("./").
tokenizer = AutoTokenizer.from_pretrained("./")
model = AutoModelForCausalLM.from_pretrained("./")

# Tamil prompt — opening words of a couplet; the model completes it.
# (Plain string: the original used an f-string with no placeholders.)
prompt = """இன்னாமை வேண்டின்"""

input_ids = tokenizer(prompt, return_tensors="pt").input_ids

# Greedy decoding (do_sample=False). `temperature` was removed: it is
# ignored when sampling is off and only emitted a transformers warning,
# so the generated output is unchanged. repetition_penalty > 1 curbs
# verbatim repetition loops in the continuation.
generation_output = model.generate(
    input_ids=input_ids,
    max_new_tokens=256,
    repetition_penalty=1.4,
    do_sample=False,
)
print(tokenizer.decode(generation_output[0]))