migueldeguzmandev's picture
Upload 13 files
10a94ee
raw
history blame
1.18 kB
from transformers import AutoModelForCausalLM, AutoTokenizer
def main():
    """Interactive text-generation REPL for a locally fine-tuned causal LM.

    Loads a tokenizer and model from ``model_output_dir``, then loops:
    read a prompt from stdin, generate a continuation, print it.
    Typing 'exit' (any case) quits. No return value; all output goes
    to stdout.
    """
    # Load the fine-tuned model and tokenizer.
    model_output_dir = "/Users/migueldeguzman/Desktop/papercliptodd/falcon-1b/v1/"  # Replace with your fine-tuned model directory
    tokenizer = AutoTokenizer.from_pretrained(model_output_dir)
    model = AutoModelForCausalLM.from_pretrained(model_output_dir)

    while True:
        # Prompt the user; a case-insensitive 'exit' ends the session.
        prompt = input("Enter a prompt for text generation (or type 'exit' to quit): ")
        if prompt.lower() == 'exit':
            break

        # Encode the prompt and generate a single continuation.
        # max_length=1024 counts prompt + generated tokens combined.
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        # NOTE(review): top_k/top_p/temperature are ignored unless
        # do_sample=True is also passed — as written, generate() runs
        # greedy decoding (with the no-repeat constraint). With
        # temperature=0.001 the intent was clearly near-deterministic
        # output, so greedy matches that intent; confirm before enabling
        # sampling.
        output = model.generate(
            input_ids,
            max_length=1024,
            num_return_sequences=1,
            no_repeat_ngram_size=2,  # forbid repeating any 2-gram
            top_k=50,
            top_p=0.95,
            temperature=0.001,
        )

        # Decode and print the generated text (prompt included in output[0]).
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        print("Generated Text:")
        print(generated_text)


if __name__ == "__main__":
    main()