from transformers import AutoModelForCausalLM, AutoTokenizer


def main():
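    # Load the fine-tuned model and tokenizer from the local checkpoint directory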
    model_output_dir = "/Users/migueldeguzman/Desktop/papercliptodd/phi-1.5/v3/"
    tokenizer = AutoTokenizer.from_pretrained(model_output_dir)
    model = AutoModelForCausalLM.from_pretrained(model_output_dir)

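    # Interactive loop: keep generating until the user types 'exit'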
    while True:
        prompt = input("Enter a prompt for text generation (or type 'exit' to quit): ")

        if prompt.lower() == 'exit':
            break

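        # Tokenize the prompt and generate a single completion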
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        output = model.generate(
            input_ids,
            max_length=1024,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,  # required: without this, top_k/top_p/temperature are ignored and decoding is greedy
            top_k=50,
            top_p=0.95,
            temperature=0.001,  # near-zero temperature makes sampling effectively deterministic
            pad_token_id=tokenizer.eos_token_id,  # suppress the open-ended generation warning
        )

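        # Decode the generated token IDs back into text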
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
        print("Generated Text:")
        print(generated_text)


if __name__ == "__main__":
    main()