|
import torch |
|
from transformers import GPT2Tokenizer, GPT2LMHeadModel |
|
|
|
# Load the base GPT-2 tokenizer/model, extend the vocabulary with the
# chat-formatting special tokens, then restore the fine-tuned weights.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')

model = GPT2LMHeadModel.from_pretrained('gpt2')

# Register the padding / end-of-sequence / role-marker tokens.
# NOTE(review): these must match the tokens used when the checkpoint in
# "pytorch_model.bin" was fine-tuned — verify against the training script.
tokenizer.add_special_tokens({'pad_token': '[PAD]'})

tokenizer.add_special_tokens({'eos_token': '<|End|>'})

special_tokens = {

    "additional_special_tokens": ["<|USER|>", "<|SYSTEM|>", "<|ASSISTANT|>"]

}

tokenizer.add_special_tokens(special_tokens)

# Grow the embedding matrix to cover the newly added tokens BEFORE loading
# the fine-tuned state dict, so the parameter shapes line up.
model.resize_token_embeddings(len(tokenizer))

# Pick the device first so the checkpoint can be mapped onto it directly:
# without map_location, a checkpoint saved on a GPU machine crashes with a
# CUDA deserialization error on a CPU-only box.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model.load_state_dict(torch.load("pytorch_model.bin", map_location=device))

model.to(device)
|
def generate_text(model, tokenizer, prompt, max_length=1024):
    """Generate an assistant reply for *prompt* using the chat-formatted model.

    The user prompt is wrapped in the <|SYSTEM|>/<|USER|>/<|ASSISTANT|>
    template the checkpoint was fine-tuned on, sampled from the model, and
    the text between '<|ASSISTANT|>' and the next special token (or the end
    of the output) is returned.

    Args:
        model: A GPT2LMHeadModel already moved to the module-level `device`.
        tokenizer: The matching tokenizer with the chat special tokens added.
        prompt: The raw user question/message.
        max_length: Maximum total sequence length (prompt + reply) in tokens.

    Returns:
        The assistant's reply as a string (may include surrounding spaces).
    """
    prompt = f'<|SYSTEM|> You are a helpful AI designed to answer questions <|USER|> {prompt} <|ASSISTANT|> '

    input_ids = tokenizer.encode(prompt, add_special_tokens=True, return_tensors="pt").to(device)

    # All prompt tokens are real (no padding), so the mask is all ones.
    # ones_like inherits input_ids's device, so no extra .to(device) needed.
    attention_mask = torch.ones_like(input_ids)

    # Inference only — disable autograd bookkeeping for speed/memory.
    with torch.no_grad():
        output = model.generate(input_ids,
                                max_length=max_length,
                                do_sample=True,
                                top_k=50,
                                top_p=0.30,
                                pad_token_id=tokenizer.pad_token_id,
                                eos_token_id=tokenizer.eos_token_id,
                                attention_mask=attention_mask)

    # Keep special tokens so we can locate the assistant's span in the text.
    decoded = tokenizer.decode(output[0], skip_special_tokens=False)

    # The reply starts right after the '<|ASSISTANT|>' marker we put in the
    # prompt (guaranteed present since the prompt is echoed in the output).
    reply_start = decoded.index('<|ASSISTANT|>') + len('<|ASSISTANT|>')

    # Cut at the next special token if one follows.  str.find returns -1
    # when there is none; slicing with stop=-1 would silently drop the last
    # character of the reply, so fall back to the end of the string instead.
    reply_end = decoded.find('<|', reply_start)
    if reply_end == -1:
        reply_end = len(decoded)

    return decoded[reply_start:reply_end]
|
|
|
# Simple interactive loop: read a prompt, print the model's reply,
# and stop when the user types the literal string "q".
while True:
    user_prompt = input("Enter a prompt (or 'q' to quit): ")
    if user_prompt == "q":
        break
    print(generate_text(model, tokenizer, user_prompt))