import os
import time

import torch
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face read token from the Space's secrets. The Qwen2 checkpoints are
# public, so it is unused here; pass token=access_token to from_pretrained()
# if you swap in a gated or private repo.
access_token = os.getenv("read_access")

device = "cpu"  # the device to load the model onto
# The Qwen2-Instruct sizes share the same tokenizer, so the 0.5B tokenizer
# also works for the 1.5B models loaded below.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")

# Two copies of the same checkpoint for a speed comparison: model1 loads in
# the default float32, model lets transformers pick the checkpoint dtype.
model1 = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-1.5B-Instruct",
    device_map="auto",
)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-1.5B-Instruct",
    device_map="auto",
    torch_dtype="auto",
)
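# Optional sanity check, a minimal sketch assuming both copies fit in RAM:
# get_memory_footprint() makes the dtype difference visible; the float32 copy
# should be roughly twice the size of the auto-dtype copy.
print(f"model1 footprint: {model1.get_memory_footprint() / 1e9:.2f} GB")
print(f"model  footprint: {model.get_memory_footprint() / 1e9:.2f} GB")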
app = FastAPI()

# The original file had no route decorators, so FastAPI never exposed these
# handlers; the paths below are assumptions.
@app.get("/")
async def read_root():
    return {"Hello": "World!"}
@app.get("/chat")
async def read_droot():
    start_time = time.time()
    messages = [
        {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will respond politely and briefly."},
        {"role": "user", "content": "I'm Alok. Who are you?"},
        {"role": "assistant", "content": "I am Sia, a small language model created by Sushma."},
        {"role": "user", "content": "How are you?"}
    ]
    # Render the history with the model's chat template, ending with the
    # assistant header so generation continues as the assistant.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=128
    )
    # Drop the prompt tokens so only the newly generated reply is decoded.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    print(response)
    time_taken = time.time() - start_time
    print(time_taken)
    return {"response": response, "time_taken": time_taken}
# Same pipeline against the float32 copy (model1). The original defined
# read_droot twice, so the second definition silently replaced the first;
# the new name and path are assumptions.
@app.get("/chat-fp32")
async def read_droot_fp32():
    start_time = time.time()
    messages = [
        {"role": "system", "content": "You are a helpful assistant, Sia, developed by Sushma. You will respond politely and briefly."},
        {"role": "user", "content": "I'm Alok. Who are you?"},
        {"role": "assistant", "content": "I am Sia, a small language model created by Sushma."},
        {"role": "user", "content": "How are you?"}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    generated_ids = model1.generate(
        model_inputs.input_ids,
        max_new_tokens=128
    )
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    print(response)
    time_taken = time.time() - start_time
    print(time_taken)
    return {"response": response, "time_taken": time_taken}
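# Local entry point, a minimal sketch assuming uvicorn is installed (on a
# Hugging Face Space the platform usually starts the server itself; 7860 is
# the Spaces default port).
# Try it with, e.g.:  curl http://localhost:7860/chat
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)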