import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the FlashAttention-patched DeepSeek-Coder-V2-Lite-Instruct checkpoint in 8-bit.
tokenizer = AutoTokenizer.from_pretrained("BirdL/DeepSeek-Coder-V2-Lite-Instruct-FlashAttnPatch", trust_remote_code=True)
# device_map="auto" already places the quantized weights on the GPU; calling .cuda() on an 8-bit model would raise an error.
model = AutoModelForCausalLM.from_pretrained("BirdL/DeepSeek-Coder-V2-Lite-Instruct-FlashAttnPatch", trust_remote_code=True, device_map="auto", load_in_8bit=True)

@spaces.GPU
def respond(message, history):
    # Rebuild the conversation as chat messages and append the new user turn.
    messages = []
    for user_msg, bot_msg in history:
        messages.extend([{"role": "user", "content": user_msg}, {"role": "assistant", "content": bot_msg}])
    messages.append({"role": "user", "content": message})
    inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
    outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)

demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()