import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# AQLM-quantized checkpoint; loading it requires the `aqlm` package
# and, since we call .cuda() below, a CUDA-capable GPU.
MODEL_ID = "Devy1/DeepSeek-Coder-1.3b-base-AQLM-2bit-mixed-finetuned-1x14"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, trust_remote_code=True).cuda()


def generate_code(prompt):
    # Tokenize the prompt and move the tensors onto the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # max_new_tokens bounds only the completion; max_length would also count
    # the prompt tokens, so a long prompt could leave no room for output.
    outputs = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


interface = gr.Interface(fn=generate_code, inputs="text", outputs="text")
interface.launch()
|