import os
from threading import Thread

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Gemma is a gated model, so a Hugging Face access token is required.
token = os.environ["HF_TOKEN"]
tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b", token=token)
model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", token=token)


def generate(message, history):
    # gr.ChatInterface calls this function with (message, history).
    inputs = tokenizer([message], return_tensors="pt")
    # TextIteratorStreamer yields decoded text chunks, so the reply can be
    # streamed into the chat window instead of printed to stdout.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # Run generation in a background thread so the streamer can be consumed here.
    thread = Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=256),  # token cap is adjustable
    )
    thread.start()
    # Accumulate chunks so the UI shows the growing response.
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial


app = gr.ChatInterface(generate)
app.launch(debug=True)