from threading import Thread

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer


class ChatbotService:
    def __init__(self, model_name="RajuKandasamy/tamillama_tiny_30m"):
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.streamer = None

    def call(self, prompt):
        # Fresh streamer per request: the generation thread writes decoded text into it
        # and the Gradio callback reads the pieces out as they arrive.
        self.streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, timeout=5)
        # Gradio's Chatbot renders newlines as <br>, so convert them back before tokenizing.
        prompt = prompt.replace("<br>", "\n")
        print(prompt)
        inputs = self.tokenizer(prompt, return_tensors="pt")
        print(inputs)
        kwargs = dict(
            input_ids=inputs["input_ids"],
            streamer=self.streamer,
            max_new_tokens=512,
            do_sample=True,
            top_p=0.8,
            top_k=500,
            temperature=0.001,
            repetition_penalty=1.4,
        )
        # Run generation in a background thread so the UI can stream tokens while it runs.
        thread = Thread(target=self.model.generate, kwargs=kwargs)
        thread.start()
        return ""


# Example prompts in the model's expected format: "சொற்கள்:" means "Words:" and
# "சுருக்கம்:" means "Summary:".
example_questions = [
    """சொற்கள்: வீழ்ச்சி, சீட்டு, பிடிவாதம்
சுருக்கம்:""",
    """சொற்கள்: ஓட்டம், பயணம், குழப்பம்
சுருக்கம்:""",
    """Words: prevent, car, broken
Features: Dialogue""",
    """சொற்கள்: திரும்பு, வாசனை திரவியம், துணிச்சல்
சுருக்கம்:""",
]

chatbot_service = ChatbotService()

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    with gr.Row():
        msg = gr.Textbox(placeholder="Type your message here...", label="Story Prompt:")
        run = gr.Button("Run")
    examples_dropdown = gr.Dropdown(choices=example_questions, label="Select an example prompt")
    # Copy the selected example into the prompt textbox.
    examples_dropdown.change(fn=lambda x: x, inputs=examples_dropdown, outputs=msg)
    clear = gr.Button("Clear")

    def user(user_message, history):
        # Append the user's prompt to the chat history and clear the textbox.
        if history is None:
            history = []
        return "", history + [[user_message, None]]

    def bot(history):
        # Start generation for the latest prompt, then stream text into the last reply.
        chatbot_service.call(history[-1][0])
        history[-1][1] = ""
        for new_text in chatbot_service.streamer:
            print(new_text)
            history[-1][1] += new_text
            yield history

    run.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch()