File size: 1,104 Bytes
f10fa01 08517cb f10fa01 08517cb d749365 08517cb f10fa01 08517cb f10fa01 08517cb f10fa01 08517cb f10fa01 08517cb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 |
import os

# IMPORTANT: Keras reads KERAS_BACKEND when the package is first imported, so
# these environment variables must be set BEFORE `import keras` — the original
# code set them after the imports, which silently had no effect.
os.environ["KERAS_BACKEND"] = "jax"  # you can also use tensorflow or torch
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "1"  # avoid memory fragmentation on JAX backend.

import gradio as gr
import keras
import keras_nlp
import numpy as np
import pandas as pd

# Fixed seed so generation is reproducible across runs.
keras.utils.set_random_seed(42)

# Gemma 2B instruct model with a data-science LoRA fine-tune, loaded from the
# Hugging Face Hub via KerasNLP's preset loader.
gemma_lm = keras_nlp.models.CausalLM.from_preset(
    "hf://soufyane/gemma_2b_instruct_FT_DATA_SCIENCE_lora36_1"
)
def generate_answer(history, question):
    """Answer *question* with the Gemma model and extend the chat history.

    Args:
        history: Gradio Chatbot history, a list of (question, answer) tuples.
        question: The user's question as plain text.

    Returns:
        The same history list with the new (question, answer) pair appended,
        which Gradio uses to refresh the Chatbot component.
    """
    prompt = (
        "You are an AI Agent specialized to answer to questions about Data Science"
        " and be greatfull and nice and helpfull"
        f"\n\nQuestion:\n{question}\n\nAnswer:\n"
    )
    # NOTE(review): generate() may echo the prompt in its output depending on
    # the preset — confirm against KerasNLP's CausalLM behavior.
    answer = gemma_lm.generate(prompt, max_length=1024)
    history.append((question, answer))
    return history
# --- Gradio interface ------------------------------------------------------
# `demo` is kept as the app's name: it is what `demo.launch()` runs and what
# Gradio/HF Spaces tooling conventionally looks for.
with gr.Blocks() as demo:
    gr.Markdown("# Chatbot")
    chat_window = gr.Chatbot()
    with gr.Row():
        question_box = gr.Textbox(
            show_label=False,
            placeholder="Enter your question here...",
        )
    # Pressing Enter sends (history, question) to the model and refreshes the chat.
    question_box.submit(generate_answer, [chat_window, question_box], chat_window)

# Launch the interface
demo.launch()
|