import gradio as gr
from huggingface_hub import InferenceClient
import pandas as pd
import torch
import math
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    max_tokens=2048,
    temperature=0.7,
    top_p=0.95,
):
messages = [{"role": "system", "content": "You are a moslem bot that always give answer based on quran and hadith!"}]

    df = pd.read_csv("moslem-bot-reference.csv")
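    # Assumption: the CSV loaded above holds few-shot examples in two columns,
    # 'user' and 'assistant', which are appended to the prompt verbatim below.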

    # Replay earlier turns so the model keeps the conversation context.
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    # Prime the model with a fixed instruction/acknowledgement exchange.
    messages.append({"role": "user", "content": "I want you to answer strictly based on the Quran and Hadith"})
    messages.append({"role": "assistant", "content": "I'd be happy to help! Please go ahead and provide the sentence you'd like me to analyze. Please specify whether you're referencing a particular verse or hadith (Prophetic tradition) from the Quran or Hadith, or if you're asking me to analyze a general statement."})

    # Append the few-shot reference pairs from the CSV.
    for index, row in df.iterrows():
        messages.append({"role": "user", "content": row['user']})
        messages.append({"role": "assistant", "content": row['assistant']})

    selected_dfs = torch.load('selected_dfs.sav', map_location=torch.device('cpu'))
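    # Assumption: selected_dfs.sav is a pickled list of pandas DataFrames whose
    # 'Column1.question' and 'Column1.answer' columns hold extra Q&A pairs.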
    for ref_df in selected_dfs:
        ref_df = ref_df.dropna()
        # Keep roughly 10% of each frame so the prompt stays a manageable length.
        ref_df = ref_df.sample(math.floor(ref_df.shape[0] / 10))
        for index, row in ref_df.iterrows():
            messages.append({"role": "user", "content": row['Column1.question']})
            messages.append({"role": "assistant", "content": row['Column1.answer']})
messages.append({"role": "user", "content": message})
print(messages)
response = ""
for message in client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message.choices[0].delta.content
response += token
yield response
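
# A minimal local smoke test (hypothetical; assumes moslem-bot-reference.csv and
# selected_dfs.sav sit next to this file and the Inference API is reachable):
#   for partial in respond("Please tell me about inheritance law in Islam!", history=[]):
#       print(partial)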
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    examples=[
        ["Why was man created?"],
        ["What is life after death like?"],
        ["Please tell me about superstition!"],
        ["How did Moses defeat Pharaoh?"],
        ["Please tell me about inheritance law in Islam!"],
        ["A woman not wearing hijab"],
        ["Worshipping others besides Allah"],
        ["Blindly obeying a person"],
        ["Making profit from lending money to a friend"],
    ],
)

if __name__ == "__main__":
    demo.launch()