Spaces:
Running
Running
File size: 6,668 Bytes
c71be5c 08448e8 c71be5c 91b021f c7a91bb 2398560 c7e3111 74c9f1a c0cf7b8 c7e3111 c71be5c d1304c6 c71be5c 08448e8 c71be5c 291088b 6f9b7ab 60c7d45 291088b 08448e8 7618247 c71be5c 08448e8 1819fdd da6beb0 aba96da 4128a7d d065dd0 08448e8 c0cf7b8 08448e8 476fb9f dba6b3f 50eff43 aba96da c0cf7b8 08448e8 c8df060 30e3fcc c8df060 74c9f1a c8df060 22855ab 233772e c0cf7b8 4128a7d c0cf7b8 aba96da cad36dc aba96da e349a36 c0cf7b8 08448e8 9278930 08448e8 9278930 08448e8 25e99e3 4313a99 307204c aaff5ae 4313a99 30e3fcc d3db3bd 4313a99 d3db3bd 4313a99 4186c91 22855ab 4186c91 22855ab 4313a99 4186c91 447241c c0cf7b8 c71be5c 5427b5d c71be5c 0fa7c3e eff9c08 3fb3937 22855ab cb8fe28 291088b c71be5c 966bb6b c71be5c a1544d7 291088b 559573e 02dcacd 53a2ee3 4684ca5 fee6eca 02dcacd 291088b c71be5c 291088b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 |
import gradio as gr
from sentence_transformers import SentenceTransformer
from huggingface_hub import InferenceClient
import pandas as pd
import torch
import math
import httpcore
import pickle
import time
# Compatibility shim: recent httpcore releases dropped SyncHTTPTransport,
# which googletrans still references at import time.  Setting a placeholder
# attribute keeps `from googletrans import Translator` from raising.
# NOTE(review): the value is a plain string, not a transport class — it only
# needs to exist, it is never used as a real transport.
setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
# Remote chat-completion client (Hugging Face Inference API, Zephyr-7B).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Multilingual instruct embedding model, loaded eagerly at import time.
# NOTE(review): presumably intended for reference retrieval — confirm usage.
model = SentenceTransformer('intfloat/multilingual-e5-large-instruct')
def get_detailed_instruct(task_description: str, query: str) -> str:
    """Return an E5-instruct style prompt combining a task description and a query."""
    return 'Instruct: ' + task_description + '\nQuery: ' + query
def respond(message,
            max_tokens = 2048,
            temperature = 0.7,
            top_p = 0.95,
            ):
    """Stream an answer to *message*, translated back into its source language.

    Builds a chat prompt from a fixed system persona, a short priming
    exchange, and few-shot reference pairs loaded from
    ``moslem-bot-reference.csv``; the user question is translated to English
    (via googletrans) before being sent to the Zephyr chat model.  Each
    partial English response streamed back is translated into the detected
    source language and yielded, so callers see a progressively growing
    answer.

    Parameters
    ----------
    message : str
        The user's question, in any language googletrans can detect.
    max_tokens : int
        Maximum number of new tokens to generate.
    temperature : float
        Sampling temperature passed to the model.
    top_p : float
        Nucleus-sampling probability mass.

    Yields
    ------
    str
        Progressively longer (translated) partial responses.
    """
    # googletrans is imported lazily: its import only works after the
    # httpcore monkey-patch at module level has run.
    from googletrans import Translator

    # System persona plus a priming exchange that pins the bot's behavior.
    messages = [
        {"role": "system", "content": "You are a sunni moslem bot that always give answer based on quran, hadith, and the companions of prophet Muhammad!"},
        {"role": "user", "content": "I want you to answer strictly based on quran and hadith"},
        {"role": "assistant", "content": "I'd be happy to help! Please go ahead and provide the sentence you'd like me to analyze. Please specify whether you're referencing a particular verse or hadith (Prophetic tradition) from the Quran or Hadith, or if you're asking me to analyze a general statement."},
    ]

    # Few-shot reference pairs shipped alongside the app ('|'-separated CSV
    # with 'user' and 'assistant' columns).
    df = pd.read_csv("moslem-bot-reference.csv", sep='|')
    for _, row in df.iterrows():
        messages.append({"role": "user", "content": row['user']})
        messages.append({"role": "assistant", "content": row['assistant']})
    print("added more references\n")
    print(time.time())

    # Latest user question: detect its language, then ask the model in
    # English.  googletrans calls an external service, so fall back to the
    # raw message if detection/translation fails (offline, rate-limited...).
    translator = Translator()
    message_language = "en"
    print("===message===")
    print(message)
    print("============")
    try:
        detected = translator.detect(message)
        print(detected)
        message_language = detected.lang
        print(message_language)
        translated = translator.translate(message)
        print(translated)
        messages.append({"role": "user", "content": translated.text})
    except Exception as error:
        messages.append({"role": "user", "content": message})
        print("An error occurred:", error)
        print("en_message exception occurred")
    print(messages)
    print("added last question\n")
    print(time.time())

    response = ""
    # Stream the completion.  The loop variable is named `chunk` so it does
    # not shadow the user's `message` argument (the original code did).
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        try:
            if chunk and len(chunk.choices) > 0:
                # delta.content can be None on role/flush deltas; guard it so
                # `response += token` cannot raise TypeError.
                response += chunk.choices[0].delta.content or ""
            if not response:
                # Nothing accumulated yet — emit the empty string as-is.
                yield response
                continue
            # Translate the partial English answer back to the user's
            # language; fall back to the raw text if translation returns
            # nothing usable.
            translated = translator.translate(response, src='en', dest=message_language)
            yield response if translated is None else translated.text
        except Exception as error:
            # Best-effort streaming: on any per-chunk failure, surface what
            # we have so far instead of killing the stream.
            print("An error occurred:", error)
            yield response
# Sampling controls exposed to the user alongside the main textbox; their
# order matches respond()'s (max_tokens, temperature, top_p) parameters.
_sampling_controls = [
    gr.Slider(minimum=1, maximum=2048, value=2048, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
]

# Single-textbox UI streaming respond()'s generator output back to the page.
demo = gr.Interface(
    fn=respond,
    inputs="textbox",
    outputs="textbox",
    additional_inputs=_sampling_controls,
    cache_examples="lazy",
    examples=[
        ["Why is men created?"],
        ["Please tell me about superstition!"],
        ["How moses defeat pharaoh?"],
    ],
    title="Moslem Bot",
)

if __name__ == "__main__":
    demo.launch()