Bofandra committed on
Commit
08448e8
1 Parent(s): 5593b48

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -22
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
3
  import pandas as pd
4
  import torch
@@ -9,6 +10,8 @@ For more information on `huggingface_hub` Inference API support, please check th
9
  """
10
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
11
 
 
 
12
 
13
  def respond(
14
  message,
@@ -17,37 +20,60 @@ def respond(
17
  temperature = 0.7,
18
  top_p = 0.95,
19
  ):
 
20
  messages = [{"role": "system", "content": "You are a moslem bot that always give answer based on quran and hadith!"}]
21
- df = pd.read_csv("moslem-bot-reference.csv")
22
-
23
- for val in history:
24
- if val[0]:
25
- messages.append({"role": "user", "content": val[0]})
26
- if val[1]:
27
- messages.append({"role": "assistant", "content": val[1]})
28
 
 
29
  messages.append({"role": "user", "content": "I want you to answer strictly based on quran and hadith"})
30
  messages.append({"role": "assistant", "content": "I'd be happy to help! Please go ahead and provide the sentence you'd like me to analyze. Please specify whether you're referencing a particular verse or hadith (Prophetic tradition) from the Quran or Hadith, or if you're asking me to analyze a general statement."})
31
 
 
 
32
  for index, row in df.iterrows():
33
  messages.append({"role": "user", "content": row['user']})
34
  messages.append({"role": "assistant", "content": row['assistant']})
35
 
36
- """selected_dfs = torch.load('selected_dfs.sav', map_location=torch.device('cpu'))
37
- for df in selected_dfs:
38
- df = df.dropna()
39
- n = math.floor(df.shape[0]/10000)
40
- print(n)
41
- df = df.sample(n)
42
- for index, row in df.iterrows():
43
- print(index)
44
- print(row['Column1.question'])
45
- print(row['Column1.answer'])
46
- messages.append({"role": "user", "content": row['Column1.question']})
47
- messages.append({"role": "assistant", "content": row['Column1.answer']})
48
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  messages.append({"role": "user", "content": message})
50
- #print(messages)
51
 
52
  response = ""
53
 
@@ -92,6 +118,5 @@ demo = gr.ChatInterface(
92
  ],
93
  )
94
 
95
-
96
  if __name__ == "__main__":
97
  demo.launch()
 
1
  import gradio as gr
2
+ from sentence_transformers import SentenceTransformer
3
  from huggingface_hub import InferenceClient
4
  import pandas as pd
5
  import torch
 
10
  """
11
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
12
 
13
+ def get_detailed_instruct(task_description: str, query: str) -> str:
14
+ return f'Instruct: {task_description}\nQuery: {query}'
15
 
16
  def respond(
17
  message,
 
20
  temperature = 0.7,
21
  top_p = 0.95,
22
  ):
23
+ #system role
24
  messages = [{"role": "system", "content": "You are a moslem bot that always give answer based on quran and hadith!"}]
 
 
 
 
 
 
 
25
 
26
+ #make a moslem bot
27
  messages.append({"role": "user", "content": "I want you to answer strictly based on quran and hadith"})
28
  messages.append({"role": "assistant", "content": "I'd be happy to help! Please go ahead and provide the sentence you'd like me to analyze. Please specify whether you're referencing a particular verse or hadith (Prophetic tradition) from the Quran or Hadith, or if you're asking me to analyze a general statement."})
29
 
30
+ #adding references
31
+ df = pd.read_csv("moslem-bot-reference.csv")
32
  for index, row in df.iterrows():
33
  messages.append({"role": "user", "content": row['user']})
34
  messages.append({"role": "assistant", "content": row['assistant']})
35
 
36
+ #adding more references
37
+ selected_references = torch.load('selected_references.sav', map_location=torch.device('cpu'))
38
+ encoded_questions = torch.load('encoded_questions.sav', map_location=torch.device('cpu'))
39
+
40
+ task = 'Given a web search query, retrieve relevant passages that answer the query'
41
+ queries = [
42
+ get_detailed_instruct(task, message)
43
+ ]
44
+
45
+ model = SentenceTransformer('intfloat/multilingual-e5-large-instruct')
46
+ query_embeddings = model.encode(queries, convert_to_tensor=True, normalize_embeddings=True)
47
+ scores = (query_embeddings @ encoded_questions.T) * 100
48
+ selected_references['similarity'] = scores.tolist()[0]
49
+ sorted_references = selected_references.sort_values(by='similarity', ascending=False)
50
+ sorted_references = sorted_references.head(3)
51
+ sorted_references = selected_references.sort_values(by='similarity', ascending=True)
52
+
53
+ from googletrans import Translator
54
+ translator = Translator()
55
+
56
+ for index, row in sorted_references.iterrows():
57
+ print(index)
58
+ print(row['user'])
59
+ user = translator.translate(row['user'])
60
+ print(user)
61
+ print(row['assistant'])
62
+ assistant = translator.translate(row['assistant'])
63
+ print(assistant)
64
+ messages.append({"role": "user", "content":user })
65
+ messages.append({"role": "assistant", "content": assistant})
66
+
67
+ #history from chat session
68
+ for val in history:
69
+ if val[0]:
70
+ messages.append({"role": "user", "content": val[0]})
71
+ if val[1]:
72
+ messages.append({"role": "assistant", "content": val[1]})
73
+
74
+ #latest user question
75
  messages.append({"role": "user", "content": message})
76
+ print(messages)
77
 
78
  response = ""
79
 
 
118
  ],
119
  )
120
 
 
121
  if __name__ == "__main__":
122
  demo.launch()