Rams901 committed on
Commit
8c313b8
0 Parent(s)

Duplicate from Rams901/Cicero-QA-eval

Files changed (7)
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +182 -0
  4. db_full/index.faiss +3 -0
  5. db_full/index.pkl +3 -0
  6. requirements.txt +9 -0
  7. utils.py +49 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ db_full/index.faiss filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Cicero Interactive QA Dev
+ emoji: 🏃
+ colorFrom: green
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.23.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: Rams901/Cicero-QA-eval
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,182 @@
+ import gradio as gr
+ import numpy as np
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.chains import LLMChain
+ from langchain import PromptTemplate
+ import re
+ import pandas as pd
+ from langchain.vectorstores import FAISS
+ import requests
+ from typing import List
+ from langchain.schema import (
+     SystemMessage,
+     HumanMessage,
+     AIMessage
+ )
+ import os
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.chat_models import ChatOpenAI
+
+ from langchain.llms.base import LLM
+ from typing import Optional, List, Mapping, Any
+
+ import ast
+ from utils import ClaudeLLM, extract_website_name, remove_numbers
+
+ embeddings = HuggingFaceEmbeddings()
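+ # Load the prebuilt FAISS index shipped in db_full/ (the LFS-tracked files below);
+ # load_local must be given the same embedding model that was used to build the index.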
+ db = FAISS.load_local('db_full', embeddings)
+
+ mp_docs = {}
+ llm = ClaudeLLM()
+ # ChatOpenAI(
+ #     temperature=0,
+ #     model='gpt-3.5-turbo-16k'
+ # )
+
+
+ def add_text(history, text):
+
+     print(history)
+     history = history + [(text, None)]
+
+     return history, ""
+
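+ # Each entry maps a model name to (LLM instance, article cap): the cap limits how many tier-1
+ # articles are passed into that model's context; 0 means no cap.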
+ pipeline = {'claude': (ClaudeLLM(), 0), 'gpt-3.5': (ChatOpenAI(temperature=0, model='gpt-3.5-turbo-16k'), 65), 'gpt-4': (ChatOpenAI(temperature=0, model='gpt-4'), 30)}
+
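+ # Score every indexed chunk against the query, keep the closest matches (score < 0.7) as tier 1,
+ # and regroup their chunks per source article.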
+ def retrieve_thoughts_test(query):
+
+     # print(db.similarity_search_with_score(query = query, k = k, fetch_k = k*10))
+     docs_with_score = db.similarity_search_with_score(query = query, k = len(db.index_to_docstore_id.values()), fetch_k = len(db.index_to_docstore_id.values()))
+     df = pd.DataFrame([dict(doc[0])['metadata'] for doc in docs_with_score])
+     df = pd.concat((df, pd.DataFrame([dict(doc[0])['page_content'] for doc in docs_with_score], columns = ['page_content'])), axis = 1)
+     df = pd.concat((df, pd.DataFrame([doc[1] for doc in docs_with_score], columns = ['score'])), axis = 1)
+
+     # TODO: handle queries that do not match any of the indexed documents
+
+     tier_1 = df[df['score'] < 0.7]
+     tier_2 = df[(df['score'] < 0.95) & (df["score"] > 0.7)]
+
+     chunks_1 = tier_1.groupby(['title', 'url', '_id']).apply(lambda x: "\n...\n".join(x.sort_values('id')['page_content'].values)).values
+     tier_1_adjusted = tier_1.groupby(['title', 'url', '_id']).first().reset_index()[['_id', 'title', 'url']]
+     tier_1_adjusted['ref'] = range(1, len(tier_1_adjusted) + 1)
+     tier_1_adjusted['content'] = chunks_1
+
+     # chunks_2 = tier_2.groupby(['title', 'url', '_id']).apply(lambda x: "\n...\n".join(x.sort_values('id')['page_content'].values)).values
+     # tier_2_adjusted = tier_2.groupby(['title', 'url', '_id']).first().reset_index()[['_id', 'title', 'url']]
+     # tier_2_adjusted['content'] = chunks_2
+
+     print(len(tier_1_adjusted))
+     # tier_1 = [doc[0] for doc in docs if ((doc[1] < 1))][:5]
+     # tier_2 = [doc[0] for doc in docs if ((doc[1] > 0.7)*(doc[1] < 1.5))][10:15]
+
+     reference = tier_1_adjusted[['ref', 'url', 'title']].to_dict('records')
+
+     return {'tier 1': tier_1_adjusted}, reference
+
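+ # Format the tier-1 articles, build the synthesis prompt, and run it through the selected model.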
+ def qa_retrieve_test(query, llm, thoughts):
+
+     llm, n = pipeline[llm]
+
+     docs = ""
+
+     # global db
+     # print(db)
+
+     # global mp_docs
+     # thoughts = retrieve_thoughts(query, n)
+
+     # if not(thoughts):
+
+     #     if mp_docs:
+     #         thoughts = mp_docs
+
+     #     else:
+     #         mp_docs = thoughts
+
+     tier_1 = thoughts['tier 1']
+     # tier_2 = thoughts['tier 2']
+
+     # reference = tier_1[['ref', 'url', 'title']].to_dict('records')
+
+     if n:
+         tier_1 = tier_1[:min(len(tier_1), n)]
+
+     tier_1 = list(tier_1.apply(lambda x: f"[{int(x['ref'])}] title: {x['title']}\n Content: {x.content}", axis = 1).values)
+     print(len(tier_1))
+     # tier_2 = list(tier_2.apply(lambda x: f"title: {x['title']}\n Content: {x.content}", axis = 1).values)
+
+     # print(f"QUERY: {query}\nTIER 1: {tier_1}\nTIER2: {tier_2}")
+     # print(f"DOCS RETRIEVED: {mp_docs.values}")
+
+     # Cynthesis generation
+     session_prompt = """a bot that is open to discussions about different cultural, philosophical and political topics. You will perform different analyses on the articles provided to you. Stay truthful, and if you were not provided any resources, give only your opinion."""
+     task = """Create a coherent synthesis that cites, by id, the provided articles relevant to the query.
+
+ Follow the example structure:
+
+ The best wine to pair with steak depends on the cut of steak and the preparation. Here are some general guidelines for pairing wine with steak:
+ - Choose a dry red wine. The rule of thumb is to choose dry red wines.
+ - Leaner cuts of meat pair with lighter wines, while richer, fattier cuts pair with high-tannin wines that can cut through the fat [1].
+ - Consider the cut of steak. Lighter red wines tend to go best with leaner cuts of steak such as filet mignon, while more marbled, higher-fat cuts of meat like a rib eye do well when accompanied by more robust red wines [3].
+ - Take into account the preparation. For a spiced steak, go for a wine with lots of fruit to balance out the heat, like an Old Vine Zinfandel. And if you're drowning your steak in a decadent sauce, find a wine with enough body to stand up to it, like a Cabernet Sauvignon [5].
+ - Popular wine choices include Cabernet Sauvignon, Pinot Noir, Zinfandel, Malbec, Syrah, and Merlot [2].
+ Remember, the goal is to choose a wine that complements the cut of steak and does not overwhelm or take away from the flavor of the meat [3].
+ """
+
+     prompt = PromptTemplate(
+         input_variables=["query", "task", "session_prompt", "articles"],
+         template="""
+ You are {session_prompt}
+ {task}
+
+ query: {query}
+
+ Articles:
+ {articles}
+
+ Make sure to cite the article used whenever an argument corresponds to the query.
+ Use careful reasoning to explain your answer and give your conclusion.
+ """,
+     )
+
+     # llm = BardLLM()
+     chain = LLMChain(llm=llm, prompt = prompt)
+
+     response = chain.run(query=query, articles="\n".join(tier_1), session_prompt = session_prompt, task = task)
+
+     return response
+
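+ # Run the query through all three models and return their syntheses plus the shared reference list.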
+ def greet(query):
+
+     llms = ['claude', 'gpt-3.5', 'gpt-4']
+     thoughts, reference = retrieve_thoughts_test(query)
+
+     print(reference)
+     # Retrieve the documents once here; every model answers the same query, so the same documents apply.
+
+     result = [qa_retrieve_test(query, llm, thoughts) for llm in llms]
+     result.append({'reference': reference})
+
+     print(len(result), result[-1], result[-2])
+
+     _1, _2, _3, _4 = result
+
+
+     return _1, _2, _3, _4
+
+
+ examples = [
+     ["How to be happy"],
+ ]
+
+ demo = gr.Interface(fn=greet, title="cicero-cynthesis-eval", inputs="text",
+                     outputs=[gr.components.Textbox(lines=3, label="Claude"),
+                              gr.components.Textbox(lines=3, label="GPT3.5"),
+                              gr.components.Textbox(lines=3, label="GPT-4"),
+                              gr.components.JSON(label="Reference")],
+                     examples=examples)
+
+ demo.queue(concurrency_count = 4)
+ demo.launch()
+
db_full/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9051c0122a839f58dc047ba2145fd887b64a33ecd746bd17aec950ca044f0653
+ size 354250797
db_full/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ccf07d4b39015b8e101152d341883a99d311f22ab7dfc5edba88277041b9179
+ size 102244751
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ pandas
+ langchain
+ openai
+ faiss-gpu
+ tiktoken
+ transformers
+ sentence_transformers
+ bson
+ anthropic==0.2.10
utils.py ADDED
@@ -0,0 +1,49 @@
+ from langchain.llms.base import LLM
+ from typing import Optional, List, Mapping, Any
+ import anthropic
+ from urllib.parse import urlparse
+ import os
+
+ class ClaudeLLM(LLM):
+
+     @property
+     def _llm_type(self) -> str:
+
+         return "custom"
+
+     def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
+
+         client = anthropic.Client(os.environ['ANTHROPIC_KEY'])
+
+         # Wrap the prompt in Anthropic's human/assistant markers.
+         prompt_formatted = (
+             f"{anthropic.HUMAN_PROMPT}{prompt}\n{anthropic.AI_PROMPT}"
+         )
+
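+         # anthropic 0.2.x completion API (pinned in requirements.txt); claude-instant-v1-100k
+         # is the long-context variant, and max_tokens_to_sample caps the generated output.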
+         response = client.completion(
+             prompt=prompt_formatted,
+             stop_sequences=[anthropic.HUMAN_PROMPT],
+             model="claude-instant-v1-100k",
+             max_tokens_to_sample=100000,
+             temperature=0.3,
+         )
+
+         return response["completion"]
+
+     @property
+     def _identifying_params(self) -> Mapping[str, Any]:
+         """Get the identifying parameters."""
+         return {}
+
+ def remove_numbers(question):
+     return question.translate(str.maketrans('', '', '0123456789'))
+
+ def extract_website_name(url):
+     parsed_url = urlparse(url)
+     if parsed_url.netloc.startswith("www."):
+         return parsed_url.netloc.split("www.")[1].split(".")[0]
+     return parsed_url.netloc.split(".")[0]