Spaces:
Sleeping
Sleeping
File size: 6,578 Bytes
c114607 fd77360 c114607 4174fb4 c114607 b68875e 0faabd3 b68875e 4174fb4 c114607 4174fb4 b68875e c114607 e7f116b 4174fb4 5540250 f787d9d c114607 a32a927 4174fb4 e7f116b 4174fb4 c114607 8474d86 b68875e c114607 e7f116b d115c77 c114607 4174fb4 c114607 4174fb4 b68875e 4174fb4 b68875e 4174fb4 b68875e c114607 4174fb4 c114607 4174fb4 c114607 4174fb4 c114607 4174fb4 b68875e 4174fb4 b68875e 4174fb4 b68875e 4174fb4 b68875e 4174fb4 c114607 4174fb4 c114607 4174fb4 c114607 4174fb4 c114607 4174fb4 b68875e 4174fb4 b68875e 4174fb4 b68875e 4174fb4 c114607 f936bb8 c114607 b68875e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 |
import gradio as gr
import numpy as np
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import LLMChain
from langchain import PromptTemplate
import re
import pandas as pd
from langchain.vectorstores import FAISS
import requests
from typing import List
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
import os
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.llms.base import LLM
from typing import Optional, List, Mapping, Any
import ast
from utils import ClaudeLLM, ClaudeLLM2, extract_website_name, remove_numbers
# --- Module-level resources (created once at import/startup) ---

# Sentence-embedding model used to encode queries for both FAISS stores.
embeddings = HuggingFaceEmbeddings()
# Pre-built FAISS indexes loaded from disk: one over articles, one over
# YouTube transcripts. NOTE(review): load_local deserializes pickled data —
# only safe because these directories ship with the app.
db_art = FAISS.load_local('db_art', embeddings)
db_yt = FAISS.load_local('db_yt', embeddings)
# Cache mapping (presumably query -> retrieved docs); not used in this file.
mp_docs = {}
# Deterministic GPT-4 client (temperature 0); defined here but not referenced
# in this file — kept for compatibility with other callers.
llm_4 = ChatOpenAI(
    temperature=0,
    model='gpt-4'
)
# Anthropic Claude wrappers from the local utils module; `claude` drives
# jsonify_articles below.
claude = ClaudeLLM()
claude2 = ClaudeLLM2()
def add_text(history, text):
    """Append a new user turn to a Gradio chat history.

    Parameters
    ----------
    history : list[tuple]
        Existing chat history as (user_message, bot_message) pairs.
    text : str
        The new user message.

    Returns
    -------
    tuple
        (new history with ``(text, None)`` appended, "") — the empty string
        clears the input textbox in the UI.
    """
    # Build a new list rather than mutating the caller's history in place.
    return history + [(text, None)], ""
def retrieve_thoughts(query, media):
    """Similarity-search the FAISS store for `query` and bundle chunks per article.

    Parameters
    ----------
    query : str
        Free-text user query, embedded and matched against the store.
    media : sequence of str
        First element selects the store: "Articles" -> db_art, else db_yt.

    Returns
    -------
    dict
        'tier 1': DataFrame [title, url, content, score, ref] — up to 30 best
                  articles, best (lowest distance) first, 1-based `ref` ids.
        'tier 2': DataFrame [title, url, content] — articles from the looser
                  0.8 < score < 1 band, first 5 rows.
    """
    db = db_art if media[0] == "Articles" else db_yt

    # Pull a large candidate pool; fetch_k spans the whole index so the 1500
    # nearest chunks are chosen from every stored document.
    docs_with_score = db.similarity_search_with_score(
        query=query, k=1500, fetch_k=len(db.index_to_docstore_id.values())
    )

    # Flatten (Document, score) pairs into one frame: metadata + text + score.
    df = pd.DataFrame([dict(doc)['metadata'] for doc, _ in docs_with_score])
    df['page_content'] = [dict(doc)['page_content'] for doc, _ in docs_with_score]
    df['score'] = [score for _, score in docs_with_score]

    # TO-DO: handle the case where the query matches nothing we indexed.
    tier_1 = df
    tier_2 = df[(df['score'] < 1) & (df['score'] > 0.8)]

    grouped_1 = tier_1.groupby(['title', 'url'])
    # Re-join each article's chunks in stored order, separated by ellipses.
    chunks_1 = grouped_1.apply(
        lambda x: "\n...\n".join(x.sort_values('id')['page_content'].values)
    ).values
    # Per-article score = mean of its 3 best (lowest-distance) chunk scores.
    score = grouped_1.apply(
        lambda x: x.sort_values('score').iloc[:3]['score'].mean()
    ).values

    tier_1_adjusted = grouped_1.first().reset_index()[['title', 'url']]
    tier_1_adjusted['content'] = chunks_1
    tier_1_adjusted['score'] = score

    grouped_2 = tier_2.groupby(['title', 'url'])
    chunks_2 = grouped_2.apply(
        lambda x: "\n...\n".join(x.sort_values('id')['page_content'].values)
    ).values
    tier_2_adjusted = grouped_2.first().reset_index()[['title', 'url']]
    tier_2_adjusted['content'] = chunks_2

    # Best matches first; 1-based refs feed the "[1]" citation markers.
    tier_1_adjusted.sort_values("score", inplace=True)
    tier_1_adjusted['ref'] = range(1, len(tier_1_adjusted) + 1)

    # head() already copes with frames shorter than the cutoff, and iloc makes
    # tier 2 a true 5-row limit (the old label slice loc[:5] kept 6 rows).
    return {
        'tier 1': tier_1_adjusted.head(30),
        'tier 2': tier_2_adjusted.iloc[:5],
    }
def get_references(query, media):
    """Return the citation list for the tier-1 articles matching `query`.

    Parameters
    ----------
    query : str
        User query forwarded to retrieve_thoughts.
    media : sequence of str
        Media selector forwarded to retrieve_thoughts.

    Returns
    -------
    list[dict]
        One record per article with keys 'ref', 'url' and 'title', suitable
        for the JSON output component of the Gradio interface.
    """
    thoughts = retrieve_thoughts(query, media)
    tier_1 = thoughts['tier 1']
    return tier_1[['ref', 'url', 'title']].to_dict('records')
def grab_jsons(query, media=None, tier_1=None):
    """Extract structured quote/summary blocks for every tier-1 article.

    Retrieves the tier-1 articles for `query` (unless a pre-built `tier_1`
    DataFrame is supplied), formats one "[ref] title / Content" blob per
    article, then feeds them to the LLM three at a time and concatenates the
    responses.

    Parameters
    ----------
    query : str
    media : sequence of str, optional
        Media selector, only used when `tier_1` is not supplied.
    tier_1 : pandas.DataFrame, optional
        Pre-retrieved tier-1 frame with 'ref', 'title' and 'content' columns.

    Returns
    -------
    str
        The LLM outputs for all batches, joined with newlines.
    """
    response = ""
    if tier_1 is None:
        thoughts = retrieve_thoughts(query, media)
        tier_1 = thoughts['tier 1']

    tier_1 = list(
        tier_1.apply(
            lambda x: f"[{int(x['ref'])}] title: {x['title']}\n Content: {x.content}",
            axis=1,
        ).values
    )

    # Batch in threes, including the final partial batch. The previous loop,
    # range(3, len(tier_1), 3), silently dropped any trailing articles — and
    # produced no output at all when fewer than 3 articles were retrieved.
    for start in range(0, len(tier_1), 3):
        response += '\n' + jsonify_articles(query, tier_1[start:start + 3])
    return response
def jsonify_articles(query, tier_1=None):
    """Ask the LLM to extract references, titles, quotes and summaries.

    Parameters
    ----------
    query : str
        The query the extracted quotes should relate to.
    tier_1 : list[str], optional
        Pre-formatted "[ref] title / Content" article blobs. When None, the
        articles are retrieved and formatted here instead.

    Returns
    -------
    str
        The raw (stripped) LLM response. Intended shape per article:
        {'ref': int, 'quotes': ['quote_1', ...], 'summary': ""}
    """
    if tier_1 is None:
        # Default to the article store; the old one-arg call to
        # retrieve_thoughts raised TypeError (missing `media`).
        thoughts = retrieve_thoughts(query, ["Articles"])
        tier_1 = thoughts['tier 1']
        tier_1 = list(
            tier_1.apply(
                lambda x: f"[{int(x['ref'])}] title: {x['title']}\n Content: {x.content}",
                axis=1,
            ).values
        )

    task = """Your primary responsibility is to identify valuable information from the given articles related to a given query.
For each article provided, you are to present it under four separate categories:
1. Article Reference - A reference for the article id: int
2. Article Title - The title for the article: string
3. Article quotes - Numerous Quotes extracted from the article that prove certain point of views in a list format [quote_1, quote_2, quote_3, quote_4, quote_5]
4. Article Summary - A summary for the article: string
Make sure to include all valuable quotes to be used later on.
Keep your answer direct and don't include your thoughts. Make sure that the quote used should have a reference [1] that identifies the source."""

    prompt = PromptTemplate(
        input_variables=["query", "task", "articles"],
        template="""
{task}
The extracted information should correlate to the following query.
query: {query}
Articles:
{articles}
The extracted information should be written in structured manner, ensuring clarity and meaningful format for the articles. Avoid including personal opinions or making generalizations that are not explicitly supported by the articles.
Keep your answer direct and don't include your thoughts.
""",
    )

    # Claude does the extraction; all articles in this batch go in one call.
    chain = LLMChain(llm=claude, prompt=prompt)
    return chain.run(query=query, articles="\n".join(tier_1), task=task).strip()
# --- Gradio wiring: two interfaces over the same (query, media) inputs ---
# `reference` returns the tier-1 citation list as JSON; `json` returns the
# LLM-extracted quotes/summaries as plain text. gr.Parallel runs both for a
# single submission and shows their outputs side by side.
reference = gr.Interface(fn = get_references, inputs = ["text", gr.CheckboxGroup(["Articles", "Podcasts", "Youtube"], label="Media", info="Choose One Type of Media until we merge (Podcasts excluded for now)"),], outputs = "json", label = "Reference")
json = gr.Interface(fn = grab_jsons, inputs = ["text", gr.CheckboxGroup(["Articles", "Podcasts", "Youtube"], label="Media", info="Choose One Type of Media until we merge (Podcasts excluded for now)"),], outputs = gr.components.Textbox(lines=3, label="json"))
demo = gr.Parallel(json, reference)
# Up to 4 requests processed concurrently from the queue.
demo.queue(concurrency_count = 4)
demo.launch()
|