Spaces:
Running
Running
Update main.py
Browse files
main.py
CHANGED
@@ -1,81 +1,67 @@
|
|
1 |
-
from
|
2 |
-
from half_json.core import JSONFixer
|
3 |
-
from openai import OpenAI
|
4 |
-
from retry import retry
|
5 |
-
import re
|
6 |
-
from dotenv import load_dotenv
|
7 |
-
import os
|
8 |
-
from fastapi import FastAPI
|
9 |
-
from fastapi import Query
|
10 |
from pydantic import BaseModel
|
11 |
-
from
|
12 |
-
from helper_functions_api import md_to_html
|
13 |
-
|
14 |
-
import
|
15 |
|
16 |
-
#
|
17 |
-
|
18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
|
20 |
-
SysPromptDefault = "You are an expert AI, complete the given task. Do not add any additional comments."
|
21 |
-
SysPromptList = "You are now in the role of an expert AI who can extract structured information from user request. All elements must be in double quotes. You must respond ONLY with a valid python List. Do not add any additional comments."
|
22 |
SysPromptJson = "You are now in the role of an expert AI who can extract structured information from user request. Both key and value pairs must be in double quotes. You must respond ONLY with a valid JSON file. Do not add any additional comments."
|
|
|
|
|
23 |
SysPromptMd = "You are an expert AI who can create a structured report using information provided in the context from user request.The report should be in markdown format consists of markdown tables structured into subtopics. Do not add any additional comments."
|
24 |
-
SysPromptMdOffline = "You are an expert AI who can create a structured report using your knowledge on user request.The report should be in markdown format consists of markdown tables/lists/paragraphs as needed, structured into subtopics. Do not add any additional comments."
|
25 |
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
messages=[{"role": "system", "content": SysPrompt},{"role": "user", "content": message}]
|
32 |
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
return response.choices[0].message.content
|
39 |
|
40 |
-
|
41 |
-
|
42 |
-
Extracts JSON from text using regex and fuzzy JSON loading.
|
43 |
-
"""
|
44 |
-
match = re.search(r'\{[\s\S]*\}', text)
|
45 |
-
if match:
|
46 |
-
json_out = match.group(0)
|
47 |
-
else:
|
48 |
-
json_out = text
|
49 |
-
try:
|
50 |
-
# Using fuzzy json loader
|
51 |
-
return loads(json_out)
|
52 |
-
except Exception:
|
53 |
-
# Using JSON fixer/ Fixes even half json/ Remove if you need an exception
|
54 |
-
fix_json = JSONFixer()
|
55 |
-
return loads(fix_json.fix(json_out).line)
|
56 |
|
57 |
-
|
58 |
-
|
59 |
-
prompt = f"""create a list of {num_topics} subtopics along with descriptions to follow for conducting {user_input} in the context of {previous_context}, RETURN A VALID PYTHON LIST"""\
|
60 |
-
+""" Respond in the following format:
|
61 |
-
[["Subtopic","Description"],["Subtopic","Description"]]"""
|
62 |
-
response_topics = together_response(prompt, model="meta-llama/Llama-3-8b-chat-hf", SysPrompt=SysPromptList)
|
63 |
-
subtopics = json_from_text(response_topics)
|
64 |
-
return subtopics
|
65 |
|
66 |
-
|
67 |
-
|
68 |
-
md_report = together_response(prompt, model = "meta-llama/Llama-3-70b-chat-hf", SysPrompt = SysPromptMdOffline)
|
69 |
-
return md_to_html(md_report)
|
70 |
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
79 |
|
80 |
app.add_middleware(
|
81 |
CORSMiddleware,
|
@@ -84,59 +70,3 @@ app.add_middleware(
|
|
84 |
allow_methods=["*"],
|
85 |
allow_headers=["*"],
|
86 |
)
|
87 |
-
|
88 |
-
# Create a Pydantic model to handle the input data
|
89 |
-
class TopicInput(BaseModel):
|
90 |
-
user_input: str = Query(default="market research", description="input query to generate subtopics")
|
91 |
-
num_topics: int = Query(default=5, description="Number of subtopics to generate (default: 5)")
|
92 |
-
previous_queries: list[str] = Query(default=[], description="List of previous queries for context")
|
93 |
-
|
94 |
-
class imageInput(BaseModel):
|
95 |
-
user_input: str = Query(default="market research", description="input query to generate subtopics")
|
96 |
-
num_images: int = Query(default=5, description="Number of subtopics to generate (default: 5)")
|
97 |
-
|
98 |
-
class ReportInput(BaseModel):
|
99 |
-
topic: str = Query(default="market research",description="The main topic for the report")
|
100 |
-
description: str = Query(default="",description="A brief description of the topic")
|
101 |
-
|
102 |
-
class RecommendationInput(BaseModel):
|
103 |
-
user_input: str = Query(default="", description="Input query to generate follow-up questions")
|
104 |
-
num_recommendations: int = Query(default=5, description="Number of recommendations to generate")
|
105 |
-
|
106 |
-
@app.get("/", tags=["Home"])
|
107 |
-
def api_home():
|
108 |
-
return {'detail': 'Welcome to FastAPI Subtopics API! Visit https://pvanand-generate-subtopics.hf.space/docs to test'}
|
109 |
-
|
110 |
-
@app.post("/generate_topics")
|
111 |
-
async def create_topics(input: TopicInput):
|
112 |
-
topics = generate_topics(input.user_input, input.num_topics, input.previous_queries)
|
113 |
-
return {"topics": topics}
|
114 |
-
|
115 |
-
@app.post("/generate_report")
|
116 |
-
async def create_report(input: ReportInput):
|
117 |
-
report = generate_report(input.topic, input.description)
|
118 |
-
return {"report": report}
|
119 |
-
|
120 |
-
@app.post("/get_images")
|
121 |
-
async def fetch_images(input: imageInput):
|
122 |
-
images = get_images(input.user_input, input.num_images)
|
123 |
-
return {"images": images}
|
124 |
-
|
125 |
-
@app.post("/get_recommendations")
|
126 |
-
async def generate_recommendations(input: RecommendationInput):
|
127 |
-
|
128 |
-
if input.user_input:
|
129 |
-
prompt = f"""create a list of {input.num_recommendations} questions that a user might ask following the question: {input.user_input}:"""
|
130 |
-
else:
|
131 |
-
prompt = f"""create a list of mixed {input.num_recommendations} questions to create a report or plan or course on any of the topics product,market,research topic """
|
132 |
-
|
133 |
-
response_topics = json_from_text(
|
134 |
-
together_response(
|
135 |
-
prompt, model="meta-llama/Llama-3-8b-chat-hf", SysPrompt=SysPromptList,temperature=1
|
136 |
-
)
|
137 |
-
)
|
138 |
-
return {"recommendations": response_topics}
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
|
|
# Standard library
import os
from typing import List, Dict, Any

# Third-party
from dotenv import load_dotenv, find_dotenv
from fastapi import FastAPI, HTTPException, Request, Query
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field

# Local
from helper_functions_api import md_to_html, search_brave, fetch_and_extract_content, limit_tokens, together_response, insert_data
|
8 |
+
# Load environment variables from .env file
|
9 |
+
# load_dotenv("keys.env")
|
10 |
+
|
11 |
+
app = FastAPI()
|
12 |
+
TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
|
13 |
+
BRAVE_API_KEY = os.getenv('BRAVE_API_KEY')
|
14 |
+
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
|
15 |
+
HELICON_API_KEY = os.getenv("HELICON_API_KEY")
|
16 |
+
SUPABASE_USER = os.environ['SUPABASE_USER']
|
17 |
+
SUPABASE_PASSWORD = os.environ['SUPABASE_PASSWORD']
|
18 |
+
|
19 |
+
llm_default_small = "llama3-8b-8192"
|
20 |
+
llm_default_medium = "llama3-70b-8192"
|
21 |
|
|
|
|
|
22 |
SysPromptJson = "You are now in the role of an expert AI who can extract structured information from user request. Both key and value pairs must be in double quotes. You must respond ONLY with a valid JSON file. Do not add any additional comments."
|
23 |
+
SysPromptList = "You are now in the role of an expert AI who can extract structured information from user request. All elements must be in double quotes. You must respond ONLY with a valid python List. Do not add any additional comments."
|
24 |
+
SysPromptDefault = "You are an expert AI, complete the given task. Do not add any additional comments."
|
25 |
SysPromptMd = "You are an expert AI who can create a structured report using information provided in the context from user request.The report should be in markdown format consists of markdown tables structured into subtopics. Do not add any additional comments."
|
|
|
26 |
|
class Query(BaseModel):
    """Request body for the /generate_report endpoint.

    NOTE(review): this class shadows ``fastapi.Query`` at module level once
    defined; a rename (e.g. ``ReportQuery``) would be clearer, but the name
    is kept so existing callers and route signatures keep working.
    """

    # Request-body model fields must be declared with pydantic.Field;
    # fastapi.Query is for query-string parameters, not body fields.
    # Defaults and descriptions are unchanged, so the API schema is stable.
    query: str = Field(default="market research", description="input query to generate Report")
    description: str = Field(default="", description="additional context for report")
    user_id: str = Field(default="", description="unique user id")
    user_name: str = Field(default="", description="user name")
|
|
32 |
|
@app.post("/generate_report")
async def generate_report(request: Request, query: Query):
    """Build an HTML research report for the requested topic.

    Searches the web for the query, extracts readable content from the top
    results, asks the LLM for a markdown report grounded in that content,
    persists the run, and returns the report plus per-URL reference
    snippets, all rendered to HTML.
    """
    query_str = query.query
    description = query.description
    user_id = query.user_id

    # The raw user query doubles as the web-search query.
    search_query = query_str

    # Retrieve candidate source URLs, then pull text content from each.
    urls = search_brave(search_query, num_results=4)
    all_text_with_urls = fetch_and_extract_content(urls, query_str)

    # Clamp the scraped context to the token budget before prompting.
    additional_context = limit_tokens(str(all_text_with_urls))
    prompt = f"#### ADDITIONAL CONTEXT:{additional_context} #### CREATE A DETAILED REPORT FOR THE QUERY:{query_str} #### IN THE CONTEXT OF ### CONTEXT: {description}"
    md_report = together_response(prompt, model=llm_default_medium, SysPrompt=SysPromptMd)

    # Persist the request and its result (database or other storage).
    insert_data(user_id, query_str, description, str(all_text_with_urls), md_report)

    # One HTML-rendered snippet per source URL.
    references_html = {url: str(md_to_html(text)) for text, url in all_text_with_urls}

    return {
        "report": md_to_html(md_report),
        "references": references_html,
    }
65 |
|
66 |
app.add_middleware(
|
67 |
CORSMiddleware,
|
|
|
70 |
allow_methods=["*"],
|
71 |
allow_headers=["*"],
|
72 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|