import os

from langchain.tools import DuckDuckGoSearchRun
from openai import OpenAI
from fastapi import FastAPI, HTTPException, Query

## user
# Example topic; the API endpoint below takes its topic from the request instead.
user_input = "prepare a report on Kolkata rape case"

## query
def generate_first_query(topic, api_key, base_url):
    """
    Generate a question or search query about a given topic using the OpenAI API.

    Args:
        topic (str): The topic provided by the user.
        api_key (str): Your OpenAI API key.
        base_url (str): Custom base URL for the OpenAI API.

    Returns:
        str: A generated question or search query, or None on error.
    """
    sys_prompt = """
    You are an AI model designed to generate follow-up prompts or questions that seek
    more detailed information about a specific case provided by the user. When given
    an initial prompt or question, you will:
    1. Ensure that the follow-up query is a short, concise question.
    2. Guarantee that the query is directly relevant to the topic provided.
    3. Output only the query, without any additional information or context.
    """
    # Set up the OpenAI client
    client = OpenAI(api_key=api_key, base_url=base_url)
    try:
        # Create the prompt for generating a query
        prompt = f"Generate a detailed question or search query to get more information about the following topic: '{topic}'"
        # Make the API call to generate the query
        response = client.chat.completions.create(
            model=os.getenv("OPENAI_MODEL") or "meta-llama/llama-3.1-8b-instruct:free",  # Change this to a different model if needed
            messages=[
                {"role": "system", "content": sys_prompt},
                {"role": "user", "content": prompt},
            ],
            max_tokens=50,    # Controls the maximum length of the generated query
            n=1,
            temperature=0.7,  # Controls the randomness of the output
        )
        # Extract the generated query from the response
        generated_query = response.choices[0].message.content.strip()
        return generated_query
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return None


def generate_another_query(topic, api_key, base_url, old_data):
    """
    Generate a follow-up search query about a topic, given the queries and
    search results gathered so far.
    """
    sys_prompt = """
    You are an AI model designed to generate follow-up prompts or questions that seek
    more detailed information about a specific case provided by the user. When given
    an initial prompt or question, you will:
    1. Ensure that the follow-up query is a short, concise question.
    2. Guarantee that the query is directly relevant to the topic provided.
    3. Output only the query, without any additional information or context.
    """
    client = OpenAI(api_key=api_key, base_url=base_url)
    try:
        # Create the prompt for generating a follow-up query
        prompt = f"""Generate another detailed question or search query to get more information about the following topic.
        Here is the topic: '{topic}'
        Here are the previous search queries and their results: '{old_data}'
        """
        # Make the API call to generate the query
        response = client.chat.completions.create(
            model=os.getenv("OPENAI_MODEL") or "meta-llama/llama-3.1-8b-instruct:free",  # Change this to a different model if needed
            messages=[
                {"role": "system", "content": sys_prompt},
                {"role": "user", "content": prompt},
            ],
            max_tokens=50,    # Controls the maximum length of the generated query
            n=1,
            temperature=0.7,  # Controls the randomness of the output
        )
        # Extract the generated query from the response
        generated_query = response.choices[0].message.content.strip()
        return generated_query
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return None
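
# A minimal usage sketch for the two query generators above (illustrative only;
# assumes OPENAI_API_KEY and OPENAI_API_BASE are set in the environment, and the
# topic string is a made-up example):
#
#   first = generate_first_query("solar power adoption in India",
#                                os.getenv("OPENAI_API_KEY"),
#                                os.getenv("OPENAI_API_BASE"))
#   follow_up = generate_another_query("solar power adoption in India",
#                                      os.getenv("OPENAI_API_KEY"),
#                                      os.getenv("OPENAI_API_BASE"),
#                                      old_data=f"Query: {first}\nResearch: ...")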

## summarizer
def summarize_data(data, api_key, base_url):
    """
    Summarize the collected queries and search results into a structured report.
    """
    sys_prompt = """
    You are an AI model designed to summarize the provided data. When given a set of
    data, you will:
    1. Ensure that the summary is a concise and clear summary of the provided data.
    2. Guarantee that the summary is directly relevant to the topic provided.
    3. Output only the summary, without any additional information or context.
    4. The summary should be in a structured format, with each point on a new line.
    """
    client = OpenAI(api_key=api_key, base_url=base_url)
    try:
        # Create the prompt for summarization
        prompt = f"""Summarize the following data: '{data}'"""
        # Make the API call to generate the summary
        response = client.chat.completions.create(
            model=os.getenv("OPENAI_MODEL") or "meta-llama/llama-3.1-8b-instruct:free",  # Change this to a different model if needed
            messages=[
                {"role": "system", "content": sys_prompt},
                {"role": "user", "content": prompt},
            ],
            max_tokens=2000,  # Controls the maximum length of the generated summary
            n=1,
            temperature=0.5,  # Controls the randomness of the output
        )
        # Extract the generated summary from the response
        summary = response.choices[0].message.content.strip()
        return summary
    except Exception as e:
        print(f"An error occurred: {str(e)}")
        return None


## websearch_duckduckgo
search = DuckDuckGoSearchRun()

## API
app = FastAPI()


@app.post("/generate_report")
async def generate_report(topic: str = Query(..., description="The topic for the report")):
    try:
        # Generate the first query and run the initial web search
        query = generate_first_query(topic, os.getenv("OPENAI_API_KEY"), os.getenv("OPENAI_API_BASE"))
        research = search.run(query)
        all_data = f"Query: {query}\nResearch: {research}\n"
        # Iteratively generate follow-up queries informed by everything gathered so far
        for _ in range(7):
            next_query = generate_another_query(
                topic,
                os.getenv("OPENAI_API_KEY"),
                os.getenv("OPENAI_API_BASE"),
                old_data=all_data,
            )
            next_research = search.run(next_query)
            all_data += f"Query: {next_query}\nResearch: {next_research}\n"
        # Condense the accumulated research into the final report
        output = summarize_data(all_data, os.getenv("OPENAI_API_KEY"), os.getenv("OPENAI_API_BASE"))
        return {"report": output}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/")
async def root():
    return {"message": "Welcome to the Report Generation API"}


if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
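
# Example request once the server is running (hypothetical local host/port,
# matching the uvicorn settings above). The endpoint reads `topic` from the
# query string, runs one initial search plus seven follow-up searches, and
# returns {"report": "<summary>"}:
#
#   curl -X POST "http://localhost:7860/generate_report?topic=renewable%20energy"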