import os
from langchain.memory import ConversationBufferMemory
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.agents import initialize_agent, Tool
from langchain_g4f import G4F  # G4F LLM wrapper; assumed to come from the langchain_g4f package
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=["*"],   # allow all origins
    allow_methods=["*"],   # allow all HTTP methods
    allow_headers=["*"],   # allow all headers
)
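# Configuration pulled from the environment: GOOGLE_API_KEY and GOOGLE_CSE_ID
# drive the Google Custom Search tool, and default_model selects the G4F model.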
google_api_key = os.environ["GOOGLE_API_KEY"]
cse_id = os.environ["GOOGLE_CSE_ID"]
model = os.environ["default_model"]
search = GoogleSearchAPIWrapper()
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful when you need to answer questions about current events",
    ),
]
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm = G4F(model=model)
agent_chain = initialize_agent(
    tools, llm, agent="chat-conversational-react-description",
    verbose=True, memory=memory,
)
class messages(BaseModel):
    messages: str
@app.get("/")
def gello():
return "Hello! My name is Linlada."
@app.post("/linlada")
def hello_post(message: messages):
    # Create a fresh G4F client per request and send just the text of the message
    llm = G4F(model=model)
    chat = llm(message.messages)
    return chat
@app.post("/search")
def searches(message: messages):
    # Run the search-enabled agent on the text of the message
    response = agent_chain.run(input=message.messages)
    return response
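
# ---------------------------------------------------------------------------
# Usage sketch (assumptions: this module is saved as main.py and uvicorn plus
# the requests package are installed; adjust names and the model string to
# your setup):
#
#   GOOGLE_API_KEY=... GOOGLE_CSE_ID=... default_model=gpt-3.5-turbo \
#       uvicorn main:app --host 0.0.0.0 --port 8000
#
# Example request against the /linlada endpoint:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:8000/linlada",
#       json={"messages": "Hello, who are you?"},
#   )
#   print(resp.json())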