import os
import logging
import subprocess

import langid
from fastapi import FastAPI, Request
from huggingface_hub import InferenceClient, login  # , configure_http_backend, get_session

# import requests
# # Create a factory function that returns a requests.Session object
# def backend_factory() -> requests.Session:
#     session = requests.Session()
#     # session.proxies = {"http": "http://your-proxy-host:your-proxy-port",
#     #                    "https": "http://your-proxy-host:your-proxy-port"}
#     return session
#
# # Set our custom backend factory as the default HTTP backend
# configure_http_backend(backend_factory=backend_factory)
#
# # For debugging
# session = get_session()
# print("Configured session:", session)
# Configure logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)

# Get the Hugging Face API token from the environment
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

# Log in and initialize the InferenceClient
login(token=HF_HUB_TOKEN)
client = InferenceClient(token=HF_HUB_TOKEN)

# Create the FastAPI app instance
app = FastAPI()

def detect_language(user_input):
    """
    Detect the language of the input text.
    Returns "hebrew" if Hebrew, "english" if English, or "unsupported" otherwise.
    """
    try:
        lang, _ = langid.classify(user_input)
        if lang == "he":
            return "hebrew"
        elif lang == "en":
            return "english"
        else:
            return "unsupported"
    except Exception as e:
        logger.error(f"Language detection error: {e}")
        return "unsupported"

def generate_response(text):
    """
    Generate a response based on the input text.
    Selects a prompt and model according to the detected language,
    then calls the Hugging Face chat completion API.
    """
    language = detect_language(text)
    if language == "hebrew":
        # Hebrew prompt: answer briefly but explain your decision-making process
        content = "תשמור על תשובה קצרה, אבל תספר איך קיבלת את ההחלטה, " + text
        model = "mistralai/Mistral-Nemo-Instruct-2407"
    elif language == "english":
        content = "keep it short but tell your decision making process, " + text
        model = "mistralai/Mistral-Nemo-Instruct-2407"
    else:
        return "Sorry, I only support Hebrew and English."

    messages = [{"role": "user", "content": content}]
    try:
        completion = client.chat.completions.create(
            model=model,  # pass the model selected above; if omitted, the client's default model is used
            messages=messages,
            max_tokens=2048,
            temperature=0.5,
            top_p=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return "Error: Could not generate response."

@app.post("/generate_response")
async def generate_text(request: Request):
    """
    API endpoint to generate a response from the chat model.
    Expects a JSON body with a "text" field.
    """
    try:
        data = await request.json()
        text = data.get("text", "").strip()
        if not text:
            return {"error": "No text provided"}
        response = generate_response(text)
        return {"response": response}
    except Exception as e:
        logger.error(f"Error processing request: {e}")
        return {"error": "An unexpected error occurred."}

@app.get("/")
async def root():
    """
    Root endpoint to check that the API is running.
    """
    return {"message": "Decision Helper API is running!"}

def run_bot():
    """
    Start the Telegram bot by running bot.py as a subprocess.
    """
    logger.info("Starting Telegram bot...")
    subprocess.Popen(["python3", "bot.py"])

if __name__ == "__main__":
    # When running app.py directly, start the bot as well.
    run_bot()

    # Start the FastAPI server with uvicorn.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
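
# Alternatively (a sketch, assuming this file is saved as app.py), the server
# can be started from the shell instead of the __main__ block:
#   uvicorn app:app --host 0.0.0.0 --port 7860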