|
import logging
import os
import subprocess

import langid
from fastapi import FastAPI, Request
from huggingface_hub import InferenceClient, login

logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)

HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

login(token=HF_HUB_TOKEN)
client = InferenceClient(token=HF_HUB_TOKEN)

app = FastAPI()
|
|
def detect_language(user_input):
    """
    Detect the language of the input text.

    Returns "hebrew" if Hebrew, "english" if English, or "unsupported" otherwise.
    """
    try:
        lang, _ = langid.classify(user_input)
        if lang == "he":
            return "hebrew"
        elif lang == "en":
            return "english"
        else:
            return "unsupported"
    except Exception as e:
        logger.error(f"Language detection error: {e}")
        return "unsupported"
|
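# Illustrative behavior under the mapping above (langid supports he/en out of the
# box; exact classifications and scores can vary by langid version):
#   detect_language("שלום, מה שלומך?")        ->  "hebrew"
#   detect_language("hello there")            ->  "english"
#   detect_language("bonjour tout le monde")  ->  "unsupported"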
|
|
def generate_response(text):
    """
    Generate a response based on the input text.

    Selects a prompt and model according to the detected language,
    and calls the Hugging Face chat completion API.
    """
    language = detect_language(text)
    if language == "hebrew":
        # Hebrew prompt; in English: "Keep the answer short, but tell how you reached the decision,"
        content = "תשמור על תשובה קצרה, אבל תספר איך קיבלת את ההחלטה, " + text
        model = "mistralai/Mistral-Nemo-Instruct-2407"
    elif language == "english":
        content = "keep it short but tell your decision making process, " + text
        model = "mistralai/Mistral-Nemo-Instruct-2407"
    else:
        return "Sorry, I only support Hebrew and English."

    messages = [{"role": "user", "content": content}]

    try:
        completion = client.chat.completions.create(
            model=model,  # route the request to the model selected for the detected language
            messages=messages,
            max_tokens=2048,
            temperature=0.5,
            top_p=0.7,
        )
        return completion.choices[0].message.content
    except Exception as e:
        logger.error(f"Error generating response: {e}")
        return "Error: Could not generate response."
|
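# Illustrative call (actual wording depends on the model and sampling parameters):
#   generate_response("Should I learn piano or guitar first?")
#   -> a short recommendation that also explains the decision-making process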
|
|
@app.post("/generate_response")
async def generate_text(request: Request):
    """
    API endpoint to generate a response from the chat model.

    Expects a JSON body with a "text" field.
    """
    try:
        data = await request.json()
        text = data.get("text", "").strip()
        if not text:
            return {"error": "No text provided"}
        response = generate_response(text)
        return {"response": response}
    except Exception as e:
        logger.error(f"Error processing request: {e}")
        # Errors are reported in-band as JSON rather than via HTTP status codes.
        return {"error": "An unexpected error occurred."}
|
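# Example request (assuming the server is running locally on port 7860, as in __main__ below):
#   curl -X POST http://localhost:7860/generate_response \
#        -H "Content-Type: application/json" \
#        -d '{"text": "Should I move to a new city?"}'
# Success response shape: {"response": "..."}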
|
|
@app.get("/")
async def root():
    """
    Root endpoint to check that the API is running.
    """
    return {"message": "Decision Helper API is running!"}
|
|
def run_bot():
    """
    Start the Telegram bot by running bot.py as a subprocess.
    """
    logger.info("Starting Telegram bot...")
    # Fire-and-forget: the bot process is not monitored or restarted if it exits.
    subprocess.Popen(["python3", "bot.py"])
|
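# bot.py is not shown in this file; the expected contract is that it forwards user
# messages to this API. A minimal hypothetical sketch of that call (names assumed):
#   import requests
#   reply = requests.post(
#       "http://localhost:7860/generate_response",
#       json={"text": user_message},
#   ).json().get("response")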
|
|
if __name__ == "__main__":
    run_bot()

    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
|
|