from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
import httpx
from telegram import Update
from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, ContextTypes, filters
import os
import asyncio

import logging
# from transformers import pipeline
from huggingface_hub import InferenceClient, login
import langid

# Configure logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)

# Replace this with your Hugging Face Space URL
HUGGING_FACE_SPACE_URL = "https://demaking-decision-helper-bot.hf.space"

# Get Telegram bot token from environment variables
TOKEN = os.getenv("TELEGRAM_BOT_TOKEN")
if not TOKEN:
    raise ValueError("Missing Telegram Bot Token. Please set TELEGRAM_BOT_TOKEN environment variable.")


# Get Hugging Face API token from environment variable
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")

# Login and initialize the client
login(token=HF_HUB_TOKEN)
client = InferenceClient(api_key=HF_HUB_TOKEN)


app = FastAPI()

# Function to detect language
def detect_language(user_input):
    try:
        lang, _ = langid.classify(user_input)
        return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
    except Exception as e:
        logging.error(f"Language detection error: {e}")
        return "unsupported"


# Function to generate response
def generate_response(text):
    language = detect_language(text)

    if language == "hebrew":
        content = "转注谞讛 讘拽爪专讛 讗讘诇 转砖转祝 讗转 转讛诇讬讱 拽讘诇转 讛讛讞诇讟讜转 砖诇讱, " + text
        model = "microsoft/Phi-3.5-mini-instruct"
    elif language == "english":
        content = "keep it short but tell your decision making process, " + text
        model = "mistralai/Mistral-Nemo-Instruct-2407"
    else:
        return "Sorry, I only support Hebrew and English."

    messages = [{"role": "user", "content": content}]
    
    completion = client.chat.completions.create( 
        model=model,
        messages=messages,
        max_tokens=2048,
        temperature=0.5,
        top_p=0.7
    )
    return completion.choices[0].message.content
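
# Illustrative (not executed): generate_response("Should I rent or buy a flat?")
# detects English, prepends the decision-process instruction, and sends a single
# chat completion request to mistralai/Mistral-Nemo-Instruct-2407, returning the
# model's reply as a plain string; Hebrew input is routed to Phi-3.5-mini instead.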

    
@app.post("/generate_response")
async def generate_text(request: Request):
    """
    Endpoint to generate a response from the chat model.
    Expects a JSON with a "text" field.
    """
    try:
        data = await request.json()
        text = data.get("text", "").strip()
        if not text:
            return {"error": "No text provided"}
        response = generate_response(text)
        return {"response": response}
    except Exception as e:
        logging.error(f"Error processing request: {e}")
        return {"error": "An unexpected error occurred."}


@app.get("/")
async def root():
    """
    Root endpoint to check that the API is running.
    """
    return {"message": "Decision Helper API is running!"}


# -------------------------
# Function to fetch response from FastAPI
# -------------------------
async def call_hugging_face_space(input_data: str):
    """
    Sends a POST request to the FastAPI /generate_response endpoint with the user's input and returns the JSON response.
    """
    async with httpx.AsyncClient(timeout=45.0) as client:
        try:
            response = await client.post(
                f"{HUGGING_FACE_SPACE_URL}/generate_response",
                json={"text": input_data},  # matches the "text" field expected by the endpoint above
            )
            response.raise_for_status()  # Raise exception for HTTP 4XX/5XX errors
            return response.json()
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP Error: {e.response.status_code} - {e.response.text}")
            return {"response": "Error: API returned an error."}
        except httpx.ConnectError as e:
            # ConnectError is a subclass of RequestError, so it must be caught first.
            logger.error(f"Connection error: {e}")
            return {"response": "Error: Could not connect to the Hugging Face Space."}
        except httpx.RequestError as e:
            logger.error(f"Request Error: {e}")
            return {"response": "Error: Request Error. Could not reach API."}
        except Exception as e:
            logger.error(f"Unexpected Error: {e}")
            return {"response": "Error: Unexpected error occurred."}


@app.post("/webhook/{token}")
async def webhook(token: str, request: Request):
    if token != TOKEN:
        logger.error(f"Tokens doesn't match. {e}")
        return JSONResponse(status_code=403, content={"message": "Forbidden"})

    update = Update.de_json(await request.json(), None)
    if update.message is None or update.message.text is None:
        # Ignore non-text updates (photos, stickers, edited messages, etc.)
        return JSONResponse(content={"response": "Unsupported update type."})
    message_text = update.message.text

    result = await call_hugging_face_space(message_text)
    
    return JSONResponse(content=result)
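
# Illustrative (assumed deployment step, not executed): Telegram only calls this
# route after the webhook has been registered for the bot, e.g. by opening
#   https://api.telegram.org/bot<TELEGRAM_BOT_TOKEN>/setWebhook?url=https://demaking-decision-helper-bot.hf.space/webhook/<TELEGRAM_BOT_TOKEN>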


def start_telegram_bot():
    # This function runs in a background thread, which has no event loop by
    # default, so create and set one before building the application.
    asyncio.set_event_loop(asyncio.new_event_loop())
    application = ApplicationBuilder().token(TOKEN).build()

    # Set up a command handler
    async def start(update: Update, context: ContextTypes.DEFAULT_TYPE):
        await update.message.reply_text("Hello! Tell me your decision-making issue, and I'll try to help.")
        logger.info("Start command received.")

    
    async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE):
        user_text = update.message.text
        logger.info(f"User message: {user_text}")
        
        # Send the user text to the FastAPI server and get the response.
        result = await call_hugging_face_space(user_text)
        response_text = result.get("response", "Error generating response.")
        
        logger.info(f"API Response: {response_text}")
        await update.message.reply_text(response_text)
        
    # Register handlers at the function level, outside handle_message.
    application.add_handler(CommandHandler("start", start))
    application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_message))

    # Start the bot. stop_signals=None skips installing signal handlers,
    # which Python only allows from the main thread.
    application.run_polling(stop_signals=None)


if __name__ == "__main__":
    import threading
    
    # Start the Telegram bot in a separate thread
    threading.Thread(target=start_telegram_bot).start()
    
    # Start the FastAPI app
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
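

# Illustrative shell usage (the filename app.py and the token values are
# assumptions for this example):
#
#   export TELEGRAM_BOT_TOKEN="123456:ABC..."
#   export HUGGINGFACEHUB_API_TOKEN="hf_..."
#   python app.py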