# the_best_app.py: Decision Helper API. Detects whether incoming text is Hebrew or
# English and asks a hosted chat model to answer while explaining its reasoning.
import logging
import os
import time
from contextlib import asynccontextmanager

import langid
from fastapi import FastAPI, Request
from huggingface_hub import InferenceClient, login

# from transformers import pipeline  # only needed by the legacy pipeline code kept below
# Global variables
HF_HUB_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")


def current_time_gmt():
    # Current UTC time shifted to GMT+2, formatted as HH:MM:SS (used in debug prints)
    t = time.gmtime()
    return f"{(t.tm_hour + 2) % 24:02d}:{t.tm_min:02d}:{t.tm_sec:02d}"


# Verify Hugging Face token
if not HF_HUB_TOKEN:
    raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN in environment variables.")

login(token=HF_HUB_TOKEN)
client = InferenceClient(api_key=HF_HUB_TOKEN)
# Function to detect language
def detect_language(user_input):
    try:
        lang, _ = langid.classify(user_input)  # langid.classify returns a tuple (language, confidence)
        print(f"Detected language code: {lang}, ", f"current time: {current_time_gmt()}")
        return "hebrew" if lang == "he" else "english" if lang == "en" else "unsupported"
    except Exception as e:
        print(f"Language detection error: {e}")
        return "unsupported"
def generate_response(text):
    language = detect_language(text)
    print(f"Detected language: {language}, ", f"current time: {current_time_gmt()}")

    if language == "hebrew":
        # Hebrew prompt: "Answer briefly, but share your decision-making process"
        content = "תענה בקצרה אבל תשתף את תהליך קבלת ההחלטות שלך, " + text
        print("content: ", content)
        messages = [
            {"role": "user", "content": content}
        ]
        print(f"Messages: {messages}, ", f"current time: {current_time_gmt()}")
        completion = client.chat.completions.create(
            model="microsoft/Phi-3.5-mini-instruct",  # strong in English, weaker in Hebrew; TODO: swap in a model with better Hebrew support
            messages=messages,
            max_tokens=2048,
            temperature=0.5,
            top_p=0.7
        )
        # print("\ncompletion: ", completion.choices[0].message.content, f"\ncurrent time: {current_time_gmt()}")
        return completion.choices[0].message.content

    elif language == "english":
        content = "keep it short but tell your decision making process, " + text
        print("content: ", content)
        messages = [
            {"role": "user", "content": content}
        ]
        print(f"Messages: {messages}, ", f"current time: {current_time_gmt()}")
        completion = client.chat.completions.create(
            model="mistralai/Mistral-Nemo-Instruct-2407",  # handles English well
            messages=messages,
            max_tokens=2048,
            temperature=0.5,
            top_p=0.7
        )
        return completion.choices[0].message.content

    return "Sorry, I only support Hebrew and English."
# Legacy pipeline-based implementation, kept for reference:
# if language == "hebrew" or language == "english":
#     # hebrew_generator = pipeline("text-generation", model="onlplab/alephbert-base")
#     output = lang_generator(text, max_length=250, truncation=True)
#     print(f"Model output: {output}, ", f"current time: {current_time_gmt()}")  # Debugging
#     return output[0]["generated_text"]
# elif language == "english":
#     # english_generator = pipeline("text-generation", model="mistralai/Mistral-Nemo-Instruct-2407", max_new_tokens=128)
#     # english_generator = pipeline("text-generation", model="distilgpt2")
#     output = english_generator(text, max_length=100, truncation=True)
#     print(f"English model output: {output}, ", f"current time: {current_time_gmt()}")  # Debugging
#     return output[0]["generated_text"]
# return "Sorry, I only support Hebrew and English."
# FastAPI lifespan event
@asynccontextmanager
async def lifespan(app: FastAPI):
    print("Starting application...")
    yield  # Wait until app closes
    print("Shutting down application...")


# Create FastAPI app
app = FastAPI(lifespan=lifespan)


@app.get("/")
async def root():
    return {"message": "Decision Helper API is running!"}
@app.post("/generate_response")
async def generate_text(request: Request):
    try:
        data = await request.json()
        if not data or "text" not in data:
            logging.error("Invalid request received")
            return {"error": "Invalid request. Please send JSON with a 'text' field."}

        text = data["text"].strip()
        if not text:
            return {"error": "No text provided"}

        print(f"Received text: {text}")  # Debugging
        response = generate_response(text)
        print(f"Generated response: {response}")  # Debugging
        return {"response": response}
    except Exception as e:
        logging.error(f"Error processing request: {e}")
        return {"error": "An unexpected error occurred."}
# Run the server
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
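
# Example client call once the server is running (illustrative, assumes the default
# host/port above and that the `requests` package is installed):
#   import requests
#   r = requests.post("http://localhost:7860/generate_response",
#                     json={"text": "Should I buy a laptop now or wait for the next generation?"})
#   print(r.json())  # -> {"response": "..."} or {"error": "..."}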