# app.py — Hugging Face Space: Gradio chat UI backed by an OpenAI fine-tuned model.
import os
import openai
import logging
import gradio as gr
import asyncio
from typing import Dict, Any
from cryptography.fernet import Fernet
# Configure root logging: emit INFO-and-above records (default stderr handler).
logging.basicConfig(level=logging.INFO)
class EnvironmentManager:
    """Handles environment variable validation."""

    @staticmethod
    def load_env_variables(required_vars: tuple = ("HF_KEY",)) -> Dict[str, str]:
        """Read the required environment variables and fail fast if any is unset.

        Args:
            required_vars: Names of the variables that must be present and
                non-empty. Defaults to ``("HF_KEY",)`` for backward
                compatibility with the original hard-coded list.

        Returns:
            Mapping of variable name to its value.

        Raises:
            ValueError: If any required variable is missing or empty.
        """
        env_vars = {var: os.getenv(var) for var in required_vars}
        # Empty strings count as missing — a blank key is as useless as no key.
        missing_vars = [var for var, value in env_vars.items() if not value]
        if missing_vars:
            raise ValueError(f"Missing environment variables: {', '.join(missing_vars)}")
        return env_vars
class EncryptionManager:
    """Encrypts and decrypts sensitive strings with a Fernet symmetric cipher."""

    def __init__(self, key: str):
        # Fernet expects the key as url-safe base64 bytes, hence the encode.
        self.cipher = Fernet(key.encode())

    def encrypt(self, data: str) -> str:
        """Return *data* as a Fernet token, decoded back to str."""
        token = self.cipher.encrypt(data.encode())
        return token.decode()

    def decrypt(self, encrypted_data: str) -> str:
        """Invert :meth:`encrypt`: recover the original plaintext string."""
        plaintext = self.cipher.decrypt(encrypted_data.encode())
        return plaintext.decode()
class AICore:
    """AI Core system integrating OpenAI's GPT API.

    NOTE(review): ``openai.ChatCompletion.acreate`` only exists in the legacy
    ``openai<1.0`` client — confirm the pinned SDK version in requirements.
    """

    def __init__(self, env_vars: Dict[str, str]):
        # env_vars must contain "HF_KEY" (validated by EnvironmentManager);
        # the same value is used both as the Fernet key and the OpenAI api_key.
        self.env_vars = env_vars
        self.encryption_manager = EncryptionManager(env_vars["HF_KEY"])
        self.openai_api_key = env_vars["HF_KEY"]

    async def generate_response(self, query: str) -> Dict[str, Any]:
        """Send *query* to the chat model and return its reply.

        The query is encrypted only for the returned payload; the plaintext
        query is what is actually sent to the API.

        Returns:
            ``{"encrypted_query": ..., "model_response": ...}`` on success,
            ``{"error": "Failed to generate response"}`` on any failure.
        """
        try:
            encrypted_query = self.encryption_manager.encrypt(query)
            response = await openai.ChatCompletion.acreate(
                model="ft:gpt-4o-2024-08-06:raiffs-bits:codettev7",  # Ensure this model is supported
                messages=[
                    {"role": "system", "content": "You are a helpful AI assistant."},
                    {"role": "user", "content": query}
                ],
                api_key=self.openai_api_key
            )
            model_response = response["choices"][0]["message"]["content"]
            return {
                "encrypted_query": encrypted_query,
                "model_response": model_response
            }
        except Exception as e:
            # logging.exception records the traceback; lazy %-formatting avoids
            # building the message when the log level filters it out.
            logging.exception("Error generating response: %s", e)
            return {"error": "Failed to generate response"}
# Hugging Face Gradio App
def main():
    """Validate the environment, build the AI core, and serve the Gradio UI."""
    try:
        env_vars = EnvironmentManager.load_env_variables()
        core = AICore(env_vars)

        async def _reply_async(message: str) -> str:
            data = await core.generate_response(message)
            return data.get("model_response", "Error: Response not available")

        def reply(message: str) -> str:
            # Gradio calls this synchronously; drive the coroutine to completion.
            return asyncio.run(_reply_async(message))

        demo = gr.Interface(
            fn=reply,
            inputs="text",
            outputs="text",
            title="AI Chatbot - Hugging Face Space"
        )
        # Bind to all interfaces so the Space's proxy can reach the server.
        demo.launch(server_name="0.0.0.0", server_port=7860)
    except Exception as e:
        logging.error(f"Application failed to start: {e}")
# Script entry point: only launch the app when executed directly, not on import.
if __name__ == "__main__":
    main()