import os
import json
import pandas as pd
from docx import Document
from PyPDF2 import PdfReader
from huggingface_hub import InferenceClient
import gradio as gr
# Retrieve Hugging Face API key from environment variable (secret)
API_KEY = os.getenv("APIHUGGING")
if not API_KEY:
    raise ValueError("Hugging Face API key not found. Please set the 'APIHUGGING' secret.")

# Initialize Hugging Face Inference Client
client = InferenceClient(api_key=API_KEY)
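# Note: 'APIHUGGING' is the secret name this Space expects. It can be added under the
# Space's "Settings -> Variables and secrets" panel, or exported locally before testing.
# The token below is a placeholder, not a real value:
#   export APIHUGGING=hf_xxxxxxxxxxxxxxxxxxxx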
# Function to extract text from various file types
def extract_file_content(file_path):
    # Gradio's File component may pass a filepath string or a temp-file object with a .name attribute
    path = file_path.name if hasattr(file_path, "name") else file_path
    _, file_extension = os.path.splitext(path)
    file_extension = file_extension.lower()
    if file_extension == ".txt":
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    elif file_extension == ".csv":
        df = pd.read_csv(path)
        return df.to_string(index=False)
    elif file_extension == ".json":
        with open(path, "r", encoding="utf-8") as f:
            data = json.load(f)
        return json.dumps(data, indent=4)
    elif file_extension == ".pdf":
        reader = PdfReader(path)
        text = ""
        for page in reader.pages:
            text += page.extract_text() or ""  # extract_text() can return None for image-only pages
        return text
    elif file_extension == ".docx":
        doc = Document(path)
        return "\n".join(para.text for para in doc.paragraphs)
    else:
        return "Unsupported file type."
# Function to interact with the Hugging Face model
def get_bot_response(file, prompt):
    try:
        # Extract content from the uploaded file
        file_content = extract_file_content(file)
        # Prepare the conversation for the chat endpoint
        messages = [
            {"role": "user", "content": f"{prompt}\n\nFile Content:\n{file_content}"}
        ]
        # Call the Hugging Face chat-completion API
        bot_response = client.chat_completion(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            max_tokens=500,
        )
        # Return the model's reply
        return bot_response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"
# Gradio Interface
with gr.Blocks() as app:
    gr.Markdown("# AI File Chat with Hugging Face")
    gr.Markdown("Upload any file and ask the AI a question based on the file's content!")
    with gr.Row():
        file_input = gr.File(label="Upload File")
        prompt_input = gr.Textbox(label="Enter your question", placeholder="Ask something about the uploaded file...")
    output = gr.Textbox(label="AI Response")
    submit_button = gr.Button("Submit")
    submit_button.click(get_bot_response, inputs=[file_input, prompt_input], outputs=output)
# Launch the Gradio app
if __name__ == "__main__":
    app.launch()
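# When running locally, app.launch(share=True) can expose a temporary public link;
# inside a Hugging Face Space the plain launch() call above is all that is needed.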