# AI File Chat — Hugging Face Space (Gradio app).
# Upload a file (.txt/.csv/.json/.pdf/.docx) and ask questions about its content.
import os
import json
import pandas as pd
from docx import Document
from PyPDF2 import PdfReader
from huggingface_hub import InferenceClient
import gradio as gr
# The Hugging Face API token is supplied through the Space secret "APIHUGGING";
# fail fast at startup if it is missing or empty.
API_KEY = os.getenv("APIHUGGING")
if not API_KEY:
    raise ValueError("Hugging Face API key not found. Please set the 'APIHUGGING' secret.")

# Single shared client used for all chat-completion calls below.
client = InferenceClient(api_key=API_KEY)
# Function to extract text from various file types
def extract_file_content(file_path):
    """Extract readable text from an uploaded file.

    Supports .txt, .csv, .json, .pdf and .docx; any other extension yields
    the placeholder string "Unsupported file type.".

    Parameters
    ----------
    file_path : str | os.PathLike | object
        Either a filesystem path, or a file wrapper exposing a ``.name``
        attribute pointing at the file on disk (as produced by ``gr.File``).

    Returns
    -------
    str
        The extracted text content.
    """
    # Resolve to a real path once: gradio file objects carry the temp-file
    # path in `.name`; plain strings/paths pass through unchanged. The
    # original code mixed the two styles and broke for one or the other.
    path = getattr(file_path, "name", file_path)
    ext = os.path.splitext(path)[1].lower()

    if ext == ".txt":
        # Open by path with explicit encoding instead of relying on the
        # wrapper's read() (which may be absent or yield str, not bytes).
        with open(path, "r", encoding="utf-8") as f:
            return f.read()
    elif ext == ".csv":
        return pd.read_csv(path).to_string(index=False)
    elif ext == ".json":
        with open(path, "r", encoding="utf-8") as f:
            return json.dumps(json.load(f), indent=4)
    elif ext == ".pdf":
        reader = PdfReader(path)
        # extract_text() can return None for image-only pages; coalesce to ""
        # so the join never raises TypeError.
        return "".join(page.extract_text() or "" for page in reader.pages)
    elif ext == ".docx":
        doc = Document(path)
        return "\n".join(para.text for para in doc.paragraphs)
    else:
        return "Unsupported file type."
# Function to interact with the Hugging Face model
def get_bot_response(file, prompt):
    """Answer *prompt* using the uploaded file's content via the HF chat API.

    Parameters
    ----------
    file : object
        The uploaded file (gradio file object or path); forwarded to
        ``extract_file_content``.
    prompt : str
        The user's question about the file.

    Returns
    -------
    str
        The model's reply, or an ``"Error: ..."`` string on any failure.
    """
    try:
        # Extract content from the uploaded file
        file_content = extract_file_content(file)
        # Single-turn conversation: the prompt plus the file text.
        messages = [
            {"role": "user", "content": f"{prompt}\n\nFile Content:\n{file_content}"}
        ]
        # BUG FIX: InferenceClient has no `chat_completions` attribute — the
        # documented method is `chat_completion(...)`, so the original call
        # raised AttributeError on every request (surfaced as "Error: ...").
        bot_response = client.chat_completion(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            max_tokens=500,
        )
        # Return only the assistant's text from the first choice.
        return bot_response.choices[0].message.content
    except Exception as e:
        # Top-level UI boundary: report the error as the response text rather
        # than crashing the Gradio handler.
        return f"Error: {str(e)}"
# Gradio Interface
# --- Gradio user interface -------------------------------------------------
with gr.Blocks() as app:
    # Page title and usage hint.
    gr.Markdown("# π AI File Chat with Hugging Face π")
    gr.Markdown("Upload any file and ask the AI a question based on the file's content!")
    with gr.Row():
        uploaded_file = gr.File(label="Upload File")
        question_box = gr.Textbox(label="Enter your question", placeholder="Ask something about the uploaded file...")
    answer_box = gr.Textbox(label="AI Response")
    ask_button = gr.Button("Submit")
    # Wire the button to the handler: (file, prompt) -> response text.
    ask_button.click(get_bot_response, inputs=[uploaded_file, question_box], outputs=answer_box)

# Launch the app only when executed as a script (not when imported).
if __name__ == "__main__":
    app.launch()