import streamlit as st
import requests
from PIL import Image
import io
import os
API_URL = "https://api-inference.huggingface.co/models/Qwen/Qwen-VL"
headers = {
    "Authorization": f"Bearer {os.getenv('secret')}"
}
st.title("OCR with Qwen-VL")
uploaded_image = st.file_uploader("Upload an image for OCR", type=["jpg", "jpeg", "png"])
if uploaded_image is not None:
    image = Image.open(uploaded_image)
    st.image(image, caption="Uploaded Image", use_column_width=True)

    # Re-encode the uploaded image as PNG bytes for the API request
    img_bytes = io.BytesIO()
    image.save(img_bytes, format='PNG')
    img_bytes = img_bytes.getvalue()

    st.write("Processing the image...")

    # The Hugging Face Inference API expects the raw image bytes as the request body
    response = requests.post(API_URL, headers=headers, data=img_bytes)

    if response.status_code == 200:
        result = response.json()
        # Image-to-text endpoints typically return a list of {"generated_text": ...} dicts
        if isinstance(result, list) and result:
            result = result[0]
        st.write("Extracted Text:")
        st.text(result.get("generated_text", "No text found in the image"))
    else:
        st.write(f"Error: {response.status_code} - {response.text}")