import streamlit as st
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image
from gtts import gTTS
import tempfile
import os

# Load the BLIP image-captioning processor and model (weights download on first use).
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
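# Optional (not in the original script): wrap the loading in st.cache_resource so
# Streamlit keeps one instance across reruns instead of reloading the model on
# every interaction, e.g.:
#
# @st.cache_resource
# def load_blip():
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
#     model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
#     return processor, model
#
# processor, model = load_blip()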

st.title("Image Caption Generator")
st.write("Take a photo with the camera and generate a description of the image.")

# Capture a photo from the user's webcam.
image_data = st.camera_input("Take a photo with the camera")

if image_data is not None:
    # Open the captured photo and show it back to the user.
    image = Image.open(image_data)
    st.image(image, caption="Captured image", use_column_width=True)

    # Generate a caption: preprocess the image, run BLIP, and decode the output tokens.
    inputs = processor(image, return_tensors="pt")
    out = model.generate(**inputs)
    caption = processor.decode(out[0], skip_special_tokens=True)

    st.write(f"Image caption: {caption}")

    # Convert the caption to speech (BLIP captions are in English, hence lang='en').
    tts = gTTS(text=caption, lang='en')

    # Write the MP3 to a temporary file so st.audio can play it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
        tts.save(fp.name)
        audio_file = fp.name

    st.audio(audio_file)
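
    # Alternative (not in the original script): keep the audio in memory instead of
    # writing a temp file; gTTS can write to a file-like object and st.audio also
    # accepts raw bytes:
    #
    # import io
    # buf = io.BytesIO()
    # tts.write_to_fp(buf)
    # st.audio(buf.getvalue(), format="audio/mp3")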

    # Remove the temporary audio file once it has been handed to st.audio.
    os.remove(audio_file)
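
# To run the app (assuming this file is saved as app.py):
#   streamlit run app.py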