Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,15 +1,15 @@
 from transformers import pipeline, BlipForConditionalGeneration, BlipProcessor
 import torchaudio
 from torchaudio.transforms import Resample
 import torch
 import gradio as gr

 # Initialize TTS model from Hugging Face
-tts_model_name = "
+tts_model_name = "Kamonwan/blip-image-captioning-new"
 tts = pipeline(task="text-to-speech", model=tts_model_name)

 # Initialize Blip model for image captioning
-model_id = "
+model_id = "Kamonwan/blip-image-captioning-new"
 blip_model = BlipForConditionalGeneration.from_pretrained(model_id)
 blip_processor = BlipProcessor.from_pretrained(model_id)
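The diff covers only the model setup at the top of app.py. A minimal sketch of how the rest of the file might wire these objects into a Gradio demo follows; the caption_and_speak function, the interface layout, and the audio handling are illustrative assumptions, not part of this commit, and the sketch presumes tts_model_name points at a checkpoint that actually supports the text-to-speech pipeline task.

# Hypothetical continuation of app.py; relies on tts, blip_model, and
# blip_processor defined above and the existing "import gradio as gr".

def caption_and_speak(image):
    # Caption the uploaded PIL image with BLIP.
    inputs = blip_processor(images=image, return_tensors="pt")
    output_ids = blip_model.generate(**inputs, max_new_tokens=50)
    caption = blip_processor.decode(output_ids[0], skip_special_tokens=True)

    # Read the caption aloud; the text-to-speech pipeline returns a dict with
    # "audio" (numpy samples) and "sampling_rate".
    speech = tts(caption)
    return caption, (speech["sampling_rate"], speech["audio"].squeeze())

demo = gr.Interface(
    fn=caption_and_speak,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Textbox(label="Caption"), gr.Audio(label="Spoken caption")],
)

if __name__ == "__main__":
    demo.launch()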