Nepjune committed
Commit eaa3654 · verified · 1 Parent(s): d8025d1

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -1,15 +1,15 @@
-from transformers import pipeline, BlipForConditionalGeneration, BlipProcessor, AutoTokenizer, AutoModelForSeq2SeqLM
+from transformers import pipeline, BlipForConditionalGeneration, BlipProcessor
 import torchaudio
 from torchaudio.transforms import Resample
 import torch
 import gradio as gr
 
 # Initialize TTS model from Hugging Face
-tts_model_name = "suno/bark"
+tts_model_name = "Kamonwan/blip-image-captioning-new"
 tts = pipeline(task="text-to-speech", model=tts_model_name)
 
 # Initialize Blip model for image captioning
-model_id = "dblasko/blip-dalle3-img2prompt"
+model_id = "Kamonwan/blip-image-captioning-new"
 blip_model = BlipForConditionalGeneration.from_pretrained(model_id)
 blip_processor = BlipProcessor.from_pretrained(model_id)
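
For context, here is a minimal sketch of how the objects set up above are typically wired together in a Gradio app. The rest of app.py is not part of this diff, so the function name caption_and_speak and the interface layout are illustrative assumptions, not the repository's actual code; the sketch also assumes tts_model_name resolves to a checkpoint that the text-to-speech pipeline supports.

from PIL import Image

def caption_and_speak(image: Image.Image):
    # Hypothetical helper, not shown in this diff.
    # Caption the uploaded image with the BLIP model loaded above.
    inputs = blip_processor(images=image, return_tensors="pt")
    output_ids = blip_model.generate(**inputs, max_new_tokens=50)
    caption = blip_processor.decode(output_ids[0], skip_special_tokens=True)

    # Read the caption aloud with the TTS pipeline; it returns a dict
    # with "audio" (numpy array) and "sampling_rate".
    speech = tts(caption)
    audio = speech["audio"].squeeze()
    return caption, (speech["sampling_rate"], audio)

iface = gr.Interface(
    fn=caption_and_speak,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Textbox(label="Caption"), gr.Audio(label="Spoken caption")],
)
iface.launch()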