legusxyz committed on
Commit
d4396b8
·
verified ·
1 Parent(s): 1ec6939

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -21
app.py CHANGED
@@ -12,29 +12,9 @@ app = FastAPI()
12
 
13
  # Load the Whisper model once during startup
14
  device = 0 if torch.cuda.is_available() else -1 # Use GPU if available, otherwise CPU
15
- # asr_pipeline = pipeline(model="openai/whisper-small", device=device) # Initialize Whisper model
16
  # asr_pipeline = pipeline( model="openai/whisper-small", device=device, language="pt")
17
 
18
- # Load model and processor
19
- model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
20
- processor = WhisperProcessor.from_pretrained("openai/whisper-small")
21
- tokenizer = processor.tokenizer # Explicitly extract the tokenizer from the processor
22
-
23
- # Set forced language to Portuguese (pt)
24
- forced_language_token_id = tokenizer.convert_tokens_to_ids("<|pt|>")
25
- model.config.forced_decoder_ids = [[2, forced_language_token_id]] # `2` refers to the decoder start token.
26
-
27
-
28
- # Initialize the pipeline
29
- # asr_pipeline = pipeline(task="automatic-speech-recognition", model=model, processor=processor, device=device)
30
- # Initialize the pipeline
31
- asr_pipeline = pipeline(
32
- task="automatic-speech-recognition",
33
- model=model,
34
- tokenizer=tokenizer, # Pass the tokenizer explicitly
35
- feature_extractor=processor.feature_extractor, # Pass the feature extractor explicitly
36
- device=device
37
- )
38
 
39
  # Basic GET endpoint
40
  @app.get("/")
 
12
 
13
  # Load the Whisper model once during startup
14
  device = 0 if torch.cuda.is_available() else -1 # Use GPU if available, otherwise CPU
15
+ asr_pipeline = pipeline(model="openai/whisper-tiny", device=device) # Initialize Whisper model
16
  # asr_pipeline = pipeline( model="openai/whisper-small", device=device, language="pt")
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  # Basic GET endpoint
20
  @app.get("/")