example initial prompt
app.py CHANGED
@@ -239,6 +239,7 @@ if "audio" in locals():
     pipeline = Pipeline.from_pretrained(
         "pyannote/speaker-diarization-3.0", use_auth_token=hf_api_key)
     if torch.cuda.device_count() > 0:  # use gpu if available
+        st.write('Using cuda - GPU')
         pipeline.to(torch.device('cuda'))
 
     # run the pipeline on an audio file
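
This hunk only adds a st.write() call so the Streamlit UI reports when diarization runs on a GPU. For context, a minimal sketch of the surrounding step, assuming the pyannote.audio 3.x API and placeholder values for hf_api_key and the audio path (both come from elsewhere in app.py):

import torch
import streamlit as st
from pyannote.audio import Pipeline

hf_api_key = "hf_..."  # placeholder Hugging Face token

# load the diarization pipeline and move it to the GPU when one is available
pipeline = Pipeline.from_pretrained(
    "pyannote/speaker-diarization-3.0", use_auth_token=hf_api_key)
if torch.cuda.device_count() > 0:
    st.write('Using cuda - GPU')  # the line added by this commit
    pipeline.to(torch.device('cuda'))

# run the pipeline on an audio file (placeholder path) and show speaker turns
diarization = pipeline("uploaded_audio.wav")
for turn, _, speaker in diarization.itertracks(yield_label=True):
    st.write(f"{speaker}: {turn.start:.1f}s - {turn.end:.1f}s")
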
@@ -371,6 +372,7 @@ if "audio" in locals():
         return initial_response['choices'][0]['message']['content']
 
     # Chat container
+    st.session_state.messages[1]['content'] = st.session_state.messages[1]['content'].format(transcript_string)
     with container_transcript_chat:
         # get a summary of transcript from ChatGpt
         try:
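
The second addition injects the diarized transcript into the app's pre-seeded prompt before the chat container renders. That presumably relies on the message at index 1 of st.session_state.messages being seeded with a str.format placeholder ("{}"); the commit title "example initial prompt" points the same way. A minimal sketch under that assumption, with placeholder prompt text and transcript:

import streamlit as st

# assumed seeding of the chat history; the real prompt text lives in app.py
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "system", "content": "You are a helpful meeting assistant."},
        {"role": "user", "content": "Summarize this transcript:\n\n{}"},
    ]

transcript_string = "SPEAKER_00: Hello everyone ..."  # placeholder transcript

# the line added by this commit: fill the placeholder with the transcript
st.session_state.messages[1]['content'] = (
    st.session_state.messages[1]['content'].format(transcript_string))

On later Streamlit reruns the placeholder has already been consumed, so repeating the format() call is normally a no-op (unless the transcript itself happens to contain brace characters).
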