import gradio as gr

# These SDK imports are only needed once the placeholder transcription
# functions below are replaced with real API calls.
import openai
import deepgram  # installed via the deepgram-sdk package
import assemblyai

from transformers import pipeline

# Initialize the sentiment analysis model. Note: this is a Tamil sentiment
# model, so transcriptions are expected to be Tamil text.
sentiment_analyzer = pipeline("text-classification", model="Vasanth/tamil-sentiment-distilbert")

# Placeholder transcription functions - replace with your actual API implementation
def whisper_openai_transcribe(audio_file):
    # Replace with actual Whisper API transcription code
    transcription = "This is a dummy transcription from Whisper OpenAI API"
    return transcription
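
# A real implementation might look like this sketch (assumes the openai>=1.0
# client and an OPENAI_API_KEY environment variable; untested here):
#
#     from openai import OpenAI
#     client = OpenAI()
#
#     def whisper_openai_transcribe(audio_file):
#         with open(audio_file, "rb") as f:
#             result = client.audio.transcriptions.create(model="whisper-1", file=f)
#         return result.text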

def deepgram_transcribe(audio_file):
    # Replace with actual Deepgram API transcription code
    transcription = "This is a dummy transcription from Deepgram API"
    return transcription
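
# A sketch against the Deepgram Python SDK v3 (assumes a DEEPGRAM_API_KEY
# environment variable; the "nova-2" model choice is an assumption):
#
#     from deepgram import DeepgramClient, PrerecordedOptions
#     dg = DeepgramClient()  # falls back to DEEPGRAM_API_KEY from the environment
#
#     def deepgram_transcribe(audio_file):
#         with open(audio_file, "rb") as f:
#             payload = {"buffer": f.read()}
#         options = PrerecordedOptions(model="nova-2", smart_format=True)
#         response = dg.listen.prerecorded.v("1").transcribe_file(payload, options)
#         return response.results.channels[0].alternatives[0].transcript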

def assemblyai_transcribe(audio_file):
    # Replace with actual Assembly AI transcription code
    transcription = "This is a dummy transcription from Assembly AI API"
    return transcription
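
# A sketch using the assemblyai SDK (assumes an ASSEMBLYAI_API_KEY
# environment variable):
#
#     import os
#     import assemblyai as aai
#     aai.settings.api_key = os.environ["ASSEMBLYAI_API_KEY"]
#
#     def assemblyai_transcribe(audio_file):
#         transcript = aai.Transcriber().transcribe(audio_file)
#         return transcript.text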

# Sentiment analysis function
def analyze_sentiment(text):
    # The pipeline returns a list of dicts, e.g. [{'label': ..., 'score': ...}];
    # take the label and score of the top prediction.
    sentiment = sentiment_analyzer(text)
    return sentiment[0]['label'], sentiment[0]['score']

# Main function to run transcription and sentiment analysis
def process_transcription_and_sentiment(audio_file, model_choice):
    # Transcription
    if model_choice == "Whisper OpenAI":
        transcription = whisper_openai_transcribe(audio_file)
    elif model_choice == "Deepgram API":
        transcription = deepgram_transcribe(audio_file)
    elif model_choice == "Assembly AI API":
        transcription = assemblyai_transcribe(audio_file)
    else:
        raise gr.Error(f"Unknown transcription model: {model_choice}")

    # Sentiment analysis
    sentiment_label, sentiment_score = analyze_sentiment(transcription)
    return transcription, f"Sentiment: {sentiment_label} with score {sentiment_score:.3f}"

# Gradio interface setup
def create_interface():
    with gr.Blocks() as demo:
        gr.Markdown("### Audio Transcription and Sentiment Analysis")

        with gr.Row():
            # type="filepath" hands the transcription functions a path on disk
            # (sources=[...] / type="filepath" match the Gradio 4 API)
            audio_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio File")
            model_choice = gr.Dropdown(
                choices=["Whisper OpenAI", "Deepgram API", "Assembly AI API"],
                label="Choose Transcription Model",
                value="Whisper OpenAI"
            )

        # Textboxes for transcription and sentiment analysis
        transcription_output = gr.Textbox(label="Transcription", lines=5)
        sentiment_output = gr.Textbox(label="Sentiment Analysis", lines=5)

        # Submit button
        submit_button = gr.Button("Process")

        # When the button is clicked, call `process_transcription_and_sentiment`
        submit_button.click(process_transcription_and_sentiment,
                            inputs=[audio_input, model_choice],
                            outputs=[transcription_output, sentiment_output])

    demo.launch()

if __name__ == "__main__":
    create_interface()