# Read the Groq API key from the environment (e.g. a Space secret or
# `export GROQ_API_KEY=...`) rather than hardcoding it in source;
# the Groq client below picks it up via os.environ.
import os
import gradio as gr
import whisper
from gtts import gTTS
import io
from transformers import pipeline
from groq import Groq

# Initialize the Groq client
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Load the Whisper model
whisper_model = whisper.load_model("base")  # You can choose other models like "small", "medium", "large"

# Initialize the grammar correction pipeline
corrector = pipeline("text2text-generation", model="pszemraj/flan-t5-large-grammar-synthesis")
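# A quick, optional sanity check for the grammar model (left commented out so it
# adds no startup latency); the sample sentence is purely illustrative.
# print(corrector("he go to school yesterday")[0]['generated_text'])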

def process_audio(file_path):
    try:
        # Load the audio file
        audio = whisper.load_audio(file_path)

        # Transcribe the audio using Whisper
        result = whisper_model.transcribe(audio)
        user_text = result["text"]

        # Correct the grammar of the transcribed text
        corrected_text = corrector(user_text)[0]['generated_text'].strip()

        # Generate a response using Groq
        chat_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": corrected_text}],
            model="llama3-8b-8192",  # Replace with the correct model if necessary
        )

        # Access the response using dot notation
        response_message = chat_completion.choices[0].message.content.strip()

        # Convert the response text to speech
        tts = gTTS(response_message)
        response_audio_io = io.BytesIO()
        tts.write_to_fp(response_audio_io)  # Save the audio to the BytesIO object
        response_audio_io.seek(0)

        # Save audio to a file to ensure it's generated correctly
        with open("response.mp3", "wb") as audio_file:
            audio_file.write(response_audio_io.getvalue())

        # Return the original text, corrected text, and the path to the saved audio file
        return user_text, corrected_text, "response.mp3"

    except Exception as e:
        return f"An error occurred: {e}", None, None

# Create a Gradio interface with a submit button
iface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(type="filepath"),  # Use type="filepath"
    outputs=[
        gr.Textbox(label="User voice input into text"),  # Original user input text
        gr.Textbox(label="Corrected version of user input"),  # Corrected text
        gr.Audio(label="Response Audio")  # Response audio
    ],
    live=False,  # Ensure live mode is off to use a submit button
    title="Audio Processing with Grammar Correction",
    description="Upload an audio file, which will be transcribed, corrected for grammar, and then used to generate a response.",
    allow_flagging="never"
)
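
# launch() blocks until the server stops; standard Gradio options such as
# share=True (temporary public link) or server_name/server_port can be passed
# below if the defaults don't fit the deployment; they are optional arguments,
# not requirements of this app.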

iface.launch()



