import threading
import time

import gradio as gr
import requests
from transformers import pipeline

# Load the Whisper speech-recognition pipeline once at import time
# (adjust model size as needed: whisper-tiny / base / small / ...).
model = pipeline("automatic-speech-recognition", model="openai/whisper-base")


def get_public_ip():
    """Return this machine's public IP address as reported by api.ipify.org.

    Raises:
        requests.RequestException: on network failure, timeout, or HTTP error.
    """
    # timeout so a dead endpoint cannot hang the UI thread forever;
    # raise_for_status so an HTTP error doesn't surface as an opaque KeyError.
    response = requests.get("https://api.ipify.org?format=json", timeout=10)
    response.raise_for_status()
    return response.json()["ip"]


def simulate_interaction():
    # *** Placeholder for simulating activity ***
    print("Simulating interaction...")


def transcribe_audio(audio):
    """Transcribe an audio file with Whisper.

    Args:
        audio: filesystem path to the recorded audio clip, or None when the
            microphone widget produced no recording.

    Returns:
        The transcribed text, or "" when no audio was provided.
    """
    if audio is None:
        # The microphone component yields None if the user submits without
        # recording; the pipeline would raise on that, so short-circuit.
        return ""
    return model(audio)["text"]


def background_interaction():
    """Placeholder background loop: simulate activity every 5 minutes."""
    while True:
        simulate_interaction()
        time.sleep(60 * 5)


def build_interface():
    """Build the Gradio UI: one tab per function.

    gr.Interface accepts exactly one fn, so the two features are exposed as
    separate Interfaces combined with gr.TabbedInterface (the original passed
    a list of functions to a single Interface, which is not a valid call).
    """
    ip_iface = gr.Interface(
        fn=get_public_ip,
        inputs=None,  # get_public_ip takes no arguments; a button triggers it
        outputs=gr.Textbox(label="Public IP", placeholder="Public IP will appear here"),
    )
    transcribe_iface = gr.Interface(
        fn=transcribe_audio,
        inputs=gr.Audio(sources=["microphone"], type="filepath"),
        outputs=gr.Textbox(label="Transcription"),
    )
    return gr.TabbedInterface(
        [ip_iface, transcribe_iface],
        tab_names=["Public IP", "Transcription"],
        title="Public IP Retriever & Whisper Transcription",
    )


if __name__ == "__main__":
    # Daemon thread so the interpreter can exit even though the loop never
    # returns; the original non-daemon thread kept the process alive forever.
    interaction_thread = threading.Thread(target=background_interaction, daemon=True)
    interaction_thread.start()

    build_interface().launch()