|
import requests |
|
import time |
|
import threading |
|
import gradio as gr |
|
from transformers import pipeline |
|
|
|
|
|
# Whisper ASR pipeline (openai/whisper-base); downloads weights on first run.
# NOTE(review): loaded at import time — module import blocks until the model is ready.
model = pipeline("automatic-speech-recognition", model="openai/whisper-base")
|
|
|
def get_public_ip() -> str:
    """Return this machine's public IP address as reported by api.ipify.org.

    Returns:
        The public IPv4/IPv6 address as a string.

    Raises:
        requests.RequestException: on network failure, timeout, or non-2xx response.
    """
    # Timeout prevents the UI handler from hanging indefinitely on a slow network.
    response = requests.get('https://api.ipify.org?format=json', timeout=10)
    # Fail loudly on HTTP errors instead of trying to parse an error page as JSON.
    response.raise_for_status()
    return response.json()["ip"]
|
|
|
def simulate_interaction():
    """Emit a placeholder log line standing in for a real periodic interaction."""
    message = "Simulating interaction..."
    print(message)
|
|
|
def transcribe_audio(audio):
    """Run the module-level Whisper pipeline on *audio* and return its text.

    Args:
        audio: Audio input accepted by the pipeline (a file path here,
               per the Gradio `type="filepath"` component below).

    Returns:
        The transcribed text string from the pipeline result.
    """
    result = model(audio)
    return result["text"]
|
|
|
def background_interaction():
    """Run simulate_interaction once every five minutes, forever.

    Intended to be the target of a background thread; never returns.
    """
    interval_seconds = 60 * 5
    while True:
        simulate_interaction()
        time.sleep(interval_seconds)
|
|
|
|
|
def _handle_request(_ip_display, audio):
    """Single Gradio handler feeding both outputs.

    Args:
        _ip_display: Text from the first input box (display-only, ignored).
        audio: Path to the recorded audio file.

    Returns:
        Tuple of (public IP string, transcription string) — one value per output.
    """
    return get_public_ip(), transcribe_audio(audio)


# gr.Interface requires a single callable as `fn`; the original passed a list
# of two functions, which Interface does not dispatch — wrap them instead.
iface = gr.Interface(
    _handle_request,
    [
        gr.inputs.Textbox(lines=1, placeholder="Public IP will appear here"),
        gr.inputs.Audio(source="microphone", type="filepath"),
    ],
    [gr.outputs.Textbox(), gr.outputs.Textbox()],
    title="Public IP Retriever & Whisper Transcription",
    description="Get your approximate public IP and transcribe audio using Whisper."
)
|
|
|
|
|
# Daemon thread: a non-daemon thread running an infinite loop would keep the
# interpreter alive forever after the Gradio server shuts down.
interaction_thread = threading.Thread(target=background_interaction, daemon=True)
interaction_thread.start()


# Blocks serving the web UI until interrupted.
iface.launch()
|
|