# jsonvid / app.py
import os
import requests
import json
import time
import subprocess
import gradio as gr
import uuid
from dotenv import load_dotenv
from urllib.parse import urlparse
# Load environment variables
load_dotenv()
# API Keys
A_KEY = os.getenv("A_KEY")
B_KEY = os.getenv("B_KEY")
# URLs
API_URL = os.getenv("API_URL")
UPLOAD_URL = os.getenv("UPLOAD_URL")
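# Fetch the list of available ElevenLabs voices; returns (name, voice_id) tuples, or an empty list on error.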
def get_voices():
url = "https://api.elevenlabs.io/v1/voices"
headers = {
"Accept": "application/json",
"xi-api-key": A_KEY
}
response = requests.get(url, headers=headers)
if response.status_code != 200:
return []
return [(voice['name'], voice['voice_id']) for voice in response.json().get('voices', [])]
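# Generate speech for the given text with ElevenLabs TTS and save it to a per-session MP3 file.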
def text_to_speech(voice_id, text, session_id):
url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
headers = {
"Accept": "audio/mpeg",
"Content-Type": "application/json",
"xi-api-key": A_KEY
}
data = {
"text": text,
"model_id": "eleven_turbo_v2_5",
"voice_settings": {
"stability": 0.5,
"similarity_boost": 0.5
}
}
response = requests.post(url, json=data, headers=headers)
if response.status_code != 200:
return None
# Save temporary audio file with session ID
audio_file_path = f'temp_voice_{session_id}.mp3'
with open(audio_file_path, 'wb') as audio_file:
audio_file.write(response.content)
return audio_file_path
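# Upload a local file to the host at UPLOAD_URL (a 'fileupload' form API) and return the resulting URL.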
def upload_file(file_path):
with open(file_path, 'rb') as file:
files = {'fileToUpload': (os.path.basename(file_path), file)}
data = {'reqtype': 'fileupload'}
response = requests.post(UPLOAD_URL, files=files, data=data)
if response.status_code == 200:
return response.text.strip()
return None
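# Submit a lipsync job for the given video and audio URLs and return the API's JSON response.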
def lipsync_api_call(video_url, audio_url):
headers = {
"Content-Type": "application/json",
"x-api-key": B_KEY
}
data = {
"audioUrl": audio_url,
"videoUrl": video_url,
"maxCredits": 1000,
"model": "sync-1.7.1-beta",
"synergize": True,
"pads": [0, 5, 0, 0],
"synergizerStrength": 1
}
response = requests.post(API_URL, headers=headers, data=json.dumps(data))
return response.json()
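# Poll the lipsync job every 10 seconds (up to ~5 minutes) until it completes or fails; return the result video URL or None.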
def check_job_status(job_id):
headers = {"x-api-key": B_KEY}
max_attempts = 30 # Limit the number of attempts
for _ in range(max_attempts):
response = requests.get(f"{API_URL}/{job_id}", headers=headers)
data = response.json()
if data["status"] == "COMPLETED":
return data["videoUrl"]
elif data["status"] == "FAILED":
return None
time.sleep(10)
return None
def get_media_duration(file_path):
# Fetch media duration using ffprobe
cmd = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', file_path]
result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return float(result.stdout.strip())
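# Mux the generated audio with the video, trimming or looping the video so it matches the audio duration.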
def combine_audio_video(video_path, audio_path, output_path):
# Get durations of both video and audio
video_duration = get_media_duration(video_path)
audio_duration = get_media_duration(audio_path)
if video_duration > audio_duration:
# Trim video to match the audio length
cmd = [
'ffmpeg', '-i', video_path, '-i', audio_path,
'-t', str(audio_duration), # Trim video to audio duration
'-map', '0:v', '-map', '1:a',
'-c:v', 'copy', '-c:a', 'aac',
'-y', output_path
]
else:
# Loop video if it's shorter than audio
loop_count = int(audio_duration // video_duration) + 1 # Calculate how many times to loop
cmd = [
'ffmpeg', '-stream_loop', str(loop_count), '-i', video_path, '-i', audio_path,
'-t', str(audio_duration), # Match the duration of the final video with the audio
'-map', '0:v', '-map', '1:a',
            '-c:v', 'copy', '-c:a', 'aac',
'-shortest', '-y', output_path
]
subprocess.run(cmd, check=True)
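# Treat the URL as an image if its path ends with a common image file extension.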
def is_image_url(url):
parsed = urlparse(url)
path = parsed.path.lower()
return path.endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.tiff', '.webp', '.heic', '.svg', '.ico'))
def create_video_from_image(image_url, output_path, duration=10):
# Download the image
response = requests.get(image_url)
if response.status_code != 200:
raise Exception("Failed to download the image")
temp_image_path = f"temp_image_{uuid.uuid4()}.jpg"
with open(temp_image_path, 'wb') as f:
f.write(response.content)
    # Create a silent video of the requested duration from the still image
cmd = [
'ffmpeg', '-loop', '1', '-i', temp_image_path,
'-c:v', 'libx264', '-t', str(duration), '-pix_fmt', 'yuv420p',
'-vf', 'scale=trunc(iw/2)*2:trunc(ih/2)*2',
'-y', output_path
]
subprocess.run(cmd, check=True)
# Clean up the temporary image file
os.remove(temp_image_path)
return output_path
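# End-to-end pipeline: text-to-speech, optional image-to-video conversion, uploads, lipsync, and a local mux fallback on failure.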
def process_video(voice, media_url, text, progress=gr.Progress()):
session_id = str(uuid.uuid4())
progress(0, desc="Generating speech...")
audio_path = text_to_speech(voice, text, session_id)
if not audio_path:
return None, "Failed to generate speech audio."
progress(0.2, desc="Processing media...")
try:
if is_image_url(media_url):
progress(0.3, desc="Converting image to video...")
video_path = f"temp_video_{session_id}.mp4"
create_video_from_image(media_url, video_path)
progress(0.4, desc="Uploading converted video...")
video_url = upload_file(video_path)
if not video_url:
raise Exception("Failed to upload converted video")
else:
video_url = media_url
progress(0.5, desc="Uploading audio...")
audio_url = upload_file(audio_path)
if not audio_url:
raise Exception("Failed to upload audio file")
progress(0.6, desc="Initiating lipsync...")
job_data = lipsync_api_call(video_url, audio_url)
if "error" in job_data or "message" in job_data:
raise Exception(job_data.get("error", job_data.get("message", "Unknown error")))
job_id = job_data["id"]
progress(0.7, desc="Processing lipsync...")
result_url = check_job_status(job_id)
if result_url:
progress(0.9, desc="Downloading result...")
response = requests.get(result_url)
output_path = f"output_{session_id}.mp4"
with open(output_path, "wb") as f:
f.write(response.content)
progress(1.0, desc="Complete!")
return output_path, "Lipsync completed successfully!"
else:
raise Exception("Lipsync processing failed or timed out")
except Exception as e:
progress(0.8, desc="Falling back to simple combination...")
try:
if 'video_path' not in locals():
# Download the video from the URL if it wasn't created from an image
video_response = requests.get(video_url)
video_path = f"temp_video_{session_id}.mp4"
with open(video_path, "wb") as f:
f.write(video_response.content)
output_path = f"output_{session_id}.mp4"
combine_audio_video(video_path, audio_path, output_path)
progress(1.0, desc="Complete!")
return output_path, f"Used fallback method. Original error: {str(e)}"
except Exception as fallback_error:
return None, f"All methods failed. Error: {str(fallback_error)}"
finally:
# Cleanup
if os.path.exists(audio_path):
os.remove(audio_path)
if os.path.exists(f"temp_video_{session_id}.mp4"):
os.remove(f"temp_video_{session_id}.mp4")
def create_interface():
voices = get_voices()
css = """
#component-0 > :not(.prose) {display: none !important;}
footer {display: none !important;}
"""
with gr.Blocks(css=css) as app:
gr.Markdown("# JSON Train")
with gr.Row():
with gr.Column():
voice_dropdown = gr.Dropdown(choices=[v[0] for v in voices], label="Select Voice", value=voices[0][0] if voices else None)
media_url_input = gr.Textbox(label="Enter Video or Image URL")
text_input = gr.Textbox(label="Enter text", lines=3)
generate_btn = gr.Button("Generate Video")
with gr.Column():
video_output = gr.Video(label="Generated Video")
status_output = gr.Textbox(label="Status", interactive=False)
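        # Map the selected voice name back to its ElevenLabs voice_id, then run the main pipeline.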
def on_generate(voice_name, media_url, text):
voice_id = next((v[1] for v in voices if v[0] == voice_name), None)
if not voice_id:
return None, "Invalid voice selected."
return process_video(voice_id, media_url, text)
generate_btn.click(
fn=on_generate,
inputs=[voice_dropdown, media_url_input, text_input],
outputs=[video_output, status_output]
)
return app
if __name__ == "__main__":
app = create_interface()
app.launch()