Brunwo committed · Commit 30933b5 · 1 Parent(s): 7071579

default to eng, working separate js UI
Files changed:
- app.py (+7 -6)
- index.html (+10 -1)
- script.js (+66 -27)
- testtranslations.py (+62 -0)
app.py
CHANGED
@@ -71,7 +71,7 @@ def read_readme():
 
 
 # Initialize _ with a default function
-_ =
+_ = setup_translation('en')
 
 def update_language(lang):
 
@@ -98,7 +98,7 @@ class InstructionTemplate(BaseModel):
 # Define the instruction templates
 INSTRUCTION_TEMPLATES = {
     "podcast": InstructionTemplate(
-        intro=_("podcast.intro"),
+        intro= _("podcast.intro"),
         text_instructions=_("podcast.text_instructions"),
         scratch_pad=_("podcast.scratch_pad"),
         prelude=_("podcast.prelude"),
@@ -269,11 +269,11 @@ def generate_audio(
     if not os.getenv("OPENAI_API_KEY") and not openai_api_key:
         raise gr.Error("OpenAI API key is required")
 
-    combined_text = original_text or ""
+    # combined_text = original_text or ""
 
-    # If there's no original text, fetch it from the provided URL
-    if not combined_text:
-        combined_text = get_text_from_url(url)
+    # # If there's no original text, fetch it from the provided URL
+    # if not combined_text:
+    combined_text = get_text_from_url(url)
 
     # Configure the LLM based on selected model and api_base
     @retry(retry=retry_if_exception_type(ValidationError))
@@ -314,6 +314,7 @@ def generate_audio(
     if debug:
         logger.info (edited_transcript_processed)
        logger.info (user_feedback_processed)
+        logger.info (combined_text)
 
     # Generate the dialogue using the LLM
     llm_output = generate_dialogue(
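Note: the new default _ = setup_translation('en') assumes a compiled English catalog at locales/en/LC_MESSAGES/messages.mo; gettext.translation() raises FileNotFoundError when that file is missing. A minimal defensive sketch (not the committed code) that degrades to untranslated strings:

import gettext
import os

def setup_translation(lang_code):
    # Sketch only: fallback=True returns NullTranslations (identity
    # strings) when no compiled .mo catalog exists for lang_code.
    locale_path = os.path.join(os.path.dirname(__file__), 'locales')
    translation = gettext.translation(
        'messages', localedir=locale_path,
        languages=[lang_code], fallback=True,
    )
    return translation.gettext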
index.html
CHANGED
@@ -13,9 +13,18 @@
     <h1>web to podcast player</h1>
     <audio id="player" controls></audio>
 
+    <!-- Add play button -->
+    <button id="playButton" style="display: none;">Play</button>
+
     <!-- Add settings button -->
     <button id="settingsBtn">Settings</button>
 
+    <!-- Add transcription container -->
+    <div id="transcriptionContainer" style="display: none;">
+        <h2>Transcription:</h2>
+        <p id="transcription"></p>
+    </div>
+
     <!-- Add settings modal -->
     <div id="settingsModal" class="modal">
         <div class="modal-content">
@@ -31,7 +40,7 @@
             <button id="saveSettings">Save</button>
         </div>
     </div>
-
+    <div id="loadingIndicator" style="display: none;">Loading audio...</div>
 
     <script type="module" src="script.js"></script>
     <!-- <script src="/script.js"></script> -->
script.js
CHANGED
@@ -1,5 +1,6 @@
 import { Client } from "@gradio/client";
 
+
 document.addEventListener("DOMContentLoaded", function() {
     const audioPlayer = document.getElementById('player');
     const settingsBtn = document.getElementById('settingsBtn');
@@ -89,6 +90,15 @@ document.addEventListener("DOMContentLoaded", function() {
     // Function to fetch MP3 from API endpoint when a link is shared
     async function fetchMp3(link) {
         console.log('Starting fetchMp3 function with link:', link);
+        const loadingIndicator = document.getElementById('loadingIndicator');
+        const audioPlayer = document.getElementById('player');
+        const playButton = document.getElementById('playButton');
+        const transcriptionContainer = document.getElementById('transcriptionContainer');
+        const transcriptionElement = document.getElementById('transcription');
+
+        if (loadingIndicator) loadingIndicator.style.display = 'block';
+        if (transcriptionContainer) transcriptionContainer.style.display = 'none';
+
         try {
             const apiKey = localStorage.getItem('openaiApiKey');
             const apiServer = localStorage.getItem('apiServer');
@@ -113,7 +123,7 @@ document.addEventListener("DOMContentLoaded", function() {
             console.log('Gradio client created successfully');
 
             console.log(await client.view_api())
-
+
             console.log('Preparing to make prediction...');
             // Make the prediction
 
@@ -131,26 +141,68 @@ document.addEventListener("DOMContentLoaded", function() {
                 // debug: true,
             });
 
-
+
+            console.log('Raw result from predict:', result);
+            console.log('Result data:', result.data);
 
 
-
+            console.log('Prediction made successfully');
 
-
-
-
+            // Check if result.data is an array and has at least one element
+            if (!Array.isArray(result.data) || result.data.length === 0) {
+                throw new Error('Unexpected result format from server');
+            }
 
-
+            // Assuming the audio file URL is the second item in the result
+            const audioFileUrl = result.data[0].url;
+            console.log('Received audio file URL:', audioFileUrl);
+
+            // Check if the URL is valid
+            if (typeof audioFileUrl !== 'string' || !audioFileUrl.startsWith('http')) {
+                throw new Error(`Invalid audio file URL received: ${audioFileUrl}`);
+            }
+
+            // Set the audio player source
+            if (audioPlayer) {
                 audioPlayer.src = audioFileUrl;
-
-
-        }
-
-
-
+            } else {
+                throw new Error('Audio player element not found');
+            }
+
+            // Show play button
+            if (playButton) {
+                playButton.style.display = 'block';
+                playButton.onclick = () => audioPlayer.play();
+            } else {
+                console.warn('Play button not found, audio controls will be used instead');
+            }
+
+            // Display the transcription
+            if (transcriptionElement && transcriptionContainer) {
+                const transcription = result.data[1];
+                transcriptionElement.textContent = transcription;
+                transcriptionContainer.style.display = 'block';
+            } else {
+                console.warn('Transcription elements not found');
             }
+
+            console.log('Audio ready for playback and transcription displayed');
+
+        } catch (error) {
+            console.error('Error in fetchMp3:', error);
+            console.error('Error stack:', error.stack);
+            alert(`Error fetching MP3: ${error.message}`);
+
+            // Clear the audio player source and hide the play button
+            if (audioPlayer) audioPlayer.src = '';
+            if (playButton) playButton.style.display = 'none';
+            if (transcriptionContainer) transcriptionContainer.style.display = 'none';
+        } finally {
+            if (loadingIndicator)
+                loadingIndicator.style.display = 'none';
         }
-
+    }
+
     // Get the link from the shared URL
     const queryParams = new URLSearchParams(window.location.search);
     const sharedLink = queryParams.get('url');
@@ -194,16 +246,3 @@ if ('mediaSession' in navigator) {
         audioPlayer.currentTime = Math.min(audioPlayer.currentTime + 10, audioPlayer.duration);
     });
 }
-
-// JavaScript code to read URL parameters
-const urlParams = new URLSearchParams(window.location.search);
-
-// Retrieve specific parameters
-const name = urlParams.get('name'); // "John"
-const age = urlParams.get('age'); // "30"
-
-// Display the parameters in the output div
-document.getElementById('shared-content').innerHTML = `
-    <p>Name: ${name}</p>
-    <p>Age: ${age}</p>
-`;
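fetchMp3 hard-codes the endpoint's return shape: the audio file at result.data[0].url and the transcript at result.data[1]. One way to double-check that shape against a running Space (a sketch in Python, assuming the local Space URL used in testtranslations.py below):

from gradio_client import Client

client = Client("http://127.0.0.1:7860/")
# view_api(return_format="dict") returns the endpoint schema as a dict
# instead of printing it, so the declared return values can be inspected.
info = client.view_api(return_format="dict")
print(info["named_endpoints"]["/validate_and_generate_audio"]["returns"])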
testtranslations.py
ADDED
@@ -0,0 +1,62 @@
+import gettext
+import os
+
+# Setup gettext
+def setup_translation(lang_code):
+    # The translation domain (like an app-specific identifier)
+    locale_path = os.path.join(os.path.dirname(__file__), 'locales')
+    translation = gettext.translation('messages', localedir=locale_path, languages=[lang_code])
+    translation.install()
+    return translation.gettext  # Return the translation function '_'
+
+# Function to greet the world in different languages
+def greet():
+    print(_("Hello, World!"))
+
+if __name__ == "__main__":
+    # Select language dynamically
+    lang = input("Choose a language (en, fr, es): ")
+
+    if lang == 'fr':
+        _ = setup_translation('fr')
+    elif lang == 'es':
+        _ = setup_translation('es')
+    else:
+        _ = lambda s: s  # Default fallback, no translation
+
+    # Greet in the selected language
+    greet()
+
+
+from gradio_client import Client
+
+client = Client("http://127.0.0.1:7860/")
+result = client.predict(
+    param_0="Hello!!",
+    param_1="Hello!!",
+    param_2="o1-preview-2024-09-12",
+    param_3="tts-1",
+    param_4="alloy",
+    param_5="echo",
+    param_6="Hello!!",
+    param_7="Your task is to take the input text provided and turn it into an lively, engaging, informative podcast dialogue, in the style of NPR. The input text may be messy or unstructured, as it could come from a variety of sources like PDFs or web pages.
+Don't worry about the formatting issues or any irrelevant information; your goal is to extract the key points, identify definitions, and interesting facts that could be discussed in a podcast.
+Define all terms used carefully for a broad audience of listeners.",
+    param_8="First, carefully read through the input text and identify the main topics, key points, and any interesting facts or anecdotes. Think about how you could present this information in a fun, engaging way that would be suitable for a high quality presentation.",
+    param_9="Brainstorm creative ways to discuss the main topics and key points you identified in the input text. Consider using analogies, examples, storytelling techniques, or hypothetical scenarios to make the content more relatable and engaging for listeners.
+Keep in mind that your podcast should be accessible to a general audience, so avoid using too much jargon or assuming prior knowledge of the topic. If necessary, think of ways to briefly explain any complex concepts in simple terms.
+Use your imagination to fill in any gaps in the input text or to come up with thought-provoking questions that could be explored in the podcast. The goal is to create an informative and entertaining dialogue, so feel free to be creative in your approach.
+Define all terms used clearly and spend effort to explain the background.
+Write your brainstorming ideas and a rough outline for the podcast dialogue here. Be sure to note the key insights and takeaways you want to reiterate at the end.
+Make sure to make it fun and exciting. ",
+    param_10="Now that you have brainstormed ideas and created a rough outline, it's time to write the actual podcast dialogue. Aim for a natural, conversational flow between the host and any guest speakers. Incorporate the best ideas from your brainstorming session and make sure to explain any complex topics in an easy-to-understand way.",
+    param_11="Write a very long, engaging, informative podcast dialogue here, based on the key points and creative ideas you came up with during the brainstorming session. Use a conversational tone and include any necessary context or explanations to make the content accessible to a general audience.
+Never use made-up names for the hosts and guests, but make it an engaging and immersive experience for listeners. Do not include any bracketed placeholders like [Host] or [Guest]. Design your output to be read aloud -- it will be directly converted into audio.
+Make the dialogue as long and detailed as possible, while still staying on topic and maintaining an engaging flow. Aim to use your full output capacity to create the longest podcast episode you can, while still communicating the key information from the input text in an entertaining way.
+At the end of the dialogue, have the host and guest speakers naturally summarize the main insights and takeaways from their discussion. This should flow organically from the conversation, reiterating the key points in a casual, conversational manner. Avoid making it sound like an obvious recap - the goal is to reinforce the central ideas one last time before signing off.
+The podcast should have around 20000 words.",
+    param_12="Hello!!",
+    param_13="Hello!!",
+    api_name="/validate_and_generate_audio"
+)
+print(result)