kwabs22 committed: "Links and other"
Commit: a8a1961 (parent: d7fc782)
app.py CHANGED (old version shown first, new version below)
@@ -35,6 +35,8 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from flores200_codes import flores_codes #- figure this out after it works
import whisper
import tempfile

#When I forgot about the readme file ChatGPT suggested these - Leaving to remember the Readme.md must be updated as well
#print(gr.__version__)
@@ -2173,6 +2175,56 @@ def sentbuildcheck_answer(selected, correct):
else:
return "Incorrect. Please try again.", gr.update(interactive=True)
#--------------------------------------------------------------------------------------------------------------------------------------------

with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
@@ -2192,10 +2244,16 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
audiogenspacebtn.click(display_website, inputs=linktoaudiogen, outputs=audiogenspace)
with gr.Accordion("Image Gen or Animation HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
with gr.Row():
-
linktoimagegen = gr.Dropdown(choices=["https://gparmar-img2img-turbo-sketch.hf.space", "https://kadirnar-open-sora.hf.space", "https://bytedance-animatediff-lightning.hf.space", "https://radames-real-time-text-to-image-sdxl-lightning.hf.space", "https://cagliostrolab-animagine-xl-3-1.hf.space", "https://wangfuyun-animatelcm-svd.hf.space" "https://modelscope-transferanything.hf.space", "https://visionmaze-magic-me.hf.space", "https://wangfuyun-animatelcm.hf.space", "https://artgan-diffusion-api.hf.space", "https://multimodalart-stable-cascade.hf.space", "https://ap123-sdxl-lightning.hf.space", "https://google-sdxl.hf.space", "https://guoyww-animatediff.hf.space", "https://segmind-segmind-stable-diffusion.hf.space", "https://simianluo-latent-consistency-model.hf.space", "https://artificialguybr-studio-ghibli-lora-sdxl.hf.space", "https://artificialguybr-pixel-art-generator.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
imagegenspace = gr.HTML("Chat Space Chosen will load here")
imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
with gr.Accordion("Image Understanding/Vision Conversation HF Spaces/Sites (Click Here to Open)", open=False):
with gr.Row():
linktovisionund = gr.Dropdown(choices=["https://linfanluntan-grounded-sam.hf.space", "https://merve-llava-next.hf.space", "https://badayvedat-llava.hf.space", "https://otter-ai-otterhd-demo.hf.space", "https://adept-fuyu-8b-demo.hf.space", "https://xinyu1205-recognize-anything.hf.space", "https://languagebind-moe-llava.hf.space", "https://vision-cair-minigpt4.hf.space", "https://fffiloni-live-vision.hf.space", "https://ysharma-gemini-pro-vision-chat.hf.space", "https://kvikontent-chatgpt-vision.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
@@ -2204,7 +2262,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
visionundspacebtn.click(display_website, inputs=linktovisionund, outputs=visionundspace)
with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
with gr.Row():
-
linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://cyzgab-catch-me-if-you-can.hf.space", "https://databricks-dbrx-instruct.hf.space", "https://qwen-qwen1-5-moe-a2-7b-chat-demo.hf.space", "https://stabilityai-stablelm-2-1-6b-zephyr.hf.space", "https://qwen-qwen1-5-72b-chat.hf.space", "https://deepseek-ai-deepseek-coder-7b-instruct.hf.space", "https://01-ai-yi-34b-chat.hf.space", "https://ysharma-zephyr-playground.hf.space", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left. For lymsys / chat arena copy the link and use a new tab")
with gr.Accordion("Some prompt ideas", open=False):
with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
@@ -2221,7 +2279,8 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
with gr.Row():
with gr.Column(scale=1):
-
gr.HTML(""" <div style="height: 350px; width: 100%; border: 1px solid black; overflow: auto;"> Some useful links <br> <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://openxlab.org.cn/apps'> -- Openxlabs - Huggingface Alternative -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- 
Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | <a href='https://huggingface.co/spaces/guoyww/AnimateDiff'> -- AnimateDiff: Create an image make a video -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless-m4t-v2-large'> -- Seamless m4t v2 -- </a> | <a href='https://huggingface.co/spaces/Otter-AI/OtterHD-Demo'> -- OtterHD: Multimodal model -- </a> | <a href='https://ai.meta.com/blog/ego-exo4d-video-learning-perception/'> -- Ego-exo4d Multimodal 
dataset -- </a> | <a href='https://imagine.meta.com/'> -- Meta Imagine images (Free) -- </a> | <a href='https://www.mage.space/'> -- Mage Space images (Free) -- </a> | <a href='https://www.bing.com/images/create?FORM=GENILP'> -- Bing Image Creator (Free) -- </a> | <a href='https://jalammar.github.io/'> -- Jay Alammar Blog - Illustrated Transformer, Stable Diffusion and More -- </a> | <a href='https://huggingface.co/spaces/myshell-ai/OpenVoice'> -- OpenVoice - Open Source Voice Clone -- </a> | <a href='https://huggingface.co/spaces/fffiloni/live-vision'> -- Live-Vision HF Space - Live commentary on a video feed demo -- </a> | <a href='https://xenova.github.io/transformers.js/'> -- Transformers JS demo - Xenova (HF) -- </a> | <a href='https://huggingface.co/chat/assistants'> -- Huggingface Assistants -- </a> | <a href='https://huggingface.co/spaces/AP123/SDXL-Lightning'> -- 4-step SDXL Inference through LORA -- </a> | <a href='https://huggingface.co/datasets/HuggingFaceTB/cosmopedia'> -- Cosmopedia - 92 GB synthetic dataset made using Mixtral (25 billion tokens) -- </a> | <a href='https://huggingface.co/spaces/dylanebert/LGM-mini'> -- LGM-mini: image to ply -- </a> | <a href='https://playgroundai-playground-v2-5.hf.space'> -- Playground v2.5 -- </a> | <a href='https://github.com/openai/transformer-debugger'> -- OpenAI - Transformer Debugger -- </a> | <a href='https://huggingface.co/datasets/princeton-nlp/SWE-bench'> -- SWE-bench dataset (Real world github issues) -- </a> | <a href='https://huggingface.co/papers/2402.17764'> -- The Era of 1-bit LLMs - All Large Language Models are in 1.58 Bits -- </a> | <a href='https://github.com/microsoft/unilm/tree/master'> -- Microsoft Repo for AI research (Bitnet andd others will be here) -- </a> | <a href='https://huggingface.co/spaces/cyzgab/catch-me-if-you-can'> -- Realtime response using GroqCloud and live gradio interface -- </a> | <a href='https://console.groq.com/docs/showcase-applications'> -- GroqCloud Application showcase -- </a> | <a href='https://kadirnar-open-sora.hf.space'> -- Open version of Open AI SORA -- </a> | <a href='https://huggingface.co/spaces/mms-meta/MMS'> -- MMS (Meta) - TTS for 1000 languages -- </a> | <a href='https://huggingface.co/pyp1/VoiceCraft'> -- VoiceCraft (Audio Clone Model) -- </a> | <a href='https://huggingface.co/papers/2403.09629'> -- QuietStar Paper (HF) - Models linked -- </a> | <a href='https://huggingface.co/ai21labs/Jamba-v0.1'> -- JAMBA - mamba based 52B with 140K context on one gpu!! -- </a> | <a href='https://huggingface.co/papers/2403.16627'> -- SDXS for realtime generation (upto 100FPS) -- </a> |
with gr.Tabs() as nav1:
with gr.Tab("Rep - HTML"):
gr.HTML("UNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know")
@@ -2344,66 +2403,68 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
SAMSpacetestbtn.click(display_website, SAMSpaceLink, SAMSpacetest)
gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit <br> if theres an error you have to remove the foreign letters and place roman ones")
gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
- with gr.Tab("
- (old lines 2348-2406 removed; their content was not captured in this view)

with gr.Tab("Complex Sentence Builder"):
gr.Markdown(
@@ -2447,6 +2508,21 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
submit_button.click(sentbuildcheck_answer, inputs=[quiz_choices, quiz_answer], outputs=[quiz_feedback, submit_button])
with gr.Tab("Youtube Subs Listening Comprehension"):
gr.HTML("<a href='https://www.lingq.com/en/'>State Management Solution for Word --> Find LingQ Here --> https://www.lingq.com/en/</a>")
with gr.Tab("New - Learning with Youtube"):
gr.HTML("TODO: 1st syllable subtitle. First Syllable of any word is all you need to listen - pair this with Youtube subtitle interface - Listening is ability to spell really fast (real time)")
gr.HTML(" <a href='https://huggingface.co/spaces/artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION'> -- artificialguybr's Video Translation/Transcription Space -- </a> | ")
@@ -2546,7 +2622,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
gr.Interface(fn=keep_nouns, inputs="textbox", outputs="textbox", description="Nouns only")
with gr.Tab("Placeholder Generation"):
gr.HTML("Placeholder for every image of each sentence - Good for ChatGPT + Dall-E (First 16 Characters is part of the filename if you get error)")
- gr.Interface(fn=lambda
with gr.Row():
with gr.Column(scale=4):
imageplaceholderinput = gr.TextArea(placeholder="Enter Text and Get a line by line (stand in for sentences for now) placeholder for image associated with the text")
@@ -2802,4 +2878,9 @@ Each type of knowing involves different cognitive processes and levels of unders
lliface.queue().launch(share=True) #docker #(inbrowser="true") #colab

#httpcore and googletrans seem to be the cause all my bugs ---> These are problems to watch
- #not using the exact package versions as your local environment will lead to problems in the future when backwards compatibility is not reintroduced

app.py (new version)

from flores200_codes import flores_codes #- figure this out after it works
import whisper
import tempfile
+ import json
+ import shutil

#When I forgot about the readme file ChatGPT suggested these - Leaving to remember the Readme.md must be updated as well
#print(gr.__version__)
...
else:
return "Incorrect. Please try again.", gr.update(interactive=True)

+
#------------------------------------------------------------------------------------------------
+
+ def display_subtitles_from_zip(zip_file_path):
+     temp_dir = tempfile.mkdtemp()
+     try:
+         with zipfile.ZipFile(zip_file_path.name, "r") as zip_file:
+             zip_file.extractall(temp_dir) # Extract all files to the temporary directory
+
+         with open(f"{temp_dir}/preprocessed_subtitles.json", "r") as file:
+             preprocessed_subtitles = json.load(file)
+
+         preprocessed_subtitles.sort(key=lambda x: x['start'])
+
+         yield "Subtitles loaded. Playback will begin shortly...", []
+         time.sleep(2)
+
+         current_time = 0
+
+         for subtitle in preprocessed_subtitles:
+             wait_time = subtitle['start'] - current_time
+             if wait_time > 0:
+                 time.sleep(wait_time)
+                 current_time += wait_time
+
+             subtitle_output = f"Start Time: {subtitle['start']} s\n"
+             subtitle_output += f"Duration: {subtitle['duration']} s\n"
+             subtitle_output += f"Text: {subtitle['text']}\n"
+             subtitle_output += f"Analysed Text: {subtitle['analysed_text']}\n"
+             subtitle_output += "Images:\n"
+
+             images = []
+             for image_name in subtitle['image_paths']:
+                 image_path = f"{temp_dir}/{image_name}"
+                 if Image.open(image_path): # Just a simple check if the file is an image
+                     images.append(image_path)
+
+             subtitle_output += "Word Durations:\n"
+             for duration in subtitle['word_durations']:
+                 subtitle_output += f"{duration} ms\n"
+
+             subtitle_output += "---\n"
+
+             yield subtitle_output, images
+
+             time.sleep(subtitle['duration'])
+             current_time += subtitle['duration']
+     finally:
+         shutil.rmtree(temp_dir) # Clean up the temporary directory
#--------------------------------------------------------------------------------------------------------------------------------------------

with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )

...
audiogenspacebtn.click(display_website, inputs=linktoaudiogen, outputs=audiogenspace)
with gr.Accordion("Image Gen or Animation HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
with gr.Row():
+
linktoimagegen = gr.Dropdown(choices=["https://gparmar-img2img-turbo-sketch.hf.space", "https://kadirnar-open-sora.hf.space", "https://bytedance-animatediff-lightning.hf.space", "https://ehristoforu-dalle-3-xl-lora-v2.hf.space", "https://multimodalart-cosxl.hf.space", "https://radames-real-time-text-to-image-sdxl-lightning.hf.space", "https://cagliostrolab-animagine-xl-3-1.hf.space", "https://wangfuyun-animatelcm-svd.hf.space" "https://modelscope-transferanything.hf.space", "https://visionmaze-magic-me.hf.space", "https://wangfuyun-animatelcm.hf.space", "https://artgan-diffusion-api.hf.space", "https://multimodalart-stable-cascade.hf.space", "https://ap123-sdxl-lightning.hf.space", "https://google-sdxl.hf.space", "https://guoyww-animatediff.hf.space", "https://segmind-segmind-stable-diffusion.hf.space", "https://simianluo-latent-consistency-model.hf.space", "https://artificialguybr-studio-ghibli-lora-sdxl.hf.space", "https://artificialguybr-pixel-art-generator.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
imagegenspacebtn = gr.Button("Use the chosen URL to load interface with a chat model")
imagegenspace = gr.HTML("Chat Space Chosen will load here")
imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
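display_website itself is outside the hunks shown here; since every accordion passes it a URL from a dropdown and its output fills a gr.HTML slot, it is presumably a small iframe helper along these lines (hypothetical sketch, not the repo's actual code):

# Hypothetical sketch - the real display_website is defined elsewhere in app.py.
def display_website(link):
    # Wrap the chosen .hf.space (or other) URL in an iframe so it renders inside this Blocks UI.
    return f'<iframe src="{link}" width="100%" height="1000px"></iframe>'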
+ with gr.Accordion("3D Model Spaces/Sites (Click Here to Open) - Image to Blender?", open=False):
+ with gr.Row():
+
linktoThreedModel = gr.Dropdown(choices=["https://ashawkey-lgm.hf.space", "https://dylanebert-lgm-mini.hf.space", "https://dylanebert-splat-to-mesh.hf.space", "https://dylanebert-multi-view-diffusion.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+ ThreedModelspacebtn = gr.Button("Use the chosen URL to load interface with a translate model")
+ ThreedModelspace = gr.HTML("Translate Space Chosen will load here")
+ ThreedModelspacebtn.click(display_website, inputs=linktoThreedModel, outputs=ThreedModelspace)
with gr.Accordion("Image Understanding/Vision Conversation HF Spaces/Sites (Click Here to Open)", open=False):
with gr.Row():
linktovisionund = gr.Dropdown(choices=["https://linfanluntan-grounded-sam.hf.space", "https://merve-llava-next.hf.space", "https://badayvedat-llava.hf.space", "https://otter-ai-otterhd-demo.hf.space", "https://adept-fuyu-8b-demo.hf.space", "https://xinyu1205-recognize-anything.hf.space", "https://languagebind-moe-llava.hf.space", "https://vision-cair-minigpt4.hf.space", "https://fffiloni-live-vision.hf.space", "https://ysharma-gemini-pro-vision-chat.hf.space", "https://kvikontent-chatgpt-vision.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
...
visionundspacebtn.click(display_website, inputs=linktovisionund, outputs=visionundspace)
with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
with gr.Row():
+
linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://cyzgab-catch-me-if-you-can.hf.space", "https://qwen-qwen1-5-110b-chat-demo.hf.space", "https://ysharma-chat-with-meta-llama3-8b.hf.space", "https://databricks-dbrx-instruct.hf.space", "https://cohereforai-c4ai-command-r-plus.hf.space", "https://qwen-qwen1-5-moe-a2-7b-chat-demo.hf.space", "https://cohereforai-c4ai-command-r-v01.hf.space", "https://ehristoforu-mixtral-46-7b-chat.hf.space", "https://stabilityai-stablelm-2-1-6b-zephyr.hf.space", "https://qwen-qwen1-5-72b-chat.hf.space", "https://deepseek-ai-deepseek-coder-7b-instruct.hf.space", "https://01-ai-yi-34b-chat.hf.space", "https://ysharma-zephyr-playground.hf.space", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left. For lymsys / chat arena copy the link and use a new tab")
with gr.Accordion("Some prompt ideas", open=False):
with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
...
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
with gr.Row():
with gr.Column(scale=1):
+
gr.HTML(""" <div style="height: 350px; width: 100%; border: 1px solid black; overflow: auto;"> Some useful links <br> <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://openxlab.org.cn/apps'> -- Openxlabs - Huggingface Alternative -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Prohect (Will Deque) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> = <a href='https://github.com/facebookresearch/fairseq/tree/nllb'> -- NLLB Github -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- 
Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment]) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceM4/idefics_playground'> -- IDEFICS - open Multimodal model -- </a> | <a href='https://github.com/facebookresearch/belebele'> -- Belebele (Meta Dataset) -- </a> | <a href='https://huggingface.co/spaces/jbilcke-hf/ai-comic-factory'> -- AI Comic Factory -- </a> | <a href='https://github.com/camenduru'> -- CAMENDURU REPOS -- </a> | <a href='https://huggingface.co/datasets/b-mc2/sql-create-context'> -- SQL Dataset - A list of simple questions -- </a> | <a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter (alt to ChatGPT Pro) -- </a> | <a href='https://easywithai.com/fun-ai-tools/'> -- List - Easy with AI -- </a> | <a href='https://huggingface.co/spaces/Xenova/whisper-web'> -- Whisper Web (UI) -- </a> | <a href='https://blog.roblox.com/2023/09/revolutionizing-creation-roblox/'> -- Roblox Assistant -- </a> | <a href='https://huggingface.co/spaces/AP123/IllusionDiffusion'> -- Illusion Diffusion (Hide words or shapes in the image) -- </a> | <a href='https://huggingface.co/spaces/Shopify/background-replacement'> -- Background replacement - Shopify -- </a> | <a href='https://huggingface.co/spaces/multimodalart/LoraTheExplorer'> -- Lora The Explorer (SDXL) -- </a> | <a href='https://huggingface.co/spaces/XCLiu/InstaFlow'> -- InstaFlow (Under 1 second Inference) -- </a> | <a href='https://github.com/tairov/llama2.mojo'> -- TinyStories on mojo (230+ tk/s) -- </a> | <a href='https://emojis.alexandru.so/p/OHVEmfMwQl'> -- Any Emoji you want - emojijs -- </a> | <a href='https://huggingface.co/spaces/google/sdxl'> -- SDXL on TPUv5 -- </a> | <a href='https://huggingface.co/spaces/SimianLuo/Latent_Consistency_Model'> -- LCM - SD1.5 at 7secs per 4 images (after coldstart) -- </a> | <a href='https://huggingface.co/spaces/fffiloni/sdxl-control-loras'> -- SDXL Control Lora -- </a> | <a href='https://huggingface.co/spaces/aadnk/faster-whisper-webui'> -- Whisper WebUI -- </a> | <a href='https://huggingface.co/spaces/guoyww/AnimateDiff'> -- AnimateDiff: Create an image make a video -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless-m4t-v2-large'> -- Seamless m4t v2 -- </a> | <a href='https://huggingface.co/spaces/Otter-AI/OtterHD-Demo'> -- OtterHD: Multimodal model -- </a> | <a href='https://ai.meta.com/blog/ego-exo4d-video-learning-perception/'> -- Ego-exo4d Multimodal 
dataset -- </a> | <a href='https://imagine.meta.com/'> -- Meta Imagine images (Free) -- </a> | <a href='https://www.mage.space/'> -- Mage Space images (Free) -- </a> | <a href='https://www.bing.com/images/create?FORM=GENILP'> -- Bing Image Creator (Free) -- </a> | <a href='https://jalammar.github.io/'> -- Jay Alammar Blog - Illustrated Transformer, Stable Diffusion and More -- </a> | <a href='https://huggingface.co/spaces/myshell-ai/OpenVoice'> -- OpenVoice - Open Source Voice Clone -- </a> | <a href='https://huggingface.co/spaces/fffiloni/live-vision'> -- Live-Vision HF Space - Live commentary on a video feed demo -- </a> | <a href='https://xenova.github.io/transformers.js/'> -- Transformers JS demo - Xenova (HF) -- </a> | <a href='https://huggingface.co/chat/assistants'> -- Huggingface Assistants -- </a> | <a href='https://huggingface.co/spaces/AP123/SDXL-Lightning'> -- 4-step SDXL Inference through LORA -- </a> | <a href='https://huggingface.co/datasets/HuggingFaceTB/cosmopedia'> -- Cosmopedia - 92 GB synthetic dataset made using Mixtral (25 billion tokens) -- </a> | <a href='https://huggingface.co/spaces/dylanebert/LGM-mini'> -- LGM-mini: image to ply -- </a> | <a href='https://playgroundai-playground-v2-5.hf.space'> -- Playground v2.5 -- </a> | <a href='https://github.com/openai/transformer-debugger'> -- OpenAI - Transformer Debugger -- </a> | <a href='https://huggingface.co/datasets/princeton-nlp/SWE-bench'> -- SWE-bench dataset (Real world github issues) -- </a> | <a href='https://huggingface.co/papers/2402.17764'> -- The Era of 1-bit LLMs - All Large Language Models are in 1.58 Bits -- </a> | <a href='https://github.com/microsoft/unilm/tree/master'> -- Microsoft Repo for AI research (Bitnet andd others will be here) -- </a> | <a href='https://huggingface.co/spaces/cyzgab/catch-me-if-you-can'> -- Realtime response using GroqCloud and live gradio interface -- </a> | <a href='https://console.groq.com/docs/showcase-applications'> -- GroqCloud Application showcase -- </a> | <a href='https://kadirnar-open-sora.hf.space'> -- Open version of Open AI SORA -- </a> | <a href='https://huggingface.co/spaces/mms-meta/MMS'> -- MMS (Meta) - TTS for 1000 languages -- </a> | <a href='https://huggingface.co/pyp1/VoiceCraft'> -- VoiceCraft (Audio Clone Model) -- </a> | <a href='https://huggingface.co/papers/2403.09629'> -- QuietStar Paper (HF) - Models linked -- </a> | <a href='https://huggingface.co/ai21labs/Jamba-v0.1'> -- JAMBA - mamba based 52B with 140K context on one gpu!! -- </a> | <a href='https://huggingface.co/papers/2403.16627'> -- SDXS for realtime generation (upto 100FPS) -- </a> | <a href='https://huggingface.co/spaces/ggml-org/gguf-my-repo'> -- gguf-my-repo: HF space to create ggufs -- </a> | <a href='https://ai.meta.com/blog/meta-llama-3/'> -- LLAMA 3 release page (8, 70 and 400+) -- </a> |
+ </div>""")
with gr.Tabs() as nav1:
with gr.Tab("Rep - HTML"):
gr.HTML("UNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know")
...
SAMSpacetestbtn.click(display_website, SAMSpaceLink, SAMSpacetest)
gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit <br> if theres an error you have to remove the foreign letters and place roman ones")
gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
+ with gr.Tab("Sentence Builder"):
+ gr.HTML("Claude 3 assited ideas and code")
+ with gr.Tab("Basic Sentence Builder"):
+ with gr.Tab("SVO"):
+ gr.Markdown(
+ """
+ ## Subject-Verb-Object (SVO) Order
+
+ Some languages that follow the SVO order:
+ - English
+ - Spanish
+ - French
+ - Italian
+ - Chinese
+ """
+ )
+ svo_subject = gr.Dropdown(sentbuildsubjects, label="Subject")
+ svo_verb = gr.Dropdown(sentbuildverbs, label="Verb")
+ svo_object = gr.Dropdown(sentbuildobjects, label="Object")
+ svo_output = gr.Textbox(label="Sentence (SVO)")
+ svo_btn = gr.Button("Generate SVO Sentence")
+ svo_btn.click(build_sentence_basic, inputs=[svo_subject, svo_verb, svo_object, gr.State("SVO")], outputs=svo_output)
+
+ with gr.Tab("SOV"):
+ gr.Markdown(
+ """
+ ## Subject-Object-Verb (SOV) Order
+
+ Some languages that follow the SOV order:
+ - Japanese
+ - Korean
+ - Turkish
+ - Hindi
+ - Latin
+ """
+ )
+ sov_subject = gr.Dropdown(sentbuildsubjects, label="Subject")
+ sov_object = gr.Dropdown(sentbuildobjects, label="Object")
+ sov_verb = gr.Dropdown(sentbuildverbs, label="Verb")
+ sov_output = gr.Textbox(label="Sentence (SOV)")
+ sov_btn = gr.Button("Generate SOV Sentence")
+ sov_btn.click(build_sentence_basic, inputs=[sov_subject, sov_verb, sov_object, gr.State("SOV")], outputs=sov_output)
+
+ with gr.Tab("VSO"):
+ gr.Markdown(
+ """
+ ## Verb-Subject-Object (VSO) Order
+
+ Some languages that follow the VSO order:
+ - Arabic
+ - Hebrew
+ - Irish
+ - Welsh
+ - Samoan
+ """
+ )
+ vso_verb = gr.Dropdown(sentbuildverbs, label="Verb")
+ vso_subject = gr.Dropdown(sentbuildsubjects, label="Subject")
+ vso_object = gr.Dropdown(sentbuildobjects, label="Object")
+ vso_output = gr.Textbox(label="Sentence (VSO)")
+ vso_btn = gr.Button("Generate VSO Sentence")
+ vso_btn.click(build_sentence_basic, inputs=[vso_subject, vso_verb, vso_object, gr.State("VSO")], outputs=vso_output)
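build_sentence_basic is referenced by all three click handlers above but is not included in this diff. Each handler passes the subject, verb and object dropdown values plus a gr.State order flag, so a minimal sketch consistent with that wiring could be (hypothetical, not the repo's actual implementation):

# Hypothetical sketch - the real build_sentence_basic lives elsewhere in app.py.
def build_sentence_basic(subject, verb, obj, order):
    # Arrange the three picks according to the requested constituent order.
    orders = {
        "SVO": [subject, verb, obj],
        "SOV": [subject, obj, verb],
        "VSO": [verb, subject, obj],
    }
    return " ".join(orders.get(order, [subject, verb, obj]))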

with gr.Tab("Complex Sentence Builder"):
gr.Markdown(
...
submit_button.click(sentbuildcheck_answer, inputs=[quiz_choices, quiz_answer], outputs=[quiz_feedback, submit_button])
with gr.Tab("Youtube Subs Listening Comprehension"):
gr.HTML("<a href='https://www.lingq.com/en/'>State Management Solution for Word --> Find LingQ Here --> https://www.lingq.com/en/</a>")
+ with gr.Tab("Prepocessed Subtitles Practice"):
+ gr.HTML("Custom SD script to create image for for each noun/ verb/ etc. - ")
+ with gr.Row():
+ with gr.Column():
+ gr.HTML("Subtitle and Image Display")
+ gr.HTML("Upload a zip file containing preprocessed subtitles and images.")
+ with gr.Column():
+ ppssubtitleinput = gr.File(label="Upload Zip File")
+ ppssubtitlebtn = gr.Button()
+ with gr.Row():
+ with gr.Column(scale=1):
+ ppssubtitletextout = gr.Textbox(label="Subtitle Output")
+ with gr.Column(scale=2):
+ ppssubtitleimages = gr.Gallery(label="Images")#.style(grid=[2], height="auto")
+ ppssubtitlebtn.click(display_subtitles_from_zip, inputs=[ppssubtitleinput], outputs=[ppssubtitletextout , ppssubtitleimages])
with gr.Tab("New - Learning with Youtube"):
gr.HTML("TODO: 1st syllable subtitle. First Syllable of any word is all you need to listen - pair this with Youtube subtitle interface - Listening is ability to spell really fast (real time)")
gr.HTML(" <a href='https://huggingface.co/spaces/artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION'> -- artificialguybr's Video Translation/Transcription Space -- </a> | ")
...
gr.Interface(fn=keep_nouns, inputs="textbox", outputs="textbox", description="Nouns only")
with gr.Tab("Placeholder Generation"):
gr.HTML("Placeholder for every image of each sentence - Good for ChatGPT + Dall-E (First 16 Characters is part of the filename if you get error)")
+ gr.Interface(fn=lambda sentence: ". ".join(sentence.split()), inputs=["text"], outputs=["text"], description="Use full stops before input below to make a world level version")
with gr.Row():
with gr.Column(scale=4):
imageplaceholderinput = gr.TextArea(placeholder="Enter Text and Get a line by line (stand in for sentences for now) placeholder for image associated with the text")
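The lambda added above simply re-joins whitespace-separated words with ". ", which is what turns an input sentence into a word-by-word list of stand-in 'sentences' for the placeholder generator below it. A quick check of the transform:

# Same transform as the lambda wired into the gr.Interface above.
word_level = lambda sentence: ". ".join(sentence.split())
print(word_level("I drink water"))  # -> "I. drink. water" (no trailing full stop)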
...
lliface.queue().launch(share=True) #docker #(inbrowser="true") #colab

#httpcore and googletrans seem to be the cause all my bugs ---> These are problems to watch
+ #not using the exact package versions as your local environment will lead to problems in the future when backwards compatibility is not reintroduced
+
+ # current exact packages
+ # gradio==4.13
+ # httpx==0.13.3
+ # googletrans==3.1.0a0