KwabsHug committed on
Commit
f20ffae
1 Parent(s): f0960c8

Update app.py

Files changed (1)
  1. app.py +287 -44
app.py CHANGED
@@ -18,6 +18,10 @@ import numpy as np
 from docx import Document
 import textwrap
 import pandas as pd
 
 #Uncomment these for Huggingface
 nltk.download('maxent_ne_chunker') #Chunker
@@ -39,6 +43,15 @@ spacy.cli.download("en_core_web_sm")
 spacy.cli.download('ko_core_news_sm')
 spacy.cli.download('ja_core_news_sm')
 spacy.cli.download('zh_core_web_sm')
 
 nlp = spacy.load('en_core_web_sm')
 translator = Translator()
@@ -273,24 +286,29 @@ def split_verbs_nouns(text):
     doc = nlp(text)
 
     verbs_nouns = []
     other_words = []
     pos_string = []
 
     for token in doc:
         if token.pos_ in ["VERB", "NOUN"]:
-            verbs_nouns.append(token.text)
-        elif token.text in [punct.text for punct in doc if punct.is_punct]:
-            verbs_nouns.append(token.text)
-            other_words.append(token.text)
         else:
-            other_words.append(token.text)
-        pos_string.append(token.pos_)
 
-    verbs_nouns_text = " ".join(verbs_nouns)
-    other_words_text = " ".join(other_words)
-    pos_string_text = " ".join(pos_string)
 
-    return pos_string_text, verbs_nouns_text, other_words_text
 
 SRTLangOptions = gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
@@ -440,7 +458,7 @@ def text_to_links(text): #TextCompFormat
     html = ""
     for line in lines:
         if line.startswith("http"):
-            html += f"<a href='{line}'> -- -- </a> | \n"
         else:
             html += line + "Not a link <br> \n"
     return html
@@ -637,7 +655,7 @@ def UnknownTrackTexttoApp(text): #Copy of def OptimisedTtAppForUNWFWO(text):
     with open(HTMLDownloadTemp, 'w') as f:
         f.write(FinalOutput)
 
-    return FinalOutput, FinalOutput, HTMLDownloadTemp
 
 #Kathryn Lingel - Pyambic Pentameter Example - PyCon US
 #Basic Language Model Code
@@ -776,9 +794,158 @@ def arrealtimetestidea(img):
 
 #------------------------------------------------------------------------------------------------------------------------------
 
 LLPromptIdeas = """
 
 <p> Make jokes while following rules for a syllogism jokes game:
 
 The game can be played with any number of people.
@@ -789,18 +956,40 @@ The person who makes the last valid syllogism wins the game. </p>
 
 <hr>
 
-<p> Lets try this I will give you the english word and you find the <<chinese>> word and then mix the meaning of the word with the mnemonic </>
 
-Bings Explanation
-Youre welcome. Im happy to help. If I wanted to explain the rules of this game clearly and concisely, I would do something like this:
 
-The game is about finding the <<Chinese>> word for an English word and making a mnemonic that mixes the meaning of the word with the mnemonic.
-The game has two players: one who gives the English word and one who finds the <<Chinese>> word and makes the mnemonic.
-The player who gives the English word can choose any word they want, as long as it has a <<Chinese>> equivalent.
-The player who finds the <<Chinese>> word and makes the mnemonic can use any online or offline resources they want, such as dictionaries, translators, or search engines.
-The player who finds the <<Chinese>> word and makes the mnemonic should write their answer in the format: The <<Chinese>> word for X is Y <<(pinyin)>>, which means Z. Here are some possible mnemonics that mix the meaning of the word with the mnemonic: A, B, C, D. </p>
 """
 
 # Define the Gradio interface inputs and outputs for video split
 spvvideo_file_input = gr.File(label='Video File')
@@ -814,32 +1003,53 @@ groupoutput_text = gr.Textbox(label="Grouped words")
 
 Translationchuncksize = gr.Number(value=4998)
 
-randomExposuremessagelistitem = str(random.sample(["Bing mnemonic - lost = dont ignore unusual sounds here inside lost cave", "1000 verbs in lists of 100, verbs = easy setence structure estimation (SVO, SOV, etc.)", "Can put any message here in the navigatoin tab"], 1)).replace("['", "").replace("']", "")
-randomExposuremessage = f" <marquee> { randomExposuremessagelistitem } </marquee> "
-
 
 with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
-    PracticeExposure = gr.HTML(randomExposuremessage)
-    gr.HTML("Advanced Repitition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
-    gr.HTML("<p>Timing Practice - Repitition: Run from it, Dread it, Repitition is inevitable - Thanos --> Repitition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repitition is a multitask activity like driving must be subconcious process to show mastery </p>")
-    gr.HTML(""" <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | """)
     with gr.Row():
         with gr.Column(scale=1):
-            with gr.Tab("Rep - Gradio"):
-                gr.HTML("""Gradio Version Below <iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
                 See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
             with gr.Tab("Rep - Gradio"):
                 gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
             with gr.Tab("Navigation"):
                 gr.HTML("Picture Annotation <br> Chorus Focused Word List <br> Merged Subtitles <br> Repetitive Audio (TTS) <br> Word and Sentence Jumbling <br> Unkown: Wordnet <br> Unknown: Wikipeadia <br>")
-                PracticeExposureInput = gr.Textbox(placeholer="Exposure practice = look up", label="Exposure at the top")
                 PracticeExposurebtn = gr.Button("Change Default")
                 PracticeExposurebtn.click(fn=changeexposuretext, inputs=PracticeExposureInput, outputs=PracticeExposure)
             with gr.Tab("Vector Database = Memorisation"):
-                gr.HTML("Open AI - 2500000 character text = <1$ (0.0004 per 1000 tokens), Cohere Multilingual = free for personal use / Commercial use = \n Vector Database query = Better than text search but not for logical relationships")
         with gr.Column(scale=3):
             with gr.Tab("Beginner - Listen + Read"):
                 with gr.Row():
                     with gr.Column(scale=1):
                         gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <br> <b>Tiny Stories dataset is like a graded reader</b> <br>")
@@ -848,11 +1058,24 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                         gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
                         gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repitition for tts practice")
                     with gr.Column(scale=1):
-                        gr.HTML("""Reading - Caption images (SD/Dalle-E) <br> <a href='https://unsplash.com/'> -- Unsplash - free images -- </a> | <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> --Huggingface CLIP-Interrogator Space-- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- Tag2Text is faster than clip -- </a> | <br> <a href='https://huggingface.co/spaces/bkhmsi/Word-To-Image'> -- Transform word to an image -- </a> | <a href='https://huggingface.co/spaces/microsoft/Promptist'> -- Promptist (Microsoft) -- </a> | """)
-                        gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
                         gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit <br> if theres an error you have to remove the foreign letters and place roman ones")
                 #with gr.Tab("Transcribe - RASMUS Whisper"):
                     #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
             with gr.Tab("Advanced - LingQ Addon Ideas"):
                 gr.HTML("<a href='https://www.lingq.com/en/'>Find LingQ Here --> https://www.lingq.com/en/</a>")
                 with gr.Tab("Visual - Multiline Custom Video Subtitles"):
@@ -890,13 +1113,16 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                 with gr.Tab("Split video to segments"):
                     gr.HTML("<a href='https://www.vlchelp.com/automated-screenshots-interval/'>How to make screenshot in vlc - https://www.vlchelp.com/automated-screenshots-interval/</a><br>")
                     gr.Interface(VideotoSegment, inputs=[spvvideo_file_input, spvsubtitle_file_input], outputs=spvdownload_output)
                     gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
                 with gr.Tab("Audio - Only English thoughts as practice"):
                     gr.HTML("For Audio Most productive is real time recall of native (where your full reasoning ability will always be) <br><hr> Find Replace new lines of the foreign text with full stops or | to get per word translation")
                     gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions2], outputs="audio", description="Paste only english words in foreign order and then keep removing the words from this to practice as effectively")
-            with gr.Tab("Transition is the end goal"):
                 with gr.Row():
                     with gr.Column():
                         gr.Textbox("A word is a list of letter as a fact is a list of words. Both are in a specific order. What is most important is practice the order so randomiser is the tool", lines=4)
                         gr.Interface(fn=RandomiseTextbyType, inputs=["text", RandomiseTextType], outputs="text", description="Randomise order within words, sentences, paragrahs")
                     with gr.Column():
@@ -910,20 +1136,25 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                         gr.HTML("<p> Spell multiple words simultaneously for simultaneous access </p> <p> Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
                         gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
                         gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Noun and Verbs only (Plus punctuation)")
-                        gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text", description="Chunks creator")
             with gr.Tab("Unknown Tracker"):
                 gr.HTML("Repitition of things you know is a waste of time when theres stuff you dont know <p> In Language the goal is bigger vocab --> Knowledge equivalent = question answer pairs but to get to those you need related information pairs</p> <p> Vocab = Glossary + all non text wall(lists, diagrams, etc.)</p>")
                 gr.Textbox("Placeholder for a function that creates a set list and can takes a list for known words and auto find replaces the stuff you know out of the content")
                 gr.Textbox("Place holder for a translate to english interface so that highlighting can still work as only english supported for now")
-                gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["html", "text", "file"], description="Use the text from here to create lists you use for the TTS section")
             with gr.Tab("Unique word ID - use in Infranodus"):
                 gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
                 gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
                 gr.Interface(fn=WikiSearch, inputs="text", outputs="text", description="One word at a time Unique word suggestions (wiki articles)")
             with gr.Tab("Automating related information linking"):
-                gr.HTML("Questions - Tacking and suggesting questions to ask = new education")
-            with gr.Tab("Thinking Practice"):
-                with gr.Tab("Sentence to Format"):
                     gr.Interface(fn=split_verbs_nouns , inputs="text", outputs=["text", "text", "text"], description="Comprehension reading and Sentence Format Creator")
             with gr.Tab("Knowledge Ideas - Notetaking"):
                 gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
@@ -932,18 +1163,25 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                 <p>Application of Knowledge = App Version of the text (eg. Jupyter Notebooks) is what you create and learn first</p>
                 """)
                 gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
-                gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textarea", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
             with gr.Tab("Automated Reading Assitant"):
                 gr.Textbox('Parts of Speech based | Automating the Notetaking Tab either directly or using visual llm to use this interface efficiently')
                 gr.HTML("Types of comprehension agent <hr> Speed of Comprehension = Verb comprehension <br> From the following please extract the verbs <br> now explain each in context <br> Next, use picture descriptions for each word in the verb list <br> Create combinations using the verb list <hr> ")
                 gr.HTML("Tree and Branches approach to learning = familiarity with keywords/headings/summaries before reading the whole text <hr> Productivity/Work revolves around repitition which can be found looking for plurals and grouping terms eg. Headings and Hyper/Hyponyms Analysis")
-            with gr.Tab("AR"):
                 gr.Textbox("Alpha Test version = Real time Lablling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid --> Adjusted Demo")
                 gr.HTML("Some Prompt ideas --> Prompt: Describe the place where these descriptions may be (You job is to be speculative for brainstorming purposes): A dog and a boy, the area is texas, the weather is sunny, the date is 01 May 2021 <hr> Prompt Content Ideas Ideas Clip Interrogator + Location Data aka tags for place, location and time + general news updates on the location + overview of the items in the location <br> Location based advise is most important but after that is information observed by appliances in the location eg. Times Computer turned on, times geyser inspected, amount of time keys havent been touched etc. <br> each location will have an ai personality that will relay more information ")
-                gr.HTML("<a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a>")
                 gr.Interface(fn=arrealtimetestidea, inputs='image', outputs="text", description="Vision Assistant - see and execute")
                 gr.Textbox("Placeholder for webcam stream")
-                #gr.Interface(fn=arrealtimetestidea, inputs='webcam', outputs="text", description="Vision Assistant aka Free Observation llm judgement (GPT Vision API goes here when released). FPS is the difference between realtime app and static image")
             with gr.Tab("Random Ideas"):
                 gr.HTML("""<p>Spaces Test - Still Undercontruction --> Next Milestone is Turning this interface handsfree | Knowledge is a Language but productive knowledge is find replace as well | LingQ is good option for per word state management</p> <p> Arrows app json creator for easy knowledge graphing and spacy POS graph? --> Questions? -->
                 <p> ChatGPT Turns Learning into a read only what you dont know ask only what you dont know feedback loop --> All you have to do is keep track of what prompts you have asked in the past</p> """)
@@ -951,6 +1189,11 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                 gr.HTML("Predictable to identify the parts of picture being described --> The description moves in one direction from one side of the image to the other side is easiest <hr>")
                 gr.HTML("Image = instant comprehension like Stable Diffusion --> Audiovisual experience is the most optimal reading experience <br> Manga with summary descriptions for the chapters = Most aligned visual to audio experience")
             with gr.Tab("LLM Prompts and games"):
-                gr.HTML(LLPromptIdeas)
 
 lliface.queue().launch() #(inbrowser="true")
 
 from docx import Document
 import textwrap
 import pandas as pd
+import pykakasi
+import hangul_romanize
+import pinyin
+from langdetect import detect
 
 #Uncomment these for Huggingface
 nltk.download('maxent_ne_chunker') #Chunker
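
A quick orientation to the four new dependencies, as a minimal editorial sketch rather than part of the commit (the package names match the imports above; the sample strings are invented):

import pykakasi                                   # Japanese kana/kanji to romaji
from hangul_romanize import Transliter           # Korean hangul to roman letters
from hangul_romanize.rule import academic
import pinyin                                     # Chinese characters to pinyin
from langdetect import detect                     # language-code detection

print(pykakasi.kakasi().convert("日本語")[0]['hepburn'])    # e.g. 'nihongo'
print(Transliter(academic).translit("한국어"))               # e.g. 'hangug-eo'
print(pinyin.get("中文"))                                    # e.g. 'zhōngwén'
print(detect("Apple is looking at buying U.K. startup"))    # e.g. 'en'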
 
 spacy.cli.download('ko_core_news_sm')
 spacy.cli.download('ja_core_news_sm')
 spacy.cli.download('zh_core_web_sm')
+spacy.cli.download("es_core_news_sm")
+spacy.cli.download("de_core_news_sm")
+
+nlp_en = spacy.load("en_core_web_sm")
+nlp_de = spacy.load("de_core_news_sm")
+nlp_es = spacy.load("es_core_news_sm")
+nlp_ko = spacy.load("ko_core_news_sm")
+nlp_ja = spacy.load("ja_core_news_sm")
+nlp_zh = spacy.load("zh_core_web_sm")
 
 nlp = spacy.load('en_core_web_sm')
 translator = Translator()
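
The six module-level pipelines exist so text can be routed to the right model by language code; the GuidedReading function added further down builds exactly this kind of mapping. A minimal sketch of the dispatch idea, assuming the loads above succeeded (pos_tags_for is a hypothetical helper, not in the commit):

from langdetect import detect

nlp_dict = {"en": nlp_en, "de": nlp_de, "es": nlp_es, "ko": nlp_ko, "ja": nlp_ja, "zh-cn": nlp_zh}

def pos_tags_for(text):
    lang = detect(text)                      # e.g. 'en', 'de', 'zh-cn'
    pipeline = nlp_dict.get(lang)
    if pipeline is None:
        return None                          # unsupported language
    return [(token.text, token.pos_) for token in pipeline(text)]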
 
     doc = nlp(text)
 
     verbs_nouns = []
+    verbs_nouns_str = ""
     other_words = []
+    other_words_str = ""
     pos_string = []
 
     for token in doc:
         if token.pos_ in ["VERB", "NOUN"]:
+            verbs_nouns_str += token.text + " || "
+            other_words_str += "__ "
+            #verbs_nouns.append(token.text)
+        #elif token.text in [punct.text for punct in doc if punct.is_punct]:
+        #    verbs_nouns.append(token.text)
+        #    other_words.append(token.text)
         else:
+            other_words_str += token.text + " || "
+            #other_words.append(token.text)
+            #pos_string.append(token.pos_)
 
+    verbs_nouns_text = verbs_nouns_str #" ".join(verbs_nouns)
+    other_words_text = other_words_str #" ".join(other_words)
+    pos_string_text = "Debug Test" #" ".join(pos_string)
 
+    return other_words_text, pos_string_text, verbs_nouns_text
 
 SRTLangOptions = gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
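
Tracing the rewritten split_verbs_nouns by hand shows the new " || "-delimited format (editorial worked example; the sentence is invented):

# split_verbs_nouns("The cat chased the mouse.") now returns, in order:
#   other_words_text -> "The || __ __ the || __ . || "   (verbs/nouns masked as __)
#   pos_string_text  -> "Debug Test"                     (hard-coded while the POS join is commented out)
#   verbs_nouns_text -> "cat || chased || mouse || "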
 
 
     html = ""
     for line in lines:
         if line.startswith("http"):
+            html += f"<a href='{line}'> -- -- </a> | "
         else:
             html += line + "Not a link <br> \n"
     return html
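
The only change here is dropping the trailing "\n" on the link branch, so generated anchors now sit on one HTML source line separated by " | " (editorial note with an invented input):

# text_to_links("https://a.com\nhttps://b.com") ->
#   "<a href='https://a.com'> -- -- </a> | <a href='https://b.com'> -- -- </a> | "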
 
     with open(HTMLDownloadTemp, 'w') as f:
         f.write(FinalOutput)
 
+    return HTMLDownloadTemp, FinalOutput, FinalOutput
 
 #Kathryn Lingel - Pyambic Pentameter Example - PyCon US
 #Basic Language Model Code
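
Gradio pairs returned values with output components positionally, which is why this return reorder travels together with the outputs=["file", "html", "text"] reorder in the UNNWFWO interface further down. A minimal standalone illustration of that rule (demo_fn and its values are hypothetical):

import gradio as gr

def demo_fn(text):
    # first value feeds the File output, second the HTML output, third the Textbox
    return "report.html", "<b>preview</b>", "plain summary"

gr.Interface(fn=demo_fn, inputs="text", outputs=["file", "html", "text"])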
 
 
 #------------------------------------------------------------------------------------------------------------------------------
 
+def ImageTranslationTest(text, video):
+    Finalvideo = video
+    return Finalvideo
+
+def AutoSyllablePractice(String):
+    FinalOutput = ""
+
+    stringlen = len(String)
+
+    vowels = ["a", "e", "i", "o", "y"]
+    VowelSyllables = []
+    allvowels = ""
+
+    for i in vowels:
+        if i in String:
+            allvowels = allvowels + " " + String.replace(i, i + " ")
+            allvowels = allvowels + " " + String.replace(i, " " + i)
+            VowelSyllables = allvowels.split(" ")
+
+    VowelSyllablesstr = ""
+
+    for item in VowelSyllables:
+        VowelSyllablesstr += item + ", "
+
+    FinalOutput += VowelSyllablesstr
+    return FinalOutput
+
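A hand trace of AutoSyllablePractice clarifies what the double replace produces (editorial worked example; "banana" is an invented input):

# Only "a" is present, so allvowels becomes " ba na na  b an an a"
# AutoSyllablePractice("banana") -> ", ba, na, na, , b, an, an, a, "
# i.e. the word split after each vowel, then split before each vowel,
# joined into one comma-separated string (note "u" is absent from the vowel list).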
+def GuidedReading(textspreprocess, seperator):
+    FinalOutput = ""
+
+    if seperator == "Sentences":
+        textspreprocess = textspreprocess.split(".")
+        FinalOutput = ""
+    elif seperator == "lines":
+        textspreprocess = textspreprocess.splitlines()
+    else:
+        textspreprocess = textspreprocess.split(seperator)
+
+    # Load language-specific models
+    nlp_en = spacy.load("en_core_web_sm")
+    nlp_de = spacy.load("de_core_news_sm")
+    nlp_es = spacy.load("es_core_news_sm")
+    nlp_ko = spacy.load("ko_core_news_sm")
+    nlp_ja = spacy.load("ja_core_news_sm")
+    nlp_zh = spacy.load("zh_core_web_sm")
+
+    # Create a dictionary of language codes and models
+    nlp_dict = {"en": nlp_en, "de": nlp_de, "es": nlp_es, "ko": nlp_ko, "ja": nlp_ja, "zh-cn": nlp_zh}
+
+    # Define a function to POS tag and transliterate a text given its language code
+    def pos_tag_and_transliterate(text, lang):
+        # Get the model for the language
+        nlp = nlp_dict.get(lang)
+        if nlp is None:
+            return None # No model found for the language
+        # Process the text and get a list of (token, tag) tuples
+        doc = nlp(text)
+        original_pos_tags = [(token.text, token.pos_) for token in doc]
+        # Use different libraries for different languages
+        if lang == "ja":
+            # Use pykakasi for Japanese
+            from pykakasi import kakasi
+            # Set the modes using properties
+            k = kakasi()
+            k.hira2a = True # Hiragana to ascii
+            k.kata2a = True # Katakana to ascii
+            k.kanji2a = True # Kanji to ascii
+            k.roman = "Hepburn" # Use Hepburn romanization
+            #words = re.findall(r"\S+|\s+", text)
+            words = [token.text for token in doc]
+            # Create a dictionary that maps each original word to its transliterated form with spaces
+            translit_dict = {word: k.convert(word)[0]['hepburn'] for word in words}
+            # Get the transliterated text with spaces
+            transliterated = " ".join(translit_dict.values())
+            # Replace the words in the original POS tag list with their transliterated forms
+            translit_pos_tags = [(translit_dict.get(word, word), tag) for word, tag in original_pos_tags]
+            # Get the transliterated language code
+            lang_translit = lang + "-translit"
+        elif lang == "ko":
+            # Use hangul-romanize for Korean
+            from hangul_romanize import Transliter
+            from hangul_romanize.rule import academic
+            transliter = Transliter(academic)
+            # Create a dictionary that maps each original word to its transliterated form with spaces
+            words = [token.text for token in doc]
+            translit_dict = {word: " ".join(transliter.translit(word)) for word in words}
+            # Get the transliterated text with spaces
+            transliterated = " ".join(translit_dict.values())
+            # Replace the words in the original POS tag list with their transliterated forms
+            translit_pos_tags = [(translit_dict.get(word, word), tag) for word, tag in original_pos_tags]
+            # Get the transliterated language code
+            lang_translit = lang + "-translit"
+        elif lang == "zh-cn":
+            # Use pinyin for Chinese
+            from pinyin import get
+            # Get the transliterated text without spaces
+            transliterated = get(text)
+            # Replace the words in the original POS tag list with their transliterated forms
+            translit_pos_tags = [(get(word), tag) for word, tag in original_pos_tags]
+            # Get the transliterated language code
+            lang_translit = lang + "-translit"
+        else:
+            # No transliteration needed for other languages
+            return (text, original_pos_tags, text, original_pos_tags, lang)
+        # Return a tuple of the original text, the original POS tags, the transliterated text, the transliterated POS tags, and the transliterated language code
+        return (text, original_pos_tags, transliterated, translit_pos_tags, lang_translit)
+
+    # Create an empty list to store the results
+    texts = []
+
+    # Loop through each text in the list
+    for text in textspreprocess:
+        # Detect the language of the text
+        lang = detect(text)
+        # Add the text and the language as a tuple to the results list
+        texts.append((text, lang))
+
+    # Process each text in the texts list and print the results
+    for text, lang in texts:
+        result = pos_tag_and_transliterate(text, lang)
+        if result is not None:
+            FinalOutput += f"\nLanguage: {lang}"
+            FinalOutput += f"\nText: {result[0]}"
+            if lang in ["ja", "ko", "zh-cn"]:
+                FinalOutput += f"\nTransliterated Text: {result[2]}"
+            FinalOutput += f"\n POS tags: {result[1]}"
+            if lang in ["ja", "ko", "zh-cn"]:
+                FinalOutput += f"\nTPOS tags: {result[3]}"
+            FinalOutput += f"\n"
+
+    return FinalOutput
+
+guidedreadingseperator = gr.Dropdown(choices=[".", ",", "lines", "Sentences"], value=".", interactive=True)
+textspreprocess = [
+    ["Bing created this example \nApple is looking at buying U.K. startup for $1 billion \nApple schaut sich für 1 Milliarde Dollar ein Startup aus Großbritannien an \nApple está mirando comprar una startup del Reino Unido por mil millones de dólares \n애플은 영국의 스타트업을 10억 달러에 사려고 한다 \nアップルは、英国のスタートアップを10億ドルで買収する予定だ \n苹果正考虑以10亿美元收购英国初创公司", "\n"]
+]
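
Given the example row above, a direct call looks like this (editorial usage sketch; the output shape follows the f-strings inside GuidedReading):

sample_text, sample_sep = textspreprocess[0]
print(GuidedReading(sample_text, "lines"))
# For each line: "Language: <code>", "Text: ...", a "Transliterated Text"
# plus "TPOS tags" for ja/ko/zh-cn, and "POS tags" as (token, POS) tuples.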
 
 LLPromptIdeas = """
 
+<p> Let's try this: I will give you the English word and you find the <<chinese>> word and then mix the meaning of the word with the mnemonic </p>
+
+Bing's Explanation
+You're welcome. I'm happy to help. If I wanted to explain the rules of this game clearly and concisely, I would do something like this:
+
+The game is about finding the <<Chinese>> word for an English word and making a mnemonic that mixes the meaning of the word with the mnemonic.
+The game has two players: one who gives the English word and one who finds the <<Chinese>> word and makes the mnemonic.
+The player who gives the English word can choose any word they want, as long as it has a <<Chinese>> equivalent.
+The player who finds the <<Chinese>> word and makes the mnemonic can use any online or offline resources they want, such as dictionaries, translators, or search engines.
+The player who finds the <<Chinese>> word and makes the mnemonic should write their answer in the format: The <<Chinese>> word for X is Y <<(pinyin)>>, which means Z. Here are some possible mnemonics that mix the meaning of the word with the mnemonic: A, B, C, D. </p>
+
+<hr>
+
 <p> Make jokes while following rules for a syllogism jokes game:
 
 The game can be played with any number of people.
 
 <hr>
 
+Do you know pydot?
+Please create code for a class diagram using the pydot library in python for the following topic/entity
+
+<hr>
+
+(System/First request) Your job is to lengthen Text sent to you in a meaningful way. You must create 20 paragraphs for each Text line sent by the user
+(User) Text: I went to the beach
+
+<hr>
+
+replace as many words with emojis in the sentence Life is very sweet <br>
+next sentence is AI Town is a virtual town where AI characters live, chat and socialize.
+
+<hr>
+
+<p> </p>
 """
 
+def display_website(link):
+    html = f"<iframe src='{link}' width='100%' height='1000px'></iframe>"
+    gr.Info("If 404 then the space/page has probably been disabled - normally due to a better alternative")
+    return html
+
+randomExposuremessageText = ["Great Test for LLM function calling (with Gradio Client)", "Unknown Tracker Tab = Incomplete Reading Assistant Idea - HTML app based on text to be read", "Bing mnemonic - lost = dont ignore unusual sounds here inside lost cave", "1000 verbs in lists of 100, verbs = easy sentence structure estimation (SVO, SOV, etc.)", "Can put any message here in the navigation tab"]
+
+def randommarquee():
+    randomExposuremessagelistitem = ""
+    randomExposuremessagelistitem = str(random.sample(randomExposuremessageText, 1)).replace("['", "").replace("']", "")
+    #randomExposuremessagelistitem2 = str(random.sample(randomExposuremessageText, 1)).replace("['", "").replace("']", "")
+    return f" <marquee> { randomExposuremessagelistitem } </marquee> "
+
 
 # Define the Gradio interface inputs and outputs for video split
 spvvideo_file_input = gr.File(label='Video File')
 
 Translationchuncksize = gr.Number(value=4998)
 
+randomExposuremessage = randommarquee()
+randomExposuremessage2 = randommarquee()
 
+VideoTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
 
 with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
+    gr.HTML('<div style="display: flex; justify-content: center; align-items: center; height: 100%;"> ---- Under Construction: Slowly figuring out what AI integrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger) ----</div>')
+    with gr.Row():
+        PracticeExposure = gr.HTML(randomExposuremessage)
+        PracticeExposure2 = gr.HTML(randomExposuremessage2)
     with gr.Row():
         with gr.Column(scale=1):
+            gr.HTML("Advanced Repetition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
+        with gr.Column(scale=1):
+            gr.HTML("<p>Timing Practice - Repetition: Run from it, Dread it, Repetition is inevitable - Thanos --> Repetition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repetition is a multitask activity like driving must be subconscious process to show mastery </p>")
+    gr.HTML(""" <a href='https://github.com/eugeneyan/open-llms'> -- Opensource List -- </a> | <a href='https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard'> -- Open LLM Leaderboard -- </a> | <a href='https://huggingface.co/spaces/sanchit-gandhi/whisper-jax'> -- Whisper JAX -- </a> | <a href="https://translate.google.com/?hl=en&tab=TT"> -- Google Translate -- </a> | <a href='https://huggingface.co/spaces/damo-vilab/modelscope-text-to-video-synthesis'> -- Modelscope Text to Video -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion'> -- stable-diffusion 2 -- </a> | <a href='https://huggingface.co/spaces/stabilityai/stable-diffusion-1'> -- stable-diffusion 1 -- </a> | <a href='https://huggingface.co/spaces/kakaobrain/karlo'> -- karlo 1 -- </a> | <a href='https://huggingface.co/spaces/suno/bark'> -- Bark (TTS) -- </a> | <a href='https://chat.lmsys.org/'> -- Offline Text Model Demos -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> | <a href='https://beta.elevenlabs.io/'> -- Eleven Labs -- </a> | <a href='https://www.d-id.com/'> -- Animate an Image -- </a> | <a href='https://voice.ai/'> -- Clone a voice -- </a> | <a href='https://openai.com/pricing'> -- OpenAI pricing -- </a> | <a href='https://huggingface.co/spaces/sohojoe/soho-clip-embeddings-explorer'> -- Image Training Data Search -- </a> | <a href='https://huggingface.co/spaces/huggingchat/chat-ui'> -- Huggingface Chat -- </a> | <a href='https://huggingface.co/spaces/bguisard/stable-diffusion-nano'> -- 128x128 Stable Diffusion (Fast) -- </a> | <a href='https://huggingface.co/spaces/colonelwatch/abstracts-index'> -- Search 95 million research abstracts -- </a> | <a href='https://huggingface.co/datasets/roneneldan/TinyStories'> -- Tiny Stories Dataset -- </a> | <a href='https://huggingface.co/spaces/lykeven/visualglm-6b'> -- Visualglm6b - Discuss images -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/camenduru/potat1'> -- Potat1 Text2vid -- </a> | <a href='https://twitter.com/willdepue/status/1661781355452325889'> -- Alexandria Project (Will Depue) - Free Embeddings -- </a> | <a href='https://artsandculture.google.com/'> -- Google Arts and Culture Portal -- </a> | <a href='https://huggingface.co/spaces/Matthijs/whisper_word_timestamps'> -- Word Level Timestamps -- </a> | <a href='https://huggingface.co/spaces/zaanind/NLLB-translation'> -- NLLB 600M Demo -- </a> | <a href='https://huggingface.co/spaces/hysts/zeroscope-v2'> -- Zeroscope v2 Text to video -- </a> | <a href='https://huggingface.co/spaces/SpacesExamples/ComfyUI'> -- ComfyUI Text to Image -- </a> | <a href='https://huggingface.co/spaces/DeepFloyd/IF'> -- Deepfloyd IF - Text in image -- </a> | <a href='https://huggingface.co/spaces/ysharma/ChatGPT-Plugins-in-Gradio'> -- ChatGPT Custom Plugins Test Space -- </a> | <a href='https://www.reddit.com/r/LocalLLaMA/'> -- r/LocalLlama -- </a> | <a href='https://www.reddit.com/r/singularity/'> -- r/Singularity -- </a> | <a href='https://huggingface.co/spaces/hysts/SD-XL'> -- SD-XL Test Space -- </a> | <a href='https://huggingface.co/spaces/facebook/seamless_m4t'> -- Seamless M4T - Translation one stop shop -- </a> | <a href='https://huggingface.co/spaces/codellama/codellama-playground'> -- Code Llama playground -- </a> | <a href='https://huggingface.co/spaces/Voicemod/Text-to-Sing'> -- Text to sing -- </a> | <a href='https://huggingface.co/spaces/camenduru-com/webui'> -- Stable Diffusion Webui (Camenduru Space) -- </a> | <a href='https://huggingface.co/spaces/ysharma/WizardCoder34b'> -- Wizard Coder 34B -- </a> | <a href='https://huggingface.co/spaces/chansung/co-write-with-llama2'> -- Cowrite with llama2 -- </a> | <a href='https://huggingface.co/spaces/fffiloni/Image-to-Story'> -- Image to Story -- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://github.com/THUDM/AgentBench'> -- Agent Benchmarks -- </a> | <a href='https://www.convex.dev/ai-town'> -- AI Town Live Demo -- </a> = <a href='https://github.com/a16z-infra/ai-town'> -- AI Town Repository (Deployment) -- </a> | <a href='https://github.com/joonspk-research/generative_agents/tree/main'> -- Generative Agents: Interactive Simulacra of Human Behavior (Research paper Repository) -- </a> | """)
+    with gr.Accordion("LLM HF Spaces (Click Here to Open) - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
+        with gr.Row():
+            linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://lmsys-chatbot-arena.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
+            chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left")
+        with gr.Row():
+            chatspace = gr.HTML("Chat Space Chosen will load here")
+            chatspacebtn.click(display_website, inputs=linktochat, outputs=chatspace)
+    with gr.Row():
+        with gr.Column(scale=1):
+            with gr.Tab("Rep - HTML"):
+                gr.HTML("UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know")
+                gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
                 See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj"> Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>) on <a href="https://codepen.io">CodePen</a>. </iframe>""")
             with gr.Tab("Rep - Gradio"):
                 gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
             with gr.Tab("Navigation"):
                 gr.HTML("Picture Annotation <br> Chorus Focused Word List <br> Merged Subtitles <br> Repetitive Audio (TTS) <br> Word and Sentence Jumbling <br> Unkown: Wordnet <br> Unknown: Wikipeadia <br>")
+                PracticeExposureInput = gr.Textbox("", placeholder="Exposure practice = look up", label="Exposure at the top")
                 PracticeExposurebtn = gr.Button("Change Default")
                 PracticeExposurebtn.click(fn=changeexposuretext, inputs=PracticeExposureInput, outputs=PracticeExposure)
+            with gr.Tab("Words Lists"):
+                gr.HTML("Stop, Sight(Dolch) and other Wordlists")
+                gr.HTML("Wikipedia <br> Basic: <a href='https://en.wikipedia.org/wiki/Dolch_word_list'> -- Dolch (Sight) Words -- </a> | <br> Advanced: <a href='https://en.wikipedia.org/wiki/Blend_word'> -- Blend Word -- </a> | <a href='https://en.wikipedia.org/wiki/List_of_portmanteaus'> -- List_of_portmanteaus -- </a> | ")
+                gr.HTML("Reddit <br> <a href='https://www.reddit.com/r/wordplay/'> -- Wordplay -- </a> | ")
+                gr.HTML("Language Tests <br> ")
+                gr.HTML("Other <br> <a href='https://english.stackexchange.com/'> -- English (StackExchange) -- </a> | <a href='https://english.stackexchange.com/questions/572884/what-is-it-called-when-two-words-are-combined-by-overlapping-each-other'> -- Overlapping Blends (StackExchange) -- </a> | ")
             with gr.Tab("Vector Database = Memorisation"):
+                gr.HTML("Phrasebook on demand in realtime <br><br> Open AI - 10000 * 1000tokens (+- 4000 characters) = 1$ (0.0001 per 1000 tokens / 750 words), Cohere Multilingual = free for personal use / Commercial use = \n Vector Database query = Better than text search but not for logical relationships")
         with gr.Column(scale=3):
             with gr.Tab("Beginner - Listen + Read"):
+                gr.Label("1 new word a minute while recycling the words from the previous minutes")
                 with gr.Row():
                     with gr.Column(scale=1):
                         gr.HTML("Listening - Songs - Chorus <br> Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <br> <b>Tiny Stories dataset is like a graded reader</b> <br>")
 
                         gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
                         gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repitition for tts practice")
                     with gr.Column(scale=1):
+                        gr.HTML("""Reading - Caption images (SD/Dalle-E) <br> <a href='https://unsplash.com/'> -- Unsplash - free images -- </a> | <a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> --Huggingface CLIP-Interrogator Space-- </a> | <a href='https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2'> -- Clip interrogator 2 -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- Tag2Text is faster than clip -- </a> | <br> <a href='https://huggingface.co/spaces/bkhmsi/Word-To-Image'> -- Transform word to an image -- </a> | <a href='https://huggingface.co/spaces/microsoft/Promptist'> -- Promptist (Microsoft) -- </a> | <a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a> """)
+                        with gr.Accordion("RAM/Tag2Text Space - Create Tags here and Copy paste", open=False):
+                            RAMSpaceLink = gr.Textbox("https://xinyu1205-recognize-anything.hf.space")
+                            RAMSpacetest = gr.HTML("")
+                            RAMSpacetestbtn = gr.Button('Load Space')
+                            RAMSpacetestbtn.click(display_website, RAMSpaceLink, RAMSpacetest)
+                        with gr.Accordion("SAM Space Test", open=False):
+                            SAMSpaceLink = gr.Textbox("https://curt-park-segment-anything-with-clip.hf.space")
+                            SAMSpacetest = gr.HTML("")
+                            SAMSpacetestbtn = gr.Button('Load Space')
+                            SAMSpacetestbtn.click(display_website, SAMSpaceLink, SAMSpacetest)
                         gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit <br> if theres an error you have to remove the foreign letters and place roman ones")
+                        gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
                 #with gr.Tab("Transcribe - RASMUS Whisper"):
                     #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
+            with gr.Tab("Beginner - Vague Language"):
+                gr.HTML("Some Vague Words - Quantifiers, Pronouns, etc. <br/> <br/> Very, Many, Few, Lots, <hr> Find Replace all nouns with something/someone or and for verbs figure out how to generalise them")
+
             with gr.Tab("Advanced - LingQ Addon Ideas"):
                 gr.HTML("<a href='https://www.lingq.com/en/'>Find LingQ Here --> https://www.lingq.com/en/</a>")
                 with gr.Tab("Visual - Multiline Custom Video Subtitles"):
 
                 with gr.Tab("Split video to segments"):
                     gr.HTML("<a href='https://www.vlchelp.com/automated-screenshots-interval/'>How to make screenshot in vlc - https://www.vlchelp.com/automated-screenshots-interval/</a><br>")
                     gr.Interface(VideotoSegment, inputs=[spvvideo_file_input, spvsubtitle_file_input], outputs=spvdownload_output)
+                    gr.TextArea("Placeholder for ffmpeg command generator and ffmpeg-python code to split video")
                     gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
                 with gr.Tab("Audio - Only English thoughts as practice"):
                     gr.HTML("For Audio Most productive is real time recall of native (where your full reasoning ability will always be) <br><hr> Find Replace new lines of the foreign text with full stops or | to get per word translation")
                     gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions2], outputs="audio", description="Paste only english words in foreign order and then keep removing the words from this to practice as effectively")
+            with gr.Tab("Transition is the end goal (SOV, SVO, VSO)"):
+                gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text", description="Chunks creator")
                 with gr.Row():
                     with gr.Column():
+                        gr.Interface(fn=AutoSyllablePractice, inputs="text", outputs="text", description="One Word At A Time | Audio Spelling Practice Using vowels as the separator")
                         gr.Textbox("A word is a list of letter as a fact is a list of words. Both are in a specific order. What is most important is practice the order so randomiser is the tool", lines=4)
                         gr.Interface(fn=RandomiseTextbyType, inputs=["text", RandomiseTextType], outputs="text", description="Randomise order within words, sentences, paragrahs")
                     with gr.Column():
 
                         gr.HTML("<p> Spell multiple words simultaneously for simultaneous access </p> <p> Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
                         gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
                         gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Noun and Verbs only (Plus punctuation)")
 
             with gr.Tab("Unknown Tracker"):
                 gr.HTML("Repitition of things you know is a waste of time when theres stuff you dont know <p> In Language the goal is bigger vocab --> Knowledge equivalent = question answer pairs but to get to those you need related information pairs</p> <p> Vocab = Glossary + all non text wall(lists, diagrams, etc.)</p>")
                 gr.Textbox("Placeholder for a function that creates a set list and can takes a list for known words and auto find replaces the stuff you know out of the content")
                 gr.Textbox("Place holder for a translate to english interface so that highlighting can still work as only english supported for now")
+                gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["file", "html", "text"], description="HTML mini App - UNNWFWO (To track verbs you dont know for listening practice). Use the text from here to create lists you use for the TTS section")
             with gr.Tab("Unique word ID - use in Infranodus"):
+                with gr.Accordion(label="Infranodus", open=False):
+                    gr.HTML(" <a href='https://infranodus.com/'> -- Infranodus - Word Level Knowledge graphs -- </a> | <br> Use the below interfaces to find the items that dont have entries --> These will represent new concepts or people which need to be understood individually to fully understand the text --> Infranodus search will help find related and unrelated investigation paths <br><br> TODO Figure Output Zoom / Image Dimensions")
+                    gr.Image(source="upload", label="Open Infranodus Screenshot")
+                    gr.Image(source="upload", label="Open Infranodus Screenshot")
                 gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
                 gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
                 gr.Interface(fn=WikiSearch, inputs="text", outputs="text", description="One word at a time Unique word suggestions (wiki articles)")
             with gr.Tab("Automating related information linking"):
+                gr.HTML("Questions - Taking and suggesting questions to ask = new education --> Esp. Infranodus type outer discourse identification as question generation")
+                gr.HTML("The point of reading is to refine future actions especially problem solving --> Creating problem scenarios = thinking ahead of time = One form of effective reading")
+            with gr.Tab("Thinking Practice (POS)"):
+                gr.HTML("By removing all nouns and verbs you get a format to practice thinking about your words to use to make sentences which make sense within constraints")
+                with gr.Tab("Sentence to Practice Format"):
                     gr.Interface(fn=split_verbs_nouns , inputs="text", outputs=["text", "text", "text"], description="Comprehension reading and Sentence Format Creator")
             with gr.Tab("Knowledge Ideas - Notetaking"):
                 gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
 
                 <p>Application of Knowledge = App Version of the text (eg. Jupyter Notebooks) is what you create and learn first</p>
                 """)
                 gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
+                gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textbox", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
             with gr.Tab("Automated Reading Assitant"):
+                gr.Interface(fn=GuidedReading, inputs=["text", guidedreadingseperator], outputs="text", description="POS Tag and Transliteration", examples=textspreprocess)
                 gr.Textbox('Parts of Speech based | Automating the Notetaking Tab either directly or using visual llm to use this interface efficiently')
                 gr.HTML("Types of comprehension agent <hr> Speed of Comprehension = Verb comprehension <br> From the following please extract the verbs <br> now explain each in context <br> Next, use picture descriptions for each word in the verb list <br> Create combinations using the verb list <hr> ")
+                gr.HTML("How VERBS RELATE TO EACH OTHER --> Shared Nodes - what other verbs are connected to the noun in a INFRANODUS With POS Tag filters")
                 gr.HTML("Tree and Branches approach to learning = familiarity with keywords/headings/summaries before reading the whole text <hr> Productivity/Work revolves around repitition which can be found looking for plurals and grouping terms eg. Headings and Hyper/Hyponyms Analysis")
+                gr.HTML("Sentence to PyDot graph")
+                gr.HTML("Currently a bug that locks all buttons in the space when you use this above example - Reload to fix")
+            with gr.Tab("Video/AR"):
+                gr.HTML("HUD Experiment - so far sending the frame directly to a html frame --> that only solves image but the audio will be skipping so how do we solve that")
+                gr.Interface(fn=ImageTranslationTest , inputs=["text", VideoTestInput], outputs="video")
+                gr.HTML("Nicolai Nielsen Youtube channel - aruco markers = position --> can test using premade ones from an image search")
                 gr.Textbox("Alpha Test version = Real time Lablling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid --> Adjusted Demo")
                 gr.HTML("Some Prompt ideas --> Prompt: Describe the place where these descriptions may be (You job is to be speculative for brainstorming purposes): A dog and a boy, the area is texas, the weather is sunny, the date is 01 May 2021 <hr> Prompt Content Ideas Ideas Clip Interrogator + Location Data aka tags for place, location and time + general news updates on the location + overview of the items in the location <br> Location based advise is most important but after that is information observed by appliances in the location eg. Times Computer turned on, times geyser inspected, amount of time keys havent been touched etc. <br> each location will have an ai personality that will relay more information ")
+                gr.HTML("<a href='https://huggingface.co/spaces/xinyu1205/Recognize_Anything-Tag2Text'> -- RAM and Tag2Text -- </a> | <a href='https://huggingface.co/spaces/curt-park/segment-anything-with-clip'> -- SAM with Clip -- </a>")
                 gr.Interface(fn=arrealtimetestidea, inputs='image', outputs="text", description="Vision Assistant - see and execute")
                 gr.Textbox("Placeholder for webcam stream")
+                gr.Interface(fn=arrealtimetestidea, inputs="text", outputs="text", description="Vision Assistant aka Free Observation llm judgement (GPT Vision API goes here when released). FPS is the difference between realtime app and static image")
             with gr.Tab("Random Ideas"):
                 gr.HTML("""<p>Spaces Test - Still Undercontruction --> Next Milestone is Turning this interface handsfree | Knowledge is a Language but productive knowledge is find replace as well | LingQ is good option for per word state management</p> <p> Arrows app json creator for easy knowledge graphing and spacy POS graph? --> Questions? -->
                 <p> ChatGPT Turns Learning into a read only what you dont know ask only what you dont know feedback loop --> All you have to do is keep track of what prompts you have asked in the past</p> """)
 
                 gr.HTML("Predictable to identify the parts of picture being described --> The description moves in one direction from one side of the image to the other side is easiest <hr>")
                 gr.HTML("Image = instant comprehension like Stable Diffusion --> Audiovisual experience is the most optimal reading experience <br> Manga with summary descriptions for the chapters = Most aligned visual to audio experience")
             with gr.Tab("LLM Prompts and games"):
+                gr.HTML("TODO = Llama-cpp-python with falcon 7b / openllama 7b integrated into each of the interfaces in this space aka --> interfaces as tools for open source llm <br><br> <b>Test using gradio space/interfaces through the api as function calls for gpt3.5 and 4</b>")
+                with gr.Accordion("Some prompt ideas"):
+                    gr.HTML(LLPromptIdeas)
+            with gr.Tab("Gradio Client Tests"):
+                gr.HTML("How to return components here in gradio (as each client interface needs different inputs) like in react")
 
 lliface.queue().launch() #(inbrowser="true")