KwabsHug commited on
Commit
4608ef6
1 Parent(s): 8c1e871

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +168 -39
app.py CHANGED
@@ -9,9 +9,12 @@ import time
9
  import random
10
  import os
11
  import zipfile
12
- import gradio as gr
13
  import ffmpeg
14
-
 
 
 
 
15
 
16
  nltk.download('maxent_ne_chunker') #Chunker
17
  nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
@@ -29,7 +32,7 @@ nltk.download('opinion_lexicon') #Sentiment words
29
  nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
30
 
31
 
32
- spacy.cli.download("en_core_web_sm")
33
 
34
  nlp = spacy.load('en_core_web_sm')
35
  translator = Translator()
@@ -183,7 +186,47 @@ def merge_lines(roman_file, w4w_file, full_mean_file, macaronic_file):
183
  return "\n".join(merged_lines)
184
 
185
  def TTSforListeningPractice(text):
186
- return "not finished"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
187
 
188
  def group_words(inlist):
189
  inlisttoks = inlist.split(" ")
@@ -221,7 +264,8 @@ def split_verbs_nouns(text):
221
 
222
  verbs_nouns = []
223
  other_words = []
224
-
 
225
  for token in doc:
226
  if token.pos_ in ["VERB", "NOUN"]:
227
  verbs_nouns.append(token.text)
@@ -230,11 +274,13 @@ def split_verbs_nouns(text):
230
  other_words.append(token.text)
231
  else:
232
  other_words.append(token.text)
233
-
 
234
  verbs_nouns_text = " ".join(verbs_nouns)
235
  other_words_text = " ".join(other_words)
 
236
 
237
- return verbs_nouns_text, other_words_text
238
 
239
  def split_srt_file(text): #file_path):
240
  # Open the SRT file and read its contents
@@ -327,6 +373,73 @@ def VideotoSegment(video_file, subtitle_file):
327
  # Return the ZIP archive for download
328
  return 'segmented_files.zip'
329
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
330
 
331
  # Define the Gradio interface inputs and outputs for video split
332
  spvvideo_file_input = gr.File(label='Video File')
@@ -338,6 +451,7 @@ groupinput_text = gr.inputs.Textbox(lines=2, label="Enter a list of words")
338
  groupoutput_text = gr.outputs.Textbox(label="Grouped words")
339
 
340
  with gr.Blocks() as lliface:
 
341
  with gr.Tab("Welcome"):
342
  gr.HTML("""<h1> Spaces Test - Still Undercontruction </h1> <p> You only learn when you convert things you dont know to known --> Normally Repetition is the only reliable method for everybody </p>
343
  <p> Knowledge is a Language but productive knowledge is find replace as well </p> <p>LingQ is good option for per word state management</p> <p> Arrows app json creator for easy knowledge graphing and spacy POS graph? </p>
@@ -347,49 +461,48 @@ with gr.Blocks() as lliface:
347
  <p> ChatGPT Turns Learning into a read only what you dont know ask only what you dont know feedback loop --> All you have to do is keep track of what prompts you have asked in the past</p>
348
  <p> Spell multiple words simultaneously for simultaneous access </p>
349
  """)
350
- with gr.Tab("Unique word ID"):
351
  gr.Interface(fn=unique_word_count, inputs="text", outputs="text", title="Wordcounter")
352
- gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], title="Word suggestions")
353
  gr.Interface(fn=WikiSearch, inputs="text", outputs="text", title="Unique word suggestions(wiki articles)")
354
  with gr.Tab("Automating related information linking"):
355
  gr.HTML("Questions - Tacking and suggesting questions to ask = new education")
356
- with gr.Tab("Spelling and Chunks"):
357
- gr.Text("Merged Spelling Practice Placeholder - Spell multiple words simultaneously for simultaneous access")
358
- gr.HTML("<p> Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
359
- with gr.Tab("Spelling Simplification - Use a dual language list"):
360
- gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
361
- with gr.Tab("Chunks"):
362
- gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text")
363
- gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", title="Noun and Verbs only (Plus punctuation)")
364
  with gr.Tab("Timing Practice - Repitition"):
365
  gr.HTML("<p>Run from it, Dread it, Repitition is inevitable - Thanos</p> <p>Next Milestone is Turning this interface handsfree</p>")
366
  with gr.Tab("Gradio Version"):
367
- gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, title="Word Grouping and Rotation", description="Group a list of words into sets of 10 and rotate them every 60 seconds.").queue()
368
  with gr.Tab("HTML Version"):
369
  gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/preview/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
370
  See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj">
371
  Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>)
372
  on <a href="https://codepen.io">CodePen</a>.
373
  </iframe>""")
374
- with gr.Tab("Knowledge Ideas"):
375
- gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
376
- <p>My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient</p>
377
- <p>Big Picture = Expand the Heading and the subheadings and compare them to each other</p>
378
- <p>Application of Knowledge = App Version of the text (eg. Jupyter Notebooks) is what you create and learn first</p>
379
- """)
380
- with gr.Tab("Beginner - Songs - Chorus"):
381
- gr.HTML("Essentially if the sounds are repeated or long notes they are easy to remember")
382
- gr.Interface(fn=TTSforListeningPractice, inputs="text", outputs="text", title="Placeholder - paste chorus here and use TTS or make notes to save here")
383
- with gr.Tab("Transcribe - RASMUS Whisper"):
384
- gr.HTML("""<p>If this tab doesnt work use the link below ⬇️</p> <a href="https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles">https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles</a>""")
385
- gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
 
 
 
 
386
  with gr.Tab("Advanced - LingQ Addons ideas"):
387
- gr.HTML("Extra functions needed - Persitent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
 
 
388
  with gr.Tab("Merged Subtitles"):
389
- gr.HTML("SRT Contents to W4W Split SRT for Google Translate")
390
- gr.Interface(fn=split_srt_file, inputs="text", outputs="text")
391
- gr.HTML("Text for w4w creation in G Translate")
392
- gr.Interface(fn=splittext, inputs="text", outputs="text")
 
393
  with gr.Row():
394
  RomanFile = gr.File(label="Paste Roman")
395
  W4WFile = gr.File(label="Paste Word 4 Word")
@@ -404,9 +517,25 @@ with gr.Blocks() as lliface:
404
  with gr.Tab("Split video to segments"):
405
  gr.Interface(VideotoSegment, inputs=[spvvideo_file_input, spvsubtitle_file_input], outputs=spvdownload_output)
406
  with gr.Tab("Sentence to Format"):
407
- gr.Interface(fn=split_verbs_nouns , inputs="text", outputs=["text", "text"], title="Comprehension reading and Sentence Format Creator")
408
  gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
409
- with gr.Tab("Dictionary from text"):
410
- gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Two Letter Dictionary")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
411
 
412
- lliface.launch()
 
9
  import random
10
  import os
11
  import zipfile
 
12
  import ffmpeg
13
+ from gtts import gTTS
14
+ #from io import BytesIO
15
+ from collections import Counter
16
+ from PIL import Image, ImageDraw, ImageFont
17
+ import numpy as np
18
 
19
  nltk.download('maxent_ne_chunker') #Chunker
20
  nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
 
32
  nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
33
 
34
 
35
+ #spacy.cli.download("en_core_web_sm")
36
 
37
  nlp = spacy.load('en_core_web_sm')
38
  translator = Translator()
 
186
  return "\n".join(merged_lines)
187
 
188
def TTSforListeningPractice(text):
    """Synthesize English speech for *text* with gTTS and save it as an MP3.

    Args:
        text: The text to read aloud (e.g. a song chorus pasted by the user).

    Returns:
        The path of the generated audio file, "CurrentTTSFile.mp3",
        suitable for a Gradio "file" output.
    """
    language = "en"
    # BUG FIX: the original passed slow="False". A non-empty string is
    # truthy, so gTTS actually spoke in SLOW mode. Pass the boolean False.
    speech = gTTS(text=text, lang=language, slow=False)
    speech.save("CurrentTTSFile.mp3")
    return "CurrentTTSFile.mp3"
196
+
197
def AutoChorusInvestigator(sentences):
    """Report which lines of a lyrics blob occur more than once.

    Lines that repeat are likely chorus lines, so surfacing them helps
    the user isolate the chorus for listening practice.
    """
    line_counts = Counter(sentences.splitlines())

    # Keep first-occurrence order, same as iterating the Counter's items.
    repeated = [line for line, seen in line_counts.items() if seen > 1]

    if not repeated:
        return "No duplicate sentences found in the file."

    report = "The following sentences appear more than once in the file:"
    for line in repeated:
        report += "\n" + line
    return report
213
+
214
+ def AutoChorusPerWordScheduler(sentences):
215
+ words = set(sentences.split(" "))
216
+ wordsoneattime =[]
217
+ practicestring = ""
218
+
219
+ FinalOutput = "This is supposed to output the words in repetition format (i.e. schedule for repitition) \nCurrent Idea = 1 new word every min and 1 old word every second" + "\n\nWords: \n"
220
+ for word in words:
221
+ wordsoneattime.append(word)
222
+ for i in range(0, 59):
223
+ practicestring += word + " "
224
+ practicestring += random.choice(wordsoneattime) + " "
225
+ FinalOutput += word + "\n "
226
+ practicestring += "\n"
227
+
228
+ FinalOutput += practicestring
229
+ return FinalOutput
230
 
231
  def group_words(inlist):
232
  inlisttoks = inlist.split(" ")
 
264
 
265
  verbs_nouns = []
266
  other_words = []
267
+ pos_string = []
268
+
269
  for token in doc:
270
  if token.pos_ in ["VERB", "NOUN"]:
271
  verbs_nouns.append(token.text)
 
274
  other_words.append(token.text)
275
  else:
276
  other_words.append(token.text)
277
+ pos_string.append(token.pos_)
278
+
279
  verbs_nouns_text = " ".join(verbs_nouns)
280
  other_words_text = " ".join(other_words)
281
+ pos_string_text = " ".join(pos_string)
282
 
283
+ return pos_string_text, verbs_nouns_text, other_words_text
284
 
285
  def split_srt_file(text): #file_path):
286
  # Open the SRT file and read its contents
 
373
  # Return the ZIP archive for download
374
  return 'segmented_files.zip'
375
 
376
def text_to_dropdown(text, id=None): #TextCompFormat
    """Render each line of *text* as an <option> inside an HTML <select>.

    Args:
        text: Newline-separated entries; one <option> is emitted per line.
        id: Optional value for the <select> element's id attribute.

    Returns:
        The HTML markup for the dropdown as a string.
    """
    from html import escape  # local import keeps this fix self-contained

    lines = text.strip().split("\n")
    markup = "<select"
    if id:
        # quote=True (the default) so quotes in the id can't break the attribute.
        markup += f' id="{escape(id)}"'
    markup += "> \n"
    for line in lines:
        # SECURITY/CORRECTNESS FIX: escape user text so <, > and & cannot
        # corrupt (or inject into) the generated HTML.
        markup += f" <option>{escape(line)}</option>\n"
    markup += "</select> \n"
    return markup
386
+
387
def text_to_links(text): #TextCompFormat
    """Turn each line of *text* into an HTML anchor, flagging non-URLs.

    Lines beginning with "http" become clickable <a> links; any other
    line is echoed followed by a "Not a link" marker.

    Returns:
        The generated HTML as a single string.
    """
    lines = text.strip().split("\n")
    html = ""
    for line in lines:
        if line.startswith("http"):
            html += f'<a href="{line}">{line}</a><br> \n'
        else:
            # BUG FIX: the original concatenated with no separator,
            # producing e.g. "helloNot a link" — add the missing space.
            html += line + " Not a link <br> \n"
    return html
396
+
397
+ HTMLCompMode = gr.Dropdown(choices=["Dropdown", "Links"], value="Dropdown")
398
+
399
def TextCompFormat(text, HTMLCompMode):
    """Convert newline-separated *text* into HTML per the selected mode.

    HTMLCompMode is "Dropdown" (a <select> of <option>s) or "Links"
    (anchor tags); any other mode yields an empty string.
    """
    if HTMLCompMode == "Dropdown":
        return text_to_dropdown(text)
    if HTMLCompMode == "Links":
        return text_to_links(text)
    return ""
406
+
407
def create_collapsiblebutton(button_id, button_caption, div_content):
    """Generate HTML for an accordion button paired with its panel <div>.

    The div's id is the button's id with a "Div" suffix, matching the
    accordion JavaScript convention used elsewhere in the app.
    """
    pieces = [
        f'<button id="{button_id}" class="accordionbtn">{button_caption}</button>',
        f'<div id="{button_id}Div" class="panel">\n{div_content}\n </div>',
    ]
    return "\n ".join(pieces)
411
+
412
+ #---------------
413
+
414
def removeTonalMarks(string):
    """Strip Mandarin pinyin tone marks, mapping each vowel to its base letter.

    Example: "nǐ hǎo" -> "ni hao". The umlaut vowel ü (in all four tones)
    maps to plain "u". Characters without a tone mark pass through unchanged.
    """
    tonalMarks = "āēīōūǖáéíóúǘǎěǐǒǔǚàèìòùǜ"
    # BUG FIX: the original replacement string had only 23 characters for
    # 24 tone-marked vowels, so the final entry ("ǜ") raised IndexError.
    # "aeiouu" repeated once per tone row covers all 24.
    nonTonalMarks = "aeiouu" * 4
    # str.translate performs the per-character mapping in one C-level pass.
    return string.translate(str.maketrans(tonalMarks, nonTonalMarks))
425
+
426
+
427
def add_text_to_image(input_image, text, output_image_path="output.png", border_size=2):
    """Draw *text*, centered with a white outline, onto an image and save it.

    Args:
        input_image: Array-like image (e.g. from a Gradio image input).
        text: The caption to render.
        output_image_path: Where the annotated PNG is written.
        border_size: Outline offset in pixels for the white border copies.

    Returns:
        The path the annotated image was saved to.
    """
    imagearr = np.asarray(input_image)
    # BUG FIX: numpy image arrays are (rows, cols) = (height, width); the
    # original unpacked them as (width, height), mis-centering the text on
    # any non-square image.
    height, width = imagearr.shape[:2]
    img = Image.fromarray(imagearr)
    draw = ImageDraw.Draw(img)
    # NOTE(review): assumes ShortBaby.ttf ships alongside the app — confirm.
    font = ImageFont.truetype("ShortBaby.ttf", 36)
    # textbbox returns (left, top, right, bottom); [2:] gives width/height
    # because the box is anchored at (0, 0).
    text_width, text_height = draw.textbbox((0, 0), text, font=font)[2:]
    # Centre the text box within the image.
    x = (width - text_width) / 2
    y = (height - text_height) / 2
    # White offset copies first form the border, then the black text on top.
    for dx, dy in [(0, 0), (border_size, border_size), (-border_size, -border_size),
                   (border_size, -border_size), (-border_size, border_size)]:
        draw.text((x + dx, y + dy), text, font=font, fill=(255, 255, 255))
    draw.text((x, y), text, font=font, fill=(0, 0, 0))
    img.save(output_image_path, "PNG")
    # BUG FIX: return the actual save path instead of hard-coded "output.png",
    # so a caller-supplied output_image_path is honored end to end.
    return output_image_path
443
 
444
  # Define the Gradio interface inputs and outputs for video split
445
  spvvideo_file_input = gr.File(label='Video File')
 
451
  groupoutput_text = gr.outputs.Textbox(label="Grouped words")
452
 
453
  with gr.Blocks() as lliface:
454
+ gr.HTML("<p>Audio = best long form attention mechanism AS it is ANTICIPATION (Awareness of something before it happens like knowing song Lyrics) FOCUSED - Attention (Focused Repitition) + Exposure (Random Repitition) </p>")
455
  with gr.Tab("Welcome"):
456
  gr.HTML("""<h1> Spaces Test - Still Undercontruction </h1> <p> You only learn when you convert things you dont know to known --> Normally Repetition is the only reliable method for everybody </p>
457
  <p> Knowledge is a Language but productive knowledge is find replace as well </p> <p>LingQ is good option for per word state management</p> <p> Arrows app json creator for easy knowledge graphing and spacy POS graph? </p>
 
461
  <p> ChatGPT Turns Learning into a read only what you dont know ask only what you dont know feedback loop --> All you have to do is keep track of what prompts you have asked in the past</p>
462
  <p> Spell multiple words simultaneously for simultaneous access </p>
463
  """)
464
+ with gr.Tab("Unique word ID - use in Infranodus"):
465
  gr.Interface(fn=unique_word_count, inputs="text", outputs="text", title="Wordcounter")
466
+ gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], title="Word suggestions - Analyse the unique words in infranodus")
467
  gr.Interface(fn=WikiSearch, inputs="text", outputs="text", title="Unique word suggestions(wiki articles)")
468
  with gr.Tab("Automating related information linking"):
469
  gr.HTML("Questions - Tacking and suggesting questions to ask = new education")
 
 
 
 
 
 
 
 
470
  with gr.Tab("Timing Practice - Repitition"):
471
  gr.HTML("<p>Run from it, Dread it, Repitition is inevitable - Thanos</p> <p>Next Milestone is Turning this interface handsfree</p>")
472
  with gr.Tab("Gradio Version"):
473
+ gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, title="Word Grouping and Rotation", description="Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
474
  with gr.Tab("HTML Version"):
475
  gr.HTML("""<iframe height="1200" style="width: 100%;" scrolling="no" title="Memorisation Aid" src="https://codepen.io/kwabs22/embed/preview/GRXKQgj?default-tab=result&editable=true" frameborder="no" loading="lazy" allowtransparency="true" allowfullscreen="true">
476
  See the Pen <a href="https://codepen.io/kwabs22/pen/GRXKQgj">
477
  Memorisation Aid</a> by kwabs22 (<a href="https://codepen.io/kwabs22">@kwabs22</a>)
478
  on <a href="https://codepen.io">CodePen</a>.
479
  </iframe>""")
480
+ with gr.Tab("Beginner - Listening and Reading"):
481
+ with gr.Tab("Listening - Songs - Chorus"):
482
+ gr.HTML("Anticipation of the item to remember is how you learn lyrics that is why songs are easy as if you heard it 10 times already your capacity to anticipate the words is great <br><br> This is where TTS helps as you are ignoring all words except the words just before the actual <hr>")
483
+ gr.HTML("<p>Fastest way to learn words = is to have your own sound reference --> probably why babies learn fast as they make random noise</p> <p>If you know the flow of the song you can remember the spelling easier</p><p>Essentially if the sounds are repeated or long notes they are easy to remember</p>")
484
+ gr.HTML("""<hr><a href="https://translate.google.com/?hl=en&tab=TT"> --Google Translate-- </a><br>""")
485
+ gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
486
+ gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repitition for tts practice")
487
+ gr.Interface(fn=TTSforListeningPractice, inputs="text", outputs="file", title="Placeholder - paste chorus here and use TTS or make notes to save here")
488
+ with gr.Tab("Reading - Caption images (SD/Dalle-E)"):
489
+ gr.HTML("Predictable to identify the parts of picture being described --> The description moves in one direction from one side of the image to the other side is easiest <hr>")
490
+ gr.HTML("Image = instant comprehension like Stable Diffusion --> Audiovisual experience is the most optimal reading experience <br> Manga with summary descriptions for the chapters = Most aligned visual to audio experience")
491
+ gr.HTML("""<a href="https://huggingface.co/spaces/pharma/CLIP-Interrogator"> --Huggingface CLIP-Interrogator Space-- </a><br> """)
492
+ gr.Interface(fn=removeTonalMarks, inputs="text", outputs="text", description="For text with characters use this function to remove any conflicting characters (if error below)")
493
+ gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt)")
494
+ #with gr.Tab("Transcribe - RASMUS Whisper"):
495
+ #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
496
  with gr.Tab("Advanced - LingQ Addons ideas"):
497
+ gr.HTML("LingQ Companion Idea - i.e. Full Translation Read along, and eventually Videoplayer watch along like RAMUS whisper space <br><br>Extra functions needed - Persitent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
498
+ gr.HTML("""<hr> <p>For Transcripts to any video on youtube use the link below ⬇️</p> <a href="https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles">https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles</a>""")
499
+ #gr.HTML("<p>If Space not loaded its because of offline devopment errors please message for edit</p> <hr>")
500
  with gr.Tab("Merged Subtitles"):
501
+ gr.HTML("Step 1 - Word for Word Translation Creation in both Directions (Paste Google Translation here)")
502
+ gr.Interface(fn=split_srt_file, inputs="text", outputs="text", description="SRT Contents to W4W Split SRT for Google Translate")
503
+ gr.HTML("Step 2 - Pronounciation (Roman) to Subtitle Format --> GTranslate returns unformatted string")
504
+ gr.Interface(fn=splittext, inputs="text", outputs="text", description="Text for w4w creation in G Translate")
505
+ gr.HTML("Step 3 - Merge into one file")
506
  with gr.Row():
507
  RomanFile = gr.File(label="Paste Roman")
508
  W4WFile = gr.File(label="Paste Word 4 Word")
 
517
  with gr.Tab("Split video to segments"):
518
  gr.Interface(VideotoSegment, inputs=[spvvideo_file_input, spvsubtitle_file_input], outputs=spvdownload_output)
519
  with gr.Tab("Sentence to Format"):
520
+ gr.Interface(fn=split_verbs_nouns , inputs="text", outputs=["text", "text", "text"], title="Comprehension reading and Sentence Format Creator")
521
  gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
522
+ with gr.Tab("Spelling and Chunks"):
523
+ gr.Text("Merged Spelling Practice Placeholder - Spell multiple words simultaneously for simultaneous access")
524
+ gr.HTML("<p> Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
525
+ with gr.Tab("Spelling Simplification - Use a dual language list"):
526
+ gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
527
+ with gr.Tab("Chunks"):
528
+ gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text")
529
+ gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", title="Noun and Verbs only (Plus punctuation)")
530
+ with gr.Tab("Knowledge Ideas - Notetaking"):
531
+ gr.HTML("""<p>Good knowledge = ability to answer questions --> find Questions you cant answer and look for hidden answer within them </p>
532
+ <p>My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient</p>
533
+ <p>Big Picture = Expand the Heading and the subheadings and compare them to each other</p>
534
+ <p>Application of Knowledge = App Version of the text (eg. Jupyter Notebooks) is what you create and learn first</p>
535
+ """)
536
+ gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
537
+ gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textarea", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
538
+ with gr.Tab("Automated Reading Assitant"):
539
+ gr.HTML("Tree and Branches approach to learning = familiarity with keywords/headings/summaries before reading the whole text <hr> Productivity/Work revolves around repitition which can be found looking for plurals and grouping terms eg. Headings and Hyper/Hyponyms Analysis")
540
 
541
+ lliface.queue().launch()