Update app.py
app.py
CHANGED
@@ -26,35 +26,35 @@ import datetime
 import cv2
 import math
 
+#When I forgot about the readme file ChatGPT suggested these - Leaving to remember the ReadmeF.md must be updated as well
 #print(gr.__version__)
 #import subprocess
-#subprocess.run(["pip", "install", "gradio==3.47.1", "--force-reinstall"])
 #subprocess.run(["pip", "install", "--upgrade", "gradio==3.47.1"]) #For huggingface as they sometimes install specific versions on container build
 
 #Uncomment these for Huggingface
-nltk.download('maxent_ne_chunker') #Chunker
-nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
-nltk.download('words') #200 000+ Alphabetical order list
-nltk.download('punkt') #Tokenizer
-nltk.download('verbnet') #For Description of Verbs
-nltk.download('omw')
-nltk.download('omw-1.4') #Multilingual Wordnet
-nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
-nltk.download('shakespeare')
-nltk.download('dolch') #Sight words
-nltk.download('names') #People Names NER
-nltk.download('gazetteers') #Location NER
-nltk.download('opinion_lexicon') #Sentiment words
-nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
-nltk.download('udhr') # Declaration of Human rights in many languages
-
-
-spacy.cli.download("en_core_web_sm")
-spacy.cli.download('ko_core_news_sm')
-spacy.cli.download('ja_core_news_sm')
-spacy.cli.download('zh_core_web_sm')
-spacy.cli.download("es_core_news_sm")
-spacy.cli.download("de_core_news_sm")
+#nltk.download('maxent_ne_chunker') #Chunker
+#nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
+#nltk.download('words') #200 000+ Alphabetical order list
+#nltk.download('punkt') #Tokenizer
+#nltk.download('verbnet') #For Description of Verbs
+#nltk.download('omw')
+#nltk.download('omw-1.4') #Multilingual Wordnet
+#nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
+#nltk.download('shakespeare')
+#nltk.download('dolch') #Sight words
+#nltk.download('names') #People Names NER
+#nltk.download('gazetteers') #Location NER
+#nltk.download('opinion_lexicon') #Sentiment words
+#nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
+#nltk.download('udhr') # Declaration of Human rights in many languages
+
+
+#spacy.cli.download("en_core_web_sm")
+#spacy.cli.download('ko_core_news_sm')
+#spacy.cli.download('ja_core_news_sm')
+#spacy.cli.download('zh_core_web_sm')
+#spacy.cli.download("es_core_news_sm")
+#spacy.cli.download("de_core_news_sm")
 
 nlp_en = spacy.load("en_core_web_sm")
 nlp_de = spacy.load("de_core_news_sm")
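The net effect of this hunk is that every unconditional nltk.download and spacy.cli.download call is commented out, so the Space no longer re-fetches corpora and models on every container build. If the downloads are ever needed again, a guarded variant avoids the cost when the resources are already present. The sketch below is not part of the commit; ensure_nltk and ensure_spacy_model are made-up helper names, and only a couple of representative resources are shown.

# Sketch (not in the commit): fetch NLTK/spaCy resources only when they are missing.
import nltk
import spacy

def ensure_nltk(resource_path, package):
    # nltk.data.find raises LookupError when the resource is not installed
    try:
        nltk.data.find(resource_path)
    except LookupError:
        nltk.download(package)

def ensure_spacy_model(model_name):
    # spacy.util.is_package reports whether the model is installed as a package
    if not spacy.util.is_package(model_name):
        spacy.cli.download(model_name)

ensure_nltk("tokenizers/punkt", "punkt")
ensure_nltk("corpora/wordnet", "wordnet")
ensure_spacy_model("en_core_web_sm")
ensure_spacy_model("de_core_news_sm")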
@@ -1252,7 +1252,7 @@ def loadforcopybuttonllmpromptideas():
 
 def display_website(link):
     html = f"<iframe src='{link}' width='100%' height='1000px'></iframe>"
-
+    gr.Info("If 404 then the space/page has probably been disabled - normally due to a better alternative")
     return html
 
 def RepititionPracticeTimeCalculator(text, reps_per_item, seconds_per_item):
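The only change inside display_website is the added gr.Info call; in recent Gradio releases gr.Info pops a transient toast in the UI and does not affect the return value, so the iframe HTML still reaches the output component. A minimal standalone check of that behaviour (not part of the commit, demo names are made up):

# Sketch: gr.Info raises a toast; the returned HTML still drives the output.
import gradio as gr

def embed(link):
    gr.Info("If the page 404s, the space has probably been disabled")
    return f"<iframe src='{link}' width='100%' height='1000px'></iframe>"

with gr.Blocks() as demo:
    url = gr.Textbox(label="URL")
    frame = gr.HTML()
    url.submit(embed, inputs=url, outputs=frame)

demo.launch()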
@@ -1309,6 +1309,10 @@ def segment_video_with_opencv(file_path, segment_duration=60):
 
     return generated_files
 
+def TestSplitandUpdate(Text):
+
+    return f"What does this do again - {Text}", gr.Button("Life is weird and this is cool if it works")
+
 # For testing purposes
 # file_paths = segment_video_with_opencv("path_to_your_video.mp4")
 # print(file_paths)
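TestSplitandUpdate is a stub returning two values, a string and a gr.Button, to match the two-component outputs list wired up further down (outputs=["text", "button"]). Returning a fresh component instance works on newer Gradio; the longer-standing equivalent pattern is to return gr.update(...) for the component you want to change. A small sketch of that pattern, with made-up names and not taken from the commit:

# Sketch: one return value per output component; gr.update mutates an existing component.
import gradio as gr

def split_and_update(text):
    words = text.split()
    first_word = words[0] if words else ""
    return f"First word: {first_word}", gr.update(value="It worked", interactive=True)

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Text")
    out_text = gr.Textbox(label="Result")
    out_btn = gr.Button("Waiting...")
    inp.submit(split_and_update, inputs=inp, outputs=[out_text, out_btn])

demo.launch()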
@@ -1342,7 +1346,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
     with gr.Accordion("Some prompt ideas", open=False):
         with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
             gr.HTML(LLPromptIdeas)
-        gr.Interface(loadforcopybuttonllmpromptideas, inputs=None, outputs=["html", "
+        gr.Interface(loadforcopybuttonllmpromptideas, inputs=None, outputs=["html", "code", "code", "code", "code", "code"])
     chatspace = gr.HTML("Chat Space Chosen will load here")
     chatspacebtn.click(display_website, inputs=linktochat, outputs=chatspace)
     gr.HTML("<div style='display: flex; justify-content: center; align-items: center; height: 100%;'><a href='https://github.com/KillianLucas/open-interpreter'> -- Open Interpreter = Function Calling Killer = (Almost) All functions are now just prompt engineering away from existing = Modding any app in the world especially games = custom learning environments -- </a> | </div>")
@@ -1374,7 +1378,9 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
         gr.Interface(fn=RepititionPracticeTimeCalculator, inputs=["text", "number", "number"], outputs="text")
     with gr.Column(scale=3):
         with gr.Tab("Workflows"):
-            gr.
+            with gr.Row():
+                gr.HTML("<span style:{'fontsize: 20'}>Start at Unkown Tracker if unseure<span> <br> UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know <br><br> General Ideas in this space - Speed of Learning = Avoid Things you know like the plague -- How to track what you know -- Counter is easiest and How you feel is the hardest (The more you know, the more confusion on what you dont know as you probably werent keeping track) <br><br> Visulisation of long text - Bottom of this page <br> Wordlist - 1 new word at a time per minute in the space to the left <br> Youtube Video Watching - Subtitles Tab <br> Reading - Unknown Tracker Tabs <br> Longer Text Memorising - Acronym Map Creation Tab and Transition Tab <br> Brainstorming - Reading Assistant <br> Random Exposure <br> ")
+                gr.Interface(fn=TestSplitandUpdate, inputs="text", outputs=["text", "button"])
             with gr.Row():
                 PracticeExposure = gr.HTML(randomExposuremessage)
                 PracticeExposure2 = gr.HTML(randomExposuremessage2)
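RepititionPracticeTimeCalculator is wired as text plus two numbers in, text out, but its body is not shown in this diff. A hypothetical implementation matching that signature would just be line count times repetitions times seconds per item; the helper below is illustrative only and not the app's code.

# Hypothetical sketch - the real RepititionPracticeTimeCalculator is defined elsewhere in app.py.
def repetition_practice_time(text, reps_per_item, seconds_per_item):
    items = [line for line in text.splitlines() if line.strip()]
    total_seconds = len(items) * float(reps_per_item) * float(seconds_per_item)
    minutes, seconds = divmod(int(total_seconds), 60)
    return f"{len(items)} items x {reps_per_item} reps x {seconds_per_item}s each = {minutes} min {seconds} s"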
@@ -1397,8 +1403,13 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
                 with gr.Column(scale=3):
                     imageplaceholderoutput = gr.HTML("Preview will load here")
                 with gr.Column(scale=2):
-                    imageplaceholdertextoutput = gr.
+                    imageplaceholdertextoutput = gr.Code("The code for the HTML created will come here")
             imageplaceholderbtn.click(fn=imagebasedreading, inputs=[imageplaceholderinput], outputs=[imageplaceholderdownload, imageplaceholderoutput, imageplaceholdertextoutput])
+        with gr.Tab("Progress Tracking"):
+            gr.Label("Missing is database integration for the counter and non-english - ALSO TODO - Parralell interface for the html and acronym creator")
+            gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["file", "html", "text"], description="HTML mini App - UNNWFWO (To track verbs you dont know for listening practice). Use the text from here to create lists you use for the TTS section")
+            gr.Interface(create_acronym_map, inputs='text', outputs=['text', 'text'])
+            gr.HTML("On the Acronyms you need to underline the verbs")
         with gr.Tab("Beginner - Listen + Read"):
             gr.Label("Closed Eye Recital per new word | 1 new word a minute while recycling the words from the previous minutes")
             with gr.Row():
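The new Progress Tracking tab groups the unknown-word tracker with the acronym-map interface (create_acronym_map, inputs='text', outputs=['text', 'text']). create_acronym_map itself is defined elsewhere in app.py and is not shown in this diff; purely as an illustration of the idea (first letters of each line as a recall skeleton), a hypothetical stand-in with two outputs could be:

# Hypothetical stand-in - the real create_acronym_map lives elsewhere in app.py.
def acronym_map_sketch(text):
    lines = [line for line in text.splitlines() if line.strip()]
    acronyms = [" ".join(word[0].upper() for word in line.split()) for line in lines]
    # Two return values to match outputs=['text', 'text']: the acronym map and the source lines
    return "\n".join(acronyms), "\n".join(lines)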
@@ -1435,7 +1446,6 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
             gr.Textbox("Placeholder for a function that creates a set list and can takes a list for known words and auto find replaces the stuff you know out of the content")
             gr.Interface(fn=GuidedReading, inputs=["text", guidedreadingseperator], outputs="text", description="Manual POS Tag and Transliteration", examples=textspreprocess)
             gr.HTML("Place holder for a translate to english interface so that highlighting can still work as only english supported for now - <a href='https://translate.google.com/'> -- Google Translate -- </a>")
-            gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["file", "html", "text"], description="HTML mini App - UNNWFWO (To track verbs you dont know for listening practice). Use the text from here to create lists you use for the TTS section")
         with gr.Tab("Unique word ID - use in Infranodus"):
             with gr.Accordion(label="Infranodus", open=False):
                 gr.HTML(" <a href='https://infranodus.com/'> -- Infranodus - Word Level Knowledge graphs -- </a> | <br> Use the below interfaces to find the items that dont have entries --> These will represent new concepts or people which need to be understood individually to fully understand the text --> Infranodus search will help find related and unrelated investigation paths <br><br> TODO Figure Output Zoom / Image Dimensions")
@@ -1464,7 +1474,7 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
             gr.HTML("Acronym cant be read with previous attentive reading - accurate measure of known vs unknown")
             with gr.Row():
                 with gr.Accordion('Acronym Map/Skeleton Creator'):
-                    gr.
+                    gr.HTML("Moved to Progress for now")
                 with gr.Accordion('Test with LLM'):
                     gr.Label('Letters are always easier to recall than whole words. GPT 4 and above best suited for this prompt but can test anywhere')
                     gr.HTML('Please help me study by making a acronym map for the maths ontology (Ask if theres questions)')
@@ -1611,7 +1621,9 @@ with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', seconda
             gr.Interface(fn=onlyplurals, inputs=["text"], outputs=["text"], title="Only plurals = optimal concepts to learn first as work = repitition")
             gr.Label("Placeholder for old code for concordance and word counting in other test space")
         with gr.Tab("Video Segmentation with OpenCV Test"):
-            gr.Interface(fn=segment_video_with_opencv, inputs=VideoSplitTestInput, outputs="
+            gr.Interface(fn=segment_video_with_opencv, inputs=VideoSplitTestInput, outputs="fileexplorer")
+        with gr.Tab("State Management and Education"):
+            gr.HTML("Education = Learning things you didnt know yesterday and not forgetting more than you learn <br><br> What you didnt know forms = <br> Glossary <br> Lists <br> Formulas <br> graphs <br> Procedures <br> <br> for each you will need a seperate way to track the progress but amount of times + recency = approximate state ")
 
 
 lliface.queue().launch() #(inbrowser="true")
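Only the tail of segment_video_with_opencv appears in this diff (it returns generated_files), and the last hunk now points its gr.Interface output at a file-explorer style component. For orientation, a self-contained sketch of that kind of OpenCV segmentation is below; it is an assumption about how such a helper behaves, not the app's implementation, and it writes fixed-duration .mp4 chunks and returns their paths.

# Sketch (assumption, not the app's code): split a video into fixed-length segments with OpenCV.
import cv2

def segment_video_sketch(file_path, segment_duration=60):
    cap = cv2.VideoCapture(file_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frames_per_segment = int(fps * segment_duration)
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")

    generated_files, writer, frame_count, segment_index = [], None, 0, 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        # Start a new output file every frames_per_segment frames
        if frame_count % frames_per_segment == 0:
            if writer is not None:
                writer.release()
            out_path = f"segment_{segment_index:03d}.mp4"
            writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
            generated_files.append(out_path)
            segment_index += 1
        writer.write(frame)
        frame_count += 1

    if writer is not None:
        writer.release()
    cap.release()
    return generated_files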