ricardo-lsantos committed on
Commit 91e858d • 1 Parent(s): e65df7b

Added AI and Pages for first few examples

.gitignore ADDED
@@ -0,0 +1,2 @@
+ __pycache__
+ cache
AI/__init__.py ADDED
@@ -0,0 +1,2 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
AI/question_answering.py ADDED
@@ -0,0 +1,37 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import torch
+ import torch_directml
+ from transformers import pipeline
+
+ def getDevice(DEVICE):
+     device = None
+     if DEVICE == "cpu":
+         device = torch.device("cpu")
+         dtype = torch.float32
+     elif DEVICE == "cuda":
+         device = torch.device("cuda")
+         dtype = torch.float16
+     elif DEVICE == "directml":
+         device = torch_directml.device()
+         dtype = torch.float16
+     return device
+
+ def loadGenerator(device):
+     generator = pipeline("question-answering") # .to(device)
+     return generator
+
+ def query(generator, question, context):
+     output = generator(
+         question=question,
+         context=context,
+     )
+     return output
+
+ def clearCache(DEVICE, generator):
+     generator.tokenizer.save_pretrained("cache")
+     generator.model.save_pretrained("cache")
+     del generator
+     if DEVICE == "directml":
+         torch_directml.empty_cache()
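
For orientation, a minimal usage sketch for this module, mirroring the flow the Streamlit pages below use; it assumes torch, torch_directml and transformers are installed and that the pipeline's default question-answering checkpoint is acceptable (the question and context strings are made up for illustration):

    import AI.question_answering as qa

    device = qa.getDevice("cpu")          # "cpu", "cuda", or "directml"
    generator = qa.loadGenerator(device)  # downloads the default QA checkpoint on first use
    output = qa.query(
        generator,
        question="Who wrote these examples?",
        context="These examples were written by Ricardo Lisboa Santos in January 2024.",
    )
    print(output["answer"], output["score"])  # the QA pipeline returns a dict with 'answer' and 'score'
    qa.clearCache("cpu", generator)           # saves tokenizer/model under ./cache and drops the pipeline

Note that getDevice picks a dtype per device but only returns the device, and loadGenerator never moves the pipeline onto it (the .to(device) call is commented out), so inference currently runs on the pipeline's default device.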
AI/sentiment_analysis.py ADDED
@@ -0,0 +1,34 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import torch
+ import torch_directml
+ from transformers import pipeline
+
+ def getDevice(DEVICE):
+     device = None
+     if DEVICE == "cpu":
+         device = torch.device("cpu")
+         dtype = torch.float32
+     elif DEVICE == "cuda":
+         device = torch.device("cuda")
+         dtype = torch.float16
+     elif DEVICE == "directml":
+         device = torch_directml.device()
+         dtype = torch.float16
+     return device
+
+ def loadClassifier(device):
+     classifier = pipeline("sentiment-analysis") # .to(device)
+     return classifier
+
+ def classify(classifier, text):
+     output = classifier(text)
+     return output
+
+ def clearCache(DEVICE, classifier):
+     classifier.tokenizer.save_pretrained("cache")
+     classifier.model.save_pretrained("cache")
+     del classifier
+     if DEVICE == "directml":
+         torch_directml.empty_cache()
AI/summarization.py ADDED
@@ -0,0 +1,34 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import torch
+ import torch_directml
+ from transformers import pipeline
+
+ def getDevice(DEVICE):
+     device = None
+     if DEVICE == "cpu":
+         device = torch.device("cpu")
+         dtype = torch.float32
+     elif DEVICE == "cuda":
+         device = torch.device("cuda")
+         dtype = torch.float16
+     elif DEVICE == "directml":
+         device = torch_directml.device()
+         dtype = torch.float16
+     return device
+
+ def loadSummarizer(device):
+     summarizer = pipeline("summarization") # .to(device)
+     return summarizer
+
+ def summarize(summarizer, text):
+     output = summarizer(text)
+     return output
+
+ def clearCache(DEVICE, summarizer):
+     summarizer.tokenizer.save_pretrained("cache")
+     summarizer.model.save_pretrained("cache")
+     del summarizer
+     if DEVICE == "directml":
+         torch_directml.empty_cache()
AI/text_generation.py ADDED
@@ -0,0 +1,34 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import torch
+ import torch_directml
+ from transformers import pipeline
+
+ def getDevice(DEVICE):
+     device = None
+     if DEVICE == "cpu":
+         device = torch.device("cpu")
+         dtype = torch.float32
+     elif DEVICE == "cuda":
+         device = torch.device("cuda")
+         dtype = torch.float16
+     elif DEVICE == "directml":
+         device = torch_directml.device()
+         dtype = torch.float16
+     return device
+
+ def loadGenerator(device):
+     generator = pipeline("text-generation") # .to(device)
+     return generator
+
+ def generate(generator, text):
+     output = generator(text)
+     return output
+
+ def clearCache(DEVICE, generator):
+     generator.tokenizer.save_pretrained("cache")
+     generator.model.save_pretrained("cache")
+     del generator
+     if DEVICE == "directml":
+         torch_directml.empty_cache()
AI/zero_shot_classification.py ADDED
@@ -0,0 +1,34 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import torch
+ import torch_directml
+ from transformers import pipeline
+
+ def getDevice(DEVICE):
+     device = None
+     if DEVICE == "cpu":
+         device = torch.device("cpu")
+         dtype = torch.float32
+     elif DEVICE == "cuda":
+         device = torch.device("cuda")
+         dtype = torch.float16
+     elif DEVICE == "directml":
+         device = torch_directml.device()
+         dtype = torch.float16
+     return device
+
+ def loadGenerator(device):
+     generator = pipeline("zero-shot-classification") # .to(device)
+     return generator
+
+ def classify(generator, text, labels=["education", "politics", "business"]):
+     output = generator(text, candidate_labels=labels)
+     return output
+
+ def clearCache(DEVICE, generator):
+     generator.tokenizer.save_pretrained("cache")
+     generator.model.save_pretrained("cache")
+     del generator
+     if DEVICE == "directml":
+         torch_directml.empty_cache()
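
As with the other helpers, a minimal sketch of how this module is intended to be driven; the sample sentence and labels are illustrative only:

    import AI.zero_shot_classification as zsc

    device = zsc.getDevice("cpu")
    classifier = zsc.loadGenerator(device)
    output = zsc.classify(
        classifier,
        "The central bank raised interest rates again this quarter.",
        labels=["education", "politics", "business"],
    )
    print(output["labels"][0], output["scores"][0])  # candidate labels come back sorted, best match first
    zsc.clearCache("cpu", classifier)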
Home.py ADDED
@@ -0,0 +1,18 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+
+ def main():
+     st.title('Home Page')
+     dataframe = pd.DataFrame(
+         np.random.randn(10, 20),
+         columns=('col %d' % i for i in range(20))
+     )
+     st.dataframe(dataframe.style.highlight_max(axis=0))
+
+
+ if __name__ == '__main__':
+     main()
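
With the Space's app_file now pointing at Home.py (see the README change below), the same entry point runs locally with streamlit run Home.py; Streamlit's multipage convention then picks up every script under pages/ as a sidebar page next to this home page.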
README.md CHANGED
@@ -5,8 +5,11 @@ colorFrom: blue
  colorTo: purple
  sdk: streamlit
  sdk_version: 1.29.0
- app_file: app.py
+ app_file: Home.py
  pinned: false
  ---
+ BasicTransformersExample
+
+ Author: Ricardo Lisboa Santos

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
pages/Question_Answering.py ADDED
@@ -0,0 +1,36 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import streamlit as st
+ import AI.question_answering as ai
+
+ def run():
+
+     st.set_page_config(page_title="Question Answering", page_icon="📈")
+
+     st.markdown("# Question Answering")
+     st.write('Give some context. Ask some questions.')
+     context = st.text_area('Enter your context here.')
+     question = st.text_input('Enter your question here.')
+     if st.button('Click me to run'):
+         progress_bar = st.sidebar.progress(0)
+         status_text = st.sidebar.empty()
+         with st.spinner(text='Loading Model'):
+             status_text.text("Getting Device")
+             device = ai.getDevice("cpu")
+             progress_bar.progress(30)
+             status_text.text("Loading Model")
+             model = ai.loadGenerator(device)
+             progress_bar.progress(60)
+             status_text.text("Generating Answer")
+             output = ai.query(model, question, context)
+             progress_bar.progress(90)
+             status_text.text("Clearing Cache")
+             ai.clearCache("cpu", model)
+             progress_bar.progress(100)
+             status_text.text("Done")
+         st.code(output.get('answer'))
+         # st.success('Done')
+
+ if __name__ == '__main__':
+     run()
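
The remaining pages follow the same template as this one: resolve a device, load the task-specific pipeline, advance a sidebar progress bar in fixed steps while the spinner is shown, clear the cache, and display the field of the pipeline output that matters for the task (label, summary_text, generated_text, or the top zero-shot label).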
pages/Sentiment_Analysis.py ADDED
@@ -0,0 +1,48 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import streamlit as st
+ import time
+ import AI.sentiment_analysis as ai
+
+
+
+ def run():
+
+     st.set_page_config(page_title="Sentiment Analysis", page_icon="📈")
+     # setSidebar()
+     # st.write('Sentiment Analysis')
+     st.markdown("# Sentiment Analysis")
+     input = st.text_input('Enter your prompt here.')
+     if st.button('Click me to run'):
+         progress_bar = st.sidebar.progress(0)
+         status_text = st.sidebar.empty()
+         loading_text='Loading Model'
+         with st.spinner(text=loading_text):
+             status_text.text("Getting Device")
+             device = ai.getDevice("cpu")
+             progress_bar.progress(30)
+             status_text.text("Loading Model")
+             model = ai.loadClassifier(device)
+             progress_bar.progress(60)
+             status_text.text("Classifying")
+             output = ai.classify(model, input)
+             progress_bar.progress(90)
+             ai.clearCache("cpu", model)
+             progress_bar.progress(100)
+             status_text.text("Done")
+         if output[0].get('label') == 'NEGATIVE':
+             st.error(' 😔 ' + output[0].get('label'))
+         elif output[0].get('label') == 'POSITIVE':
+             st.success(' 😃 ' + output[0].get('label'))
+
+ # def setSidebar():
+ #     st.sidebar.header("Sentiment Analysis")
+ #     st.sidebar.write(
+ #         """This demo illustrates a combination of plotting and animation with
+ #         Streamlit. We're generating a bunch of random numbers in a loop for around
+ #         5 seconds. Enjoy!"""
+ #     )
+
+ if __name__ == '__main__':
+     run()
pages/Summarization.py ADDED
@@ -0,0 +1,37 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import streamlit as st
+ import time
+ import AI.summarization as ai
+
+
+
+ def run():
+
+     st.set_page_config(page_title="Summarization", page_icon="📈")
+     # setSidebar()
+     # st.write('Summarization')
+     st.markdown("# Summarization")
+     input = st.text_area('Enter your LOOOONG text here.')
+     if st.button('Click me to run'):
+         progress_bar = st.sidebar.progress(0)
+         status_text = st.sidebar.empty()
+         loading_text='Loading Model'
+         with st.spinner(text=loading_text):
+             status_text.text("Getting Device")
+             device = ai.getDevice("cpu")
+             progress_bar.progress(30)
+             status_text.text("Loading Model")
+             model = ai.loadSummarizer(device)
+             progress_bar.progress(60)
+             status_text.text("Summarizing")
+             output = ai.summarize(model, input)
+             progress_bar.progress(90)
+             ai.clearCache("cpu", model)
+             progress_bar.progress(100)
+             status_text.text("Done")
+         st.code(output[0].get('summary_text'))
+
+ if __name__ == '__main__':
+     run()
pages/Text_Generation.py ADDED
@@ -0,0 +1,35 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import streamlit as st
+ import AI.text_generation as ai
+
+ def run():
+
+     st.set_page_config(page_title="Text Generation", page_icon="📈")
+
+     st.markdown("# Text Generation")
+     st.write('Generate some text.')
+     input = st.text_input('Enter your prompt here.')
+     if st.button('Click me to run'):
+         progress_bar = st.sidebar.progress(0)
+         status_text = st.sidebar.empty()
+         with st.spinner(text='Loading Model'):
+             status_text.text("Getting Device")
+             device = ai.getDevice("cpu")
+             progress_bar.progress(30)
+             status_text.text("Loading Model")
+             model = ai.loadGenerator(device)
+             progress_bar.progress(60)
+             status_text.text("Generating Text")
+             output = ai.generate(model, input)
+             progress_bar.progress(90)
+             status_text.text("Clearing Cache")
+             ai.clearCache("cpu", model)
+             progress_bar.progress(100)
+             status_text.text("Done")
+         st.code(output[0].get('generated_text'))
+         # st.success('Done')
+
+ if __name__ == '__main__':
+     run()
pages/Zero_Shot_Classification.py ADDED
@@ -0,0 +1,38 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10
+
+ import streamlit as st
+ import AI.zero_shot_classification as ai
+
+ def run():
+
+     st.set_page_config(page_title="Zero Shot Classification", page_icon="📈")
+
+     st.markdown("# Zero Shot Classification")
+     st.write('Add some comma-separated labels and then add a text to classify.')
+     labels_box = st.text_area('Enter your labels here.')
+     labels = [label.strip() for label in labels_box.split(',') if label.strip()]  # assumes comma-separated labels
+     st.markdown('<p class="breadcrumb">{}</p>'.format(labels_box), unsafe_allow_html=True)
+     text = st.text_input('Enter your text here.')
+     if st.button('Click me to run'):
+         progress_bar = st.sidebar.progress(0)
+         status_text = st.sidebar.empty()
+         with st.spinner(text='Loading Model'):
+             status_text.text("Getting Device")
+             device = ai.getDevice("cpu")
+             progress_bar.progress(30)
+             status_text.text("Loading Model")
+             model = ai.loadGenerator(device)
+             progress_bar.progress(60)
+             status_text.text("Classifying")
+             output = ai.classify(model, text, labels)  # classify the free text against the user-provided labels
+             progress_bar.progress(90)
+             status_text.text("Clearing Cache")
+             ai.clearCache("cpu", model)
+             progress_bar.progress(100)
+             status_text.text("Done")
+         st.code(output.get('labels')[0])  # the zero-shot pipeline returns 'labels' (sorted best-first) and 'scores'
+         # st.success('Done')
+
+ if __name__ == '__main__':
+     run()
pages/__init__.py ADDED
@@ -0,0 +1,2 @@
+ # Author: Ricardo Lisboa Santos
+ # Creation date: 2024-01-10