app.py update with NLI model and skeleton code for remaining models
app.py CHANGED
@@ -5,22 +5,63 @@ from peft import AutoPeftModelForSequenceClassification
 tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
 loraModel = AutoPeftModelForSequenceClassification.from_pretrained("Intradiction/text_classification_WithLORA")
 
+tokenizer1 = AutoTokenizer.from_pretrained("albert-base-v2")
+
+
+
 #pretrained models
-#Textclass_pipe = pipeline()
 #STSmodel_pipe = pipeline()
 #NLImodel_pipe = pipeline()
 
-# Handle calls to DistilBERT
+# Handle calls to DistilBERT
+distilBERTUntrained_pipe = pipeline("sentiment-analysis", model="bert-base-uncased")
 distilBERTnoLORA_pipe = pipeline(model="Intradiction/text_classification_NoLORA")
 distilBERTwithLORA_pipe = pipeline("sentiment-analysis", model=loraModel, tokenizer=tokenizer)
 
-
+#text class models
 def distilBERTnoLORA_fn(text):
     return distilBERTnoLORA_pipe(text)
 
 def distilBERTwithLORA_fn(text):
     return distilBERTwithLORA_pipe(text)
 
+def distilBERTUntrained_fn(text):
+    return distilBERTUntrained_pipe(text)
+
+
+# Handle calls to ALBERT
+ALbertUntrained_pipe = pipeline("text-classification", model="albert-base-v2")
+AlbertnoLORA_pipe = pipeline(model="Intradiction/NLI-Conventional-Fine-Tuning")
+#AlbertwithLORA_pipe = pipeline()
+
+#NLI models
+def AlbertnoLORA_fn(text1, text2):
+    return AlbertnoLORA_pipe(text1, text2)
+
+def AlbertwithLORA_fn(text1, text2):
+    return ("working2")
+
+def AlbertUntrained_fn(text1, text2):
+    return ALbertUntrained_pipe(text1,text2)
+
+
+# Handle calls to Deberta
+#DebertaUntrained_pipe = pipeline()
+#DebertanoLORA_pipe = pipeline()
+#DebertawithLORA_pipe = pipeline()
+
+#STS models
+def DebertanoLORA_fn(text1, text2):
+    return ("working3")
+
+def DebertawithLORA_fn(text1, text2):
+    return ("working2")
+
+def DebertaUntrained_fn(text1, text2):
+    return ("working3")
+
+
+#placeholder
 def chat1(message,history):
     history = history or []
     message = message.lower()
@@ -32,9 +73,6 @@ def chat1(message,history):
     history.append((message, response))
     return history, history
 
-chatbot = gr.Chatbot()
-chatbot1 = gr.Chatbot()
-chatbot2 = gr.Chatbot()
 
 with gr.Blocks(
     title="",
@@ -76,29 +114,30 @@ with gr.Blocks(
 
             with gr.Column():
                 with gr.Row(variant="panel"):
-
+                    TextClassOut = gr.Textbox(label= "Untrained Base Model")
                     gr.Markdown("""<div>
                         <span><center><B>Training Information</B><center></span>
                         <span><br><br><br><br><br></span>
                         </div>""")
 
                 with gr.Row(variant="panel"):
-
+                    TextClassOut1 = gr.Textbox(label= "Conventionaly Trained Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
 
                 with gr.Row(variant="panel"):
-
+                    TextClassOut2 = gr.Textbox(label= "LoRA Fine Tuned Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
 
-        btn.click(fn=
-        btn.click(fn=
-        btn.click(fn=
+        btn.click(fn=distilBERTUntrained_fn, inputs=inp, outputs=TextClassOut)
+        btn.click(fn=distilBERTnoLORA_fn, inputs=inp, outputs=TextClassOut1)
+        btn.click(fn=distilBERTwithLORA_fn, inputs=inp, outputs=TextClassOut2)
+
 
     with gr.Tab("Natrual Language Infrencing"):
         with gr.Row():
@@ -115,21 +154,21 @@ with gr.Blocks(
             with gr.Column(scale=0.3,variant="panel"):
                 nli_p1 = gr.Textbox(placeholder="Prompt One",label= "Enter Query")
                 nli_p2 = gr.Textbox(placeholder="Prompt Two",label= "Enter Query")
-
+                nli_btn = gr.Button("Run")
                 gr.Examples(
                     [
-                        "
-                        "
-                        "
+                        "I am with my friends",
+                        "People like apples",
+                        "Dogs like bones",
                     ],
                     nli_p1,
                     label="Try asking",
                 )
                 gr.Examples(
                     [
-                        "
-                        "
-                        "
+                        "I am happy",
+                        "Apples are good",
+                        "Bones like dogs",
                     ],
                     nli_p2,
                     label="Try asking",
@@ -137,25 +176,29 @@ with gr.Blocks(
 
            with gr.Column():
                with gr.Row(variant="panel"):
-
+                    NLIOut = gr.Textbox(label= "Untrained Base Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
 
               with gr.Row(variant="panel"):
-
+                    NLIOut1 = gr.Textbox(label= "Conventionaly Trained Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
 
               with gr.Row(variant="panel"):
-
+                    NLIOut2 = gr.Textbox(label= "LoRA Fine Tuned Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
+
+        nli_btn.click(fn=AlbertUntrained_fn, inputs=[nli_p1,nli_p2], outputs=NLIOut)
+        nli_btn.click(fn=AlbertnoLORA_fn, inputs=[nli_p1,nli_p2], outputs=NLIOut1)
+        nli_btn.click(fn=AlbertwithLORA_fn, inputs=[nli_p1,nli_p2], outputs=NLIOut2)
 
     with gr.Tab("Sematic Text Similarity"):
         with gr.Row():
@@ -172,21 +215,21 @@ with gr.Blocks(
             with gr.Column(scale=0.3,variant="panel"):
                 sts_p1 = gr.Textbox(placeholder="Prompt One",label= "Enter Query")
                 sts_p2 = gr.Textbox(placeholder="Prompt Two",label= "Enter Query")
-
+                sts_btn = gr.Button("Run")
                 gr.Examples(
                     [
-                        "
-                        "
-                        "
+                        "the ball is green",
+                        "i dont like apples",
+                        "our air is clean becase of trees",
                     ],
                     sts_p1,
                     label="Try asking",
                 )
                 gr.Examples(
                     [
-                        "
-                        "
-                        "
+                        "the green ball",
+                        "apples are great",
+                        "trees produce oxygen",
                     ],
                     sts_p2,
                     label="Try asking",
@@ -194,25 +237,29 @@ with gr.Blocks(
 
            with gr.Column():
                with gr.Row(variant="panel"):
-
+                    sts_out = gr.Textbox(label= "Untrained Base Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
 
               with gr.Row(variant="panel"):
-
+                    sts_out1 = gr.Textbox(label= "Conventionaly Trained Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
 
               with gr.Row(variant="panel"):
-
+                    sts_out2 = gr.Textbox(label= "LoRA Fine Tuned Model")
                     gr.Markdown("""<div>
                        <span><center><B>Training Information</B><center></span>
                        <span><br><br><br><br><br></span>
                        </div>""")
+
+        sts_btn.click(fn=DebertaUntrained_fn, inputs=[sts_p1,sts_p2], outputs=sts_out)
+        sts_btn.click(fn=DebertanoLORA_fn, inputs=[sts_p1,sts_p2], outputs=sts_out1)
+        sts_btn.click(fn=DebertawithLORA_fn, inputs=[sts_p1,sts_p2], outputs=sts_out2)
 
     with gr.Tab("More information"):
         gr.Markdown("stuff to add")
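Note on the NLI skeleton: AlbertwithLORA_fn still returns a placeholder string, and AlbertnoLORA_fn passes text2 as a second positional argument to the pipeline call, which a text-classification pipeline does not appear to treat as the hypothesis of the pair. Below is a minimal sketch of one way the AlbertwithLORA skeleton could be completed, mirroring the DistilBERT + LoRA pattern already in the file; the adapter repo id "Intradiction/NLI_with_LORA" is a placeholder assumption, not a model referenced by this commit, and the dict-style premise/hypothesis input is likewise an assumption about how the pair should be fed to the pipeline.

# Sketch only - mirrors the existing DistilBERT + LoRA setup in app.py.
from transformers import AutoTokenizer, pipeline
from peft import AutoPeftModelForSequenceClassification

albert_tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
albert_lora_model = AutoPeftModelForSequenceClassification.from_pretrained(
    "Intradiction/NLI_with_LORA"  # placeholder adapter repo id, not from this commit
)
AlbertwithLORA_pipe = pipeline(
    "text-classification", model=albert_lora_model, tokenizer=albert_tokenizer
)

def AlbertwithLORA_fn(text1, text2):
    # Pass the premise/hypothesis pair as one input so the tokenizer encodes
    # them as a sequence pair instead of treating text2 as a separate argument.
    return AlbertwithLORA_pipe({"text": text1, "text_pair": text2})

The same pair-style input would also apply to AlbertnoLORA_fn and AlbertUntrained_fn if the two-positional-argument call turns out not to score the sentences together.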