Spaces:
Sleeping
Sleeping
updated conventional sts model
Browse files
app.py
CHANGED
@@ -66,8 +66,8 @@ model2 = PeftModel.from_pretrained(model=base_model2, model_id=peft_model_id2)
|
|
66 |
sa_merged_model2 = model2.merge_and_unload()
|
67 |
bbu_tokenizer2 = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
|
68 |
|
69 |
-
DebertaUntrained_pipe = pipeline("text-classification", model="
|
70 |
-
DebertanoLORA_pipe = pipeline(model="rajevan123/STS-
|
71 |
DebertawithLORA_pipe = pipeline("text-classification",model=sa_merged_model2, tokenizer=bbu_tokenizer2)
|
72 |
|
73 |
#STS models
|
@@ -221,7 +221,7 @@ def displayMetricStatsTextSTSLora():
|
|
221 |
return metrics
|
222 |
def displayMetricStatsTextSTSNoLora():
|
223 |
#file_name = 'events.out.tfevents.STS-Conventional.0'
|
224 |
-
file_name = hf_hub_download(repo_id="rajevan123/STS-
|
225 |
event_acc = event_accumulator.EventAccumulator(file_name,
|
226 |
size_guidance={
|
227 |
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
|
@@ -415,7 +415,7 @@ with gr.Blocks(
|
|
415 |
with gr.Column(variant="panel"):
|
416 |
gr.Markdown("""
|
417 |
<h2>Specifications</h2>
|
418 |
-
<p><b>Model:</b>
|
419 |
<b>Dataset:</b> Semantic Text Similarity Benchmark <br>
|
420 |
<b>NLP Task:</b> Semantic Text Similarity</p>
|
421 |
<p>Semantic text similarity measures the closeness in meaning of two pieces of text despite differences in their wording or structure. This task involves two input prompts which can be sentences, phrases or entire documents and assessing them for similarity. In our implementation we compare phrases represented by a score that can range between zero and one. A score of zero implies completely different phrases, while one indicates identical meaning between the text pair. This implementation uses a DeBERTa-v3-xsmall and training was performed on the semantic text similarity benchmark dataset which contains over 86k semantic pairs and their scores. We can see that when training is performed over [XX] epochs we see an increase in X% of training time for the LoRA trained model compared to a conventionally tuned model.</p>
|
|
|
66 |
sa_merged_model2 = model2.merge_and_unload()
|
67 |
bbu_tokenizer2 = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
|
68 |
|
69 |
+
DebertaUntrained_pipe = pipeline("text-classification", model="FacebookAI/roberta-base")
|
70 |
+
DebertanoLORA_pipe = pipeline(model="rajevan123/STS-conventional-Fine-Tuning-Capstone-roberta-base-filtered-137")
|
71 |
DebertawithLORA_pipe = pipeline("text-classification",model=sa_merged_model2, tokenizer=bbu_tokenizer2)
|
72 |
|
73 |
#STS models
|
|
|
221 |
return metrics
|
222 |
def displayMetricStatsTextSTSNoLora():
|
223 |
#file_name = 'events.out.tfevents.STS-Conventional.0'
|
224 |
+
file_name = hf_hub_download(repo_id="rajevan123/STS-conventional-Fine-Tuning-Capstone-roberta-base-filtered-137", filename="runs/Mar31_15-13-28_585e70ba99a4/events.out.tfevents.1711898010.585e70ba99a4.247.0")
|
225 |
event_acc = event_accumulator.EventAccumulator(file_name,
|
226 |
size_guidance={
|
227 |
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
|
|
|
415 |
with gr.Column(variant="panel"):
|
416 |
gr.Markdown("""
|
417 |
<h2>Specifications</h2>
|
418 |
+
<p><b>Model:</b> Roberta Base <br>
|
419 |
<b>Dataset:</b> Semantic Text Similarity Benchmark <br>
|
420 |
<b>NLP Task:</b> Semantic Text Similarity</p>
|
421 |
<p>Semantic text similarity measures the closeness in meaning of two pieces of text despite differences in their wording or structure. This task involves two input prompts which can be sentences, phrases or entire documents and assessing them for similarity. In our implementation we compare phrases represented by a score that can range between zero and one. A score of zero implies completely different phrases, while one indicates identical meaning between the text pair. This implementation uses a DeBERTa-v3-xsmall and training was performed on the semantic text similarity benchmark dataset which contains over 86k semantic pairs and their scores. We can see that when training is performed over [XX] epochs we see an increase in X% of training time for the LoRA trained model compared to a conventionally tuned model.</p>
|