# HuggingFace Spaces file to run a Gradio Interface for the ALBERT v2 Steam Review Constructiveness Classifier by Samuel Ruairí Bullard
# Package Imports
import gradio as gr
from transformers import pipeline
import torch
# Checks if CUDA is available on the machine
print("CUDA Available: ", torch.cuda.is_available())
# if not os.path.isfile("./README.md"):
# !git clone https://huggingface.co/spaces/abullard1/albert-v2-steam-review-constructiveness-classifier
# Selects the first CUDA GPU (device 0) if available, otherwise the CPU (device -1), and sets the torch dtype to
# 16-bit half-precision floating point if CUDA is available (beneficial on GPUs with Tensor Cores, e.g. NVIDIA's
# Volta, Turing and Ampere architectures), otherwise to 32-bit single precision.
device = 0 if torch.cuda.is_available() else -1
torch_d_type = torch.float16 if torch.cuda.is_available() else torch.float32
print(f"Device: {device}")
# Defines the name of the base model that the classifier was fine-tuned from
base_model_name = "albert-base-v2"
# Defines the name of the fine-tuned model used for the Steam review constructiveness classification
finetuned_model_name = "abullard1/albert-v2-steam-review-constructiveness-classifier"
# PyTorch classifier pipeline
classifier = pipeline(
    task="text-classification",  # Defines the task
    model=finetuned_model_name,  # Defines the fine-tuned model to use
    tokenizer=base_model_name,  # Defines the tokenizer to use (same as the base model)
    device=device,  # Defines the device the classification will be run on
    top_k=None,  # Returns the scores for all labels, not just the highest-scoring one
    truncation=True,  # Truncates the input text if it exceeds the maximum length
    max_length=512,  # Defines the maximum length of the input text (512, as for BERT/ALBERT; explicitly set here)
    torch_dtype=torch_d_type  # float16 when CUDA is available, otherwise float32 (see torch_d_type above)
)
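
# Illustrative pipeline output (hypothetical example scores; actual values depend on the model):
# with top_k=None the pipeline returns one list of {label, score} dicts per input string,
# sorted by descending score, e.g.
#   classifier("Review: Great game., Playtime: 5, Voted Up: True, Upvotes: 0, Votes Funny: 0")
#   -> [[{"label": "LABEL_1", "score": 0.97}, {"label": "LABEL_0", "score": 0.03}]]
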
# Classifies the given Steam review text and extracts the labels and scores from the prediction result
def classify_steam_review(input_text):
    result = classifier(input_text)
    label_1, label_2 = result[0][0]["label"], result[0][1]["label"]
    score_1, score_2 = round(result[0][0]["score"], 6), round(result[0][1]["score"], 6)
    return {"label_1": label_1, "score_1": score_1, "label_2": label_2, "score_2": score_2}
# Provides a textual representation of the classification result
def get_steam_review_classification_result_text(label_1, score_1):
    if label_1 == "LABEL_1":
        return f"Constructive with a score of {score_1}. 👍🏻"
    else:
        return f"Not Constructive with a score of {score_1}. 👎🏻"
# Example Steam Reviews to display in the Gradio Interface using the "examples" parameter
examples = [
    ["Review: I think this is a great game but it still has some room for improvement., Playtime: 12, Voted Up: True, Upvotes: 1, Votes Funny: 0"],
    ["Review: Trash game. Deleted., Playtime: 1, Voted Up: False, Upvotes: 0, Votes Funny: 0"],
    ["Review: This game is amazing., Playtime: 100, Voted Up: True, Upvotes: 1, Votes Funny: 0"],
    ["Review: Great game, but the community is toxic., Playtime: 50, Voted Up: True, Upvotes: 1, Votes Funny: 0"]
]
# Information Description about the Steam Review Format
# info_description = (
# """
# Format your input as follows for the best results: ***Review**: {review_text}, **Playtime**: {author_playtime_at_review}, **Voted Up**: {voted_up}, **Upvotes**: {upvotes}, **Votes Funny**: {votes_funny}.*
# """
# )
# Labeling Criteria Information Markdown
labeling_criteria = (
    """
    <ul>
        <li>
            <b><span style="color:green;">Constructive</span></b>:
            Reviews that provide <b>helpful feedback</b>, <b>suggestions for improvement</b>,
            <b>constructive criticism</b>, or <b>detailed insights</b> into the game.
        </li>
        <li>
            <b><span style="color:red;">Not Constructive</span></b>:
            Reviews that <b>do not offer useful feedback</b>, <b>lack substance</b>,
            are <b>vague</b>, <b>off-topic</b>, <b>irrelevant</b>, or <b>trolling</b>.
        </li>
    </ul>
    """
)
# Custom CSS to create a card-like design for the Gradio Interface
custom_css = """
<style>
.card {
border: 1px solid rgba(255, 255, 255, 0.1);
padding: 16px;
border-radius: 12px;
box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
background-color: transparent;
margin-bottom: 16px;
}
</style>
"""
# Main Gradio Interface using Gradio Blocks
# Docs: https://www.gradio.app/docs/gradio/blocks
with gr.Blocks() as steam_reviews_classifier_block:
    with gr.Column(elem_classes=["card"]):
        gr.Markdown("## Steam Review Constructiveness Classifier")
        # gr.Markdown(info_description)
    gr.HTML(custom_css)

    # Main Column
    with gr.Column():
        # Card Column to create a card-like Design for the Main Functionality Components
        with gr.Column(elem_classes=["card"]):
            # Upper Row (Input Textbox, Constructive Label, Not Constructive Label)
            with gr.Row(equal_height=True):
                # Input Textbox Column
                with gr.Column():
                    input_textbox = gr.Textbox(
                        lines=8,
                        label="Steam Review",
                        # info="Input Steam Review here",
                        placeholder="Review: I think this is a great game but it still has some room for improvement., Playtime: 12, Voted Up: True, Upvotes: 1, Votes Funny: 0",
                        show_copy_button=False,
                        value="Review: I think this is a great game but it still has some room for improvement., Playtime: 12, Voted Up: True, Upvotes: 1, Votes Funny: 0"
                    )
                # Constructive and Not Constructive Labels Column
                with gr.Column():
                    constructive_label = gr.Label(label="Constructive")
                    not_constructive_label = gr.Label(label="Not Constructive")

            # Output Textbox which shows the textual representation of the Constructiveness Prediction
            output_textbox = gr.Textbox(
                label="Constructiveness Prediction",
                interactive=False,
                show_copy_button=False,
                # info="Textual representation of the Constructiveness Prediction"
            )
            gr.Markdown("<br>")

            # Examples Component
            example_component = gr.Examples(
                examples=examples,
                inputs=input_textbox
            )

            # Submit Button
            submit_button = gr.Button(value="Submit")

    # Collapsible Labeling Criteria Info Accordion
    with gr.Accordion(label="See Labeling Criteria:", open=False):
        gr.Markdown(labeling_criteria)

    # Function to run when the Submit Button is clicked (passes the input text to the classifier and displays the output text)
    def on_submit_click(input_text):
        classification_result = classify_steam_review(input_text)
        label_1 = classification_result["label_1"]
        score_1 = classification_result["score_1"]
        score_2 = classification_result["score_2"]
        output_text = get_steam_review_classification_result_text(label_1, score_1)

        # Maps the scores to the Constructive and Not Constructive labels in the Gradio Interface
        if label_1 == "LABEL_1":
            constructive, not_constructive = score_1, score_2
        else:
            constructive, not_constructive = score_2, score_1
        return output_text, constructive, not_constructive

    # onClick event for the Submit Button
    submit_button.click(
        fn=on_submit_click,
        inputs=input_textbox,
        outputs=[output_textbox, constructive_label, not_constructive_label]
    )
# Launches the Gradio Blocks Interface
steam_reviews_classifier_block.launch()