# HuggingFace Spaces file to run a Gradio Interface for the ALBERT v2 Steam Review Constructiveness Classifier by Samuel Ruairí Bullard
# Package Imports
import gradio as gr
from transformers import pipeline
import torch
# Checks if CUDA is available on the machine
print("CUDA Available: ", torch.cuda.is_available())
# if not os.path.isfile("./README.md"):
# !git clone https://huggingface.co/spaces/abullard1/albert-v2-steam-review-constructiveness-classifier
# Runs the classification on the GPU (device 0) if CUDA is available, otherwise on the CPU (device -1)
device = 0 if torch.cuda.is_available() else -1
# Sets the torch dtype to 16-bit half-precision floating point if CUDA is available, otherwise to 32-bit single precision.
# (Half precision benefits from Tensor Cores, available on GPUs such as NVIDIA's Volta, Turing and Ampere architectures.)
torch_d_type = torch.float16 if torch.cuda.is_available() else torch.float32
print(f"Device: {device}")
# Defines the name of the base model the classifier was fine-tuned from
base_model_name = "albert-base-v2"
# Defines the name of the fine-tuned model used for the steam-review constructiveness classification
finetuned_model_name = "abullard1/albert-v2-steam-review-constructiveness-classifier"
# PyTorch classifier pipeline
classifier = pipeline(
    task="text-classification",  # Defines the task
    model=finetuned_model_name,  # Defines the fine-tuned model to use
    tokenizer=base_model_name,  # Defines the tokenizer to use (same as the base model)
    device=device,  # Defines the device the classification will be run on
    top_k=None,  # Returns the scores for all labels, not just the highest-scoring one
    truncation=True,  # Truncates the input text if it exceeds the maximum length
    max_length=512,  # Defines the maximum length of the input text (512 tokens; set explicitly here)
    torch_dtype=torch_d_type  # float16 if CUDA is available, otherwise float32 (see above)
)
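# Illustrative example of the raw pipeline output (the scores below are placeholders, not real predictions):
# with top_k=None the pipeline returns one list of label/score dicts per input, sorted by descending score, e.g.
#   classifier("Review: Great game., Playtime: 10, Voted Up: True, Upvotes: 1, Votes Funny: 0")
#   -> [[{"label": "LABEL_1", "score": 0.98}, {"label": "LABEL_0", "score": 0.02}]]
# where LABEL_1 corresponds to "Constructive" and LABEL_0 to "Not Constructive".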
# Classifies the input text and extracts the labels and scores from the prediction result
def classify_steam_review(input_text):
    result = classifier(input_text)
    label_1, label_2 = result[0][0]["label"], result[0][1]["label"]
    score_1, score_2 = round(result[0][0]["score"], 6), round(result[0][1]["score"], 6)
    return {"label_1": label_1, "score_1": score_1, "label_2": label_2, "score_2": score_2}
# Provides a textual representation of the classification result
def get_steam_review_classification_result_text(label_1, score_1):
    if label_1 == "LABEL_1":
        return f"Constructive with a score of {score_1}. 👍🏻"
    else:
        return f"Not Constructive with a score of {score_1}. 👎🏻"
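# Illustrative usage of the two helper functions above (the score shown is a placeholder, not a real prediction):
#   result = classify_steam_review("Review: Trash game. Deleted., Playtime: 1, Voted Up: False, Upvotes: 0, Votes Funny: 0")
#   get_steam_review_classification_result_text(result["label_1"], result["score_1"])
#   -> "Not Constructive with a score of 0.95. 👎🏻"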
# Example Steam Reviews to display in the Gradio Interface using the "examples" parameter
examples = [
    ["Review: I think this is a great game but it still has some room for improvement., Playtime: 12, Voted Up: True, Upvotes: 1, Votes Funny: 0"],
    ["Review: Trash game. Deleted., Playtime: 1, Voted Up: False, Upvotes: 0, Votes Funny: 0"],
    ["Review: This game is amazing., Playtime: 100, Voted Up: True, Upvotes: 1, Votes Funny: 0"],
    ["Review: Great game, but the community is toxic., Playtime: 50, Voted Up: True, Upvotes: 1, Votes Funny: 0"]
]
# Information Description about the Steam Review Format
# info_description = (
# """
# Format your input as follows for the best results: ***Review**: {review_text}, **Playtime**: {author_playtime_at_review}, **Voted Up**: {voted_up}, **Upvotes**: {upvotes}, **Votes Funny**: {votes_funny}.*
# """
# )
# Labeling Criteria Information Markdown
labeling_criteria = (
"""
<ul>
<li>
<b><span style="color:green;">Constructive</span></b>:
Reviews that provide <b>helpful feedback</b>, <b>suggestions for improvement</b>,
<b>constructive criticism</b>, or <b>detailed insights</b> into the game.
</li>
<li>
<b><span style="color:red;">Not Constructive</span></b>:
Reviews that <b>do not offer useful feedback</b>, <b>lack substance</b>,
are <b>vague</b>, <b>off-topic</b>, <b>irrelevant</b>, or <b>trolling</b>.
</li>
</ul>
"""
)
# Custom CSS to create a card-like design for the Gradio Interface
custom_css = """
<style>
.card {
border: 1px solid rgba(255, 255, 255, 0.1);
padding: 16px;
border-radius: 12px;
box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
background-color: transparent;
margin-bottom: 16px;
}
</style>
"""
# Main Gradio Interface using Gradio Blocks
# Docs: https://www.gradio.app/docs/gradio/blocks
with gr.Blocks() as steam_reviews_classifier_block:
    with gr.Column(elem_classes=["card"]):
        gr.Markdown("## Steam Review Constructiveness Classifier")
        # gr.Markdown(info_description)
        gr.HTML(custom_css)
    # Main Column
    with gr.Column():
        # Card Column to create a card-like design for the main functionality components
        with gr.Column(elem_classes=["card"]):
            # Upper Row (Input Textbox, Constructive Label, Not Constructive Label)
            with gr.Row(equal_height=True):
                # Input Textbox Column
                with gr.Column():
                    input_textbox = gr.Textbox(
                        lines=8,
                        label="Steam Review",
                        # info="Input Steam Review here",
                        placeholder="Review: I think this is a great game but it still has some room for improvement., Playtime: 12, Voted Up: True, Upvotes: 1, Votes Funny: 0",
                        show_copy_button=False,
                        value="Review: I think this is a great game but it still has some room for improvement., Playtime: 12, Voted Up: True, Upvotes: 1, Votes Funny: 0"
                    )
                # Constructive and Not Constructive Labels Column
                with gr.Column():
                    constructive_label = gr.Label(label="Constructive")
                    not_constructive_label = gr.Label(label="Not Constructive")
            # Output Textbox which shows the textual representation of the Constructiveness Prediction
            output_textbox = gr.Textbox(
                label="Constructiveness Prediction",
                interactive=False,
                show_copy_button=False,
                # info="Textual representation of the Constructiveness Prediction"
            )
            gr.Markdown("<br>")
            # Examples Component
            example_component = gr.Examples(
                examples=examples,
                inputs=input_textbox
            )
            # Submit Button
            submit_button = gr.Button(value="Submit")
        # Collapsible Labeling Criteria Info Accordion
        with gr.Accordion(label="See Labeling Criteria:", open=False):
            gr.Markdown(labeling_criteria)
    # Function to run when the Submit Button is clicked (passes the input text to the classifier and displays the output text)
    def on_submit_click(input_text):
        classification_result = classify_steam_review(input_text)
        label_1 = classification_result["label_1"]
        score_1 = classification_result["score_1"]
        score_2 = classification_result["score_2"]
        output_text = get_steam_review_classification_result_text(label_1, score_1)
        # Maps the scores to the Constructive and Not Constructive labels in the Gradio Interface
        if label_1 == "LABEL_1":
            constructive, not_constructive = score_1, score_2
        else:
            constructive, not_constructive = score_2, score_1
        return output_text, constructive, not_constructive

    # onClick event for the Submit Button
    submit_button.click(
        fn=on_submit_click,
        inputs=input_textbox,
        outputs=[output_textbox, constructive_label, not_constructive_label]
    )
# Launches the Gradio Blocks Interface
steam_reviews_classifier_block.launch()
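# Note: launch() serves the interface on a local URL by default; on HuggingFace Spaces the platform hosts
# the app automatically. When running locally, passing share=True would additionally create a temporary
# public link (optional).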