Rock Paper Scissors Object Detection Model

Created by FRC Team 578

Description

This YOLOv10-small model was trained for educational purposes only: it is used to show students how an object detection model works. It was trained for 10 epochs.

Training Data

The model was trained on 100 images found online. No augmentation of the images was performed.
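
For context, a comparable run with the ultralytics trainer might look like the sketch below. This is an assumption, not the team's published setup: rps.yaml is a hypothetical dataset config, yolov10s.pt is the standard ultralytics starting checkpoint, and the augmentation arguments are zeroed to mirror the no-augmentation setup.

from ultralytics import YOLO

# Start from the pretrained YOLOv10-small checkpoint
model = YOLO("yolov10s.pt")

# "rps.yaml" is a hypothetical dataset config listing the three classes.
model.train(
    data="rps.yaml",
    epochs=10,
    imgsz=640,
    hsv_h=0.0, hsv_s=0.0, hsv_v=0.0,        # no color jitter
    degrees=0.0, translate=0.0, scale=0.0,  # no geometric transforms
    fliplr=0.0, flipud=0.0,                 # no flips
    mosaic=0.0, mixup=0.0,                  # no mosaic / mixup
)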

Metrics

Class     Images  Instances  Box(P)  R      mAP50  mAP50-95
all       100     260        0.917   0.795  0.925  0.735
rock      69      84         0.875   0.835  0.924  0.728
paper     56      65         0.899   0.815  0.909  0.721
scissors  88      111        0.976   0.736  0.943  0.755
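
These numbers are the output of an ultralytics validation run. If you have your own labeled split, you can produce a table of the same form; the data path below is a placeholder, since the team's validation split is not published.

from ultralytics import YOLO

model = YOLO("model.pt")
# "rps.yaml" is a placeholder; point it at your own labeled data.
metrics = model.val(data="rps.yaml")
print(metrics.box.map50, metrics.box.map)  # mAP50 and mAP50-95 over all classes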

How to Use

First install the dependencies:

pip install ultralytics
pip install huggingface_hub

Then download the weights and run a prediction:

from ultralytics import YOLO
from huggingface_hub import hf_hub_download
from matplotlib import pyplot as plt

# Load the weights from our repository
model_path = hf_hub_download(
    local_dir=".",
    repo_id="fairportrobotics/rock-paper-scissors",
    filename="model.pt"
)
model = YOLO(model_path)

# Load a test image
sample_path = hf_hub_download(
    local_dir=".",
    repo_id="fairportrobotics/rock-paper-scissors",
    filename="sample.jpg"
)

# Run the prediction on the sample image
res = model.predict(
    source=sample_path,
    project='.',
    name='detected',
    exist_ok=True,
    save=True,
    show=False,
    show_labels=True,
    show_conf=True,
    conf=0.5
)

plt.figure(figsize=(15,10))
plt.imshow(plt.imread('detected/sample.jpg'))
plt.show()

As you can see, the model isn't perfect ;)
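
The same detections are also available programmatically on the returned res list; a quick way to print them:

# Each element of res is a Results object; res[0] is the sample image
for box in res[0].boxes:
    cls_id = int(box.cls[0])
    conf = float(box.conf[0])
    print(res[0].names[cls_id], round(conf, 2), box.xyxy[0].tolist())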

Use the model with your webcam

from ultralytics import YOLO
import cv2
from huggingface_hub import hf_hub_download

# Start the webcam and request a 640x480 frame size
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

# Load the weights from our repository
model_path = hf_hub_download(
    local_dir=".",
    repo_id="fairportrobotics/rock-paper-scissors",
    filename="model.pt"
)
model = YOLO(model_path)

# Object classes, in the order of the model's class indices
classNames = ["rock", "paper", "scissors"]


while True:
    success, img = cap.read()
    if not success:
        break

    results = model(img, stream=True)

    # coordinates
    for r in results:
        boxes = r.boxes

        for box in boxes:
            # bounding box
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) # convert to int values

            # put box in cam
            cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)

            # confidence as a plain float
            confidence = float(box.conf[0])

            # class name
            cls = int(box.cls[0])

            # label position and style
            org = (x1, y1)
            font = cv2.FONT_HERSHEY_SIMPLEX
            fontScale = 1
            color = (255, 0, 0)
            thickness = 2

            cv2.putText(img, f"{classNames[cls]} {confidence:.2f}", org, font, fontScale, color, thickness)

    cv2.imshow('Webcam', img)
    if cv2.waitKey(1) == ord('q'):  # press q to quit
        break

cap.release()
cv2.destroyAllWindows()
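
If you just want a quick preview without the manual drawing code, ultralytics can read the webcam and render its own annotations; a minimal sketch using the same weights:

from ultralytics import YOLO

model = YOLO("model.pt")
# source=0 is the default webcam; show=True opens an annotated preview window.
# stream=True yields results frame by frame, so the loop must consume them.
for r in model.predict(source=0, show=True, conf=0.5, stream=True):
    pass  # press Ctrl+C to stop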