import streamlit as st
# -*- coding: utf-8 -*-
"""Accelerator_Model_Training_Notebook.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1CSyAE9DhwGTl7bLaSoo7QSyMuoEqJpCj

## This is the Image Classification Model Training Accelerator Notebook

In this notebook, you will input your Labelbox API key, along with the Model Run ID and Ontology ID associated with the dataset you created on the Labelbox platform.

Please note this notebook will only run through if you have followed the beginning of the accelerator tutorial and set up a project that labels **images as one option of a radio classification list**.

Label names must be lowercase.

Input your API_Key, Ontology_ID, and Model_Run_ID.
"""
def train_and_inference(api_key, ontology_id, model_run_id):
    # api_key: your Labelbox API key
    # ontology_id: from the Settings tab at the top left of your model run
    # model_run_id: from the settings gear icon on the right side of your Model Run
    import numpy as np
    import tensorflow as tf
    from tensorflow.keras import layers
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    import os
    import labelbox
    import labelbox.data
    from labelbox import Client, LabelingFrontend, LabelImport, MALPredictionImport, MediaType
    from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option
    from labelbox.data.annotation_types import (
        Label, ImageData, ObjectAnnotation, MaskData,
        Rectangle, Point, Line, Mask, Polygon,
        Radio, Checklist, Text,
        ClassificationAnnotation, ClassificationAnswer
    )
    from labelbox.data.serialization import NDJsonConverter
    import pandas as pd
    import shutil
    import scipy
    import json
    import uuid
    import time
    import requests
"""Connect to labelbox client | |
Define Model Variables | |
""" | |
client = Client(api_key) | |
EPOCHS = 10 | |
"""#Setup Training | |
Export Classifications from Model Run | |
""" | |
model_run = client.get_model_run(model_run_id) | |
client.enable_experimental = True | |
data_json = model_run.export_labels(download=True) | |
print(data_json) | |
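    # A sketch of the export shape the code below assumes: each entry in
    # data_json is a dict whose relevant keys look roughly like
    #   {
    #       "DataRow ID": "...",
    #       "Data Split": "training" | "validation" | "test",
    #       "Labeled Data": "<image URL>",
    #       "Label": {"classifications": [{"answer": {"value": "<label name>"}}]}
    #   }
    # (illustrative only; the actual export contains additional fields)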
"""Separate datarows into folders.""" | |
import requests | |
import os | |
def download_and_save_image(url, destination_folder, filename): | |
if not os.path.exists(destination_folder): | |
os.makedirs(destination_folder) | |
response = requests.get(url, stream=True) | |
response.raise_for_status() | |
with open(os.path.join(destination_folder, filename), 'wb') as file: | |
for chunk in response.iter_content(8192): | |
file.write(chunk) | |
BASE_DIR = 'dataset' | |
for entry in data_json: | |
data_split = entry['Data Split'] | |
if data_split not in ['training', 'validation']: # we are skipping 'test' for now | |
continue | |
image_url = entry['Labeled Data'] | |
label = entry['Label']['classifications'][0]['answer']['value'] | |
destination_folder = os.path.join(BASE_DIR, data_split, label) | |
filename = os.path.basename(image_url) | |
download_and_save_image(image_url, destination_folder, filename) | |
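    # The loop above produces the directory layout that Keras'
    # flow_from_directory expects, with one subfolder per class, e.g.:
    #   dataset/
    #       training/<label name>/<image files>
    #       validation/<label name>/<image files>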
"""#Train Model""" | |
import tensorflow as tf | |
from tensorflow.keras.preprocessing.image import ImageDataGenerator | |
from tensorflow.keras.applications import MobileNetV2 | |
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D | |
from tensorflow.keras.models import Model | |
from tensorflow.keras.optimizers import Adam | |
TRAIN_DIR = 'dataset/training' | |
VALIDATION_DIR = 'dataset/validation' | |
IMG_HEIGHT, IMG_WIDTH = 224, 224 # default size for MobileNetV2 | |
BATCH_SIZE = 32 | |
train_datagen = ImageDataGenerator( | |
rescale=1./255, | |
rotation_range=20, | |
width_shift_range=0.2, | |
height_shift_range=0.2, | |
shear_range=0.2, | |
zoom_range=0.2, | |
horizontal_flip=True, | |
fill_mode='nearest' | |
) | |
validation_datagen = ImageDataGenerator(rescale=1./255) | |
train_ds = train_datagen.flow_from_directory( | |
TRAIN_DIR, | |
target_size=(IMG_HEIGHT, IMG_WIDTH), | |
batch_size=BATCH_SIZE, | |
class_mode='categorical' | |
) | |
validation_ds = validation_datagen.flow_from_directory( | |
VALIDATION_DIR, | |
target_size=(IMG_HEIGHT, IMG_WIDTH), | |
batch_size=BATCH_SIZE, | |
class_mode='categorical' | |
) | |
base_model = MobileNetV2(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3), | |
include_top=False, | |
weights='imagenet') | |
# Freeze the base model | |
for layer in base_model.layers: | |
layer.trainable = False | |
# Create custom classification head | |
x = base_model.output | |
x = GlobalAveragePooling2D()(x) | |
x = Dense(1024, activation='relu')(x) | |
predictions = Dense(train_ds.num_classes, activation='softmax')(x) | |
model = Model(inputs=base_model.input, outputs=predictions) | |
model.compile(optimizer=Adam(learning_rate=0.0001), | |
loss='categorical_crossentropy', | |
metrics=['accuracy']) | |
st.write("training") | |
history = model.fit( | |
train_ds, | |
validation_data=validation_ds, | |
epochs=EPOCHS | |
) | |
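    # Optional (not in the original notebook): persist the trained model so a
    # later session can reload it instead of retraining. The path is an assumption.
    # model.save('accelerator_model.keras')
    # reloaded = tf.keras.models.load_model('accelerator_model.keras')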
"""Run Inference on Model run Datarows""" | |
st.write('running Inference') | |
import numpy as np | |
import requests | |
from tensorflow.keras.preprocessing import image | |
from PIL import Image | |
from io import BytesIO | |
# Fetch the image from the URL | |
def load_image_from_url(img_url, target_size=(224, 224)): | |
response = requests.get(img_url) | |
img = Image.open(BytesIO(response.content)) | |
img = img.resize(target_size) | |
img_array = image.img_to_array(img) | |
return np.expand_dims(img_array, axis=0) | |
def make_prediction(img_url): | |
# Image URL | |
img_url = img_url | |
# Load and preprocess the image | |
img_data = load_image_from_url(img_url) | |
img_data = img_data / 255.0 # Normalize the image data to [0,1] | |
# Make predictions | |
predictions = model.predict(img_data) | |
predicted_class = np.argmax(predictions[0]) | |
# Retrieve the confidence score (probability) for the predicted class | |
confidence = predictions[0][predicted_class] | |
# Map the predicted class index to its corresponding label | |
class_map = train_ds.class_indices | |
inverse_map = {v: k for k, v in class_map.items()} | |
predicted_label = inverse_map[predicted_class] | |
return predicted_label, confidence | |
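    # Example usage, with a hypothetical image URL:
    #   label, confidence = make_prediction('https://example.com/some_image.jpg')
    #   -> ('cat', 0.93)  # class name from train_ds.class_indices, softmax score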
    from tensorflow.errors import InvalidArgumentError

    ontology = client.get_ontology(ontology_id)
    label_list = []
    for datarow in model_run.export_labels(download=True):
        try:
            label, confidence = make_prediction(datarow['Labeled Data'])
        except InvalidArgumentError as e:
            print(f"InvalidArgumentError: {e}. Skipping this data row.")
            continue  # skip to the next data row if an exception occurs
        radio_answer = ClassificationAnswer(
            name=label,
            confidence=float(confidence))  # cast from numpy float32 for serialization
        radio_prediction = ClassificationAnnotation(
            name=ontology.classifications()[0].instructions,
            value=Radio(answer=radio_answer))
        label_prediction = Label(
            data=ImageData(uid=datarow['DataRow ID']),
            annotations=[radio_prediction])
        label_list.append(label_prediction)

    prediction_import = model_run.add_predictions(
        name="prediction_upload_job" + str(uuid.uuid4()),
        predictions=label_list)
    prediction_import.wait_until_done()
    st.write(prediction_import.errors == [])
    if prediction_import.errors == []:
        return "Model trained and inference ran successfully"
    return f"Prediction import finished with errors: {prediction_import.errors}"
st.title("Enter applicable IDs and keys below")
api_key = st.text_input("Enter your API key:", type="password")
model_run_id = st.text_input("Enter your model run ID:")
ontology_id = st.text_input("Enter your ontology ID:")

if st.button("Train and run inference"):
    st.write('Starting up...')
    # Check that none of the inputs are empty
    if api_key and model_run_id and ontology_id:
        result = train_and_inference(api_key, ontology_id, model_run_id)
        st.write(result)
    else:
        st.warning("Please enter all keys.")
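# To launch this app locally (assuming this file is saved as app.py):
#   streamlit run app.py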