# -*- coding: utf-8 -*-
"""Accelerator_Model_Training_Notebook.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1CSyAE9DhwGTl7bLaSoo7QSyMuoEqJpCj

## This is the Image Classification Model Training Accelerator Notebook

In this notebook, you will input your Labelbox API key, plus the Model Run ID
and Ontology ID associated with the dataset you created on the Labelbox
platform.

Please note this notebook assumes you have followed the beginning of the
accelerator tutorial and set up a project that labels **images as one option
of a radio classification list**.

Label names must be lower case.

Input your API key, Ontology ID, and Model Run ID below.
"""

import streamlit as st

def train_and_inference(api_key, ontology_id, model_run_id):
    # api_key: your Labelbox API key
    # ontology_id: from the Settings tab at the top left of your model run
    # model_run_id: from the settings gear icon on the right side of your Model Run

    import json
    import os
    import shutil
    import time
    import uuid

    import numpy as np
    import pandas as pd
    import requests
    import scipy
    import tensorflow as tf
    from tensorflow.keras import layers
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.preprocessing.image import ImageDataGenerator

    import labelbox
    import labelbox.data
    from labelbox import Client, LabelingFrontend, LabelImport, MALPredictionImport, MediaType
    from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option
    from labelbox.data.annotation_types import (
        Label, ImageData, ObjectAnnotation, MaskData,
        Rectangle, Point, Line, Mask, Polygon,
        Radio, Checklist, Text,
        ClassificationAnnotation, ClassificationAnswer
    )
    from labelbox.data.serialization import NDJsonConverter

    """Connect to the Labelbox client and define model variables."""

    client = Client(api_key)
    EPOCHS = 10

    """#Setup Training

    Export Classifications from Model Run
    """

    model_run = client.get_model_run(model_run_id)

    client.enable_experimental = True
    data_json = model_run.export_labels(download=True)
    print(data_json)
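
    # Each exported entry, as consumed below, is expected to look roughly like
    # the following (an illustrative sketch, not the full Labelbox export schema):
    # {
    #     "DataRow ID": "...",
    #     "Data Split": "training" | "validation" | "test",
    #     "Labeled Data": "<image URL>",
    #     "Label": {"classifications": [{"answer": {"value": "<label name>"}}]}
    # }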

    """Separate datarows into folders."""

    import requests
    import os

    def download_and_save_image(url, destination_folder, filename):
        if not os.path.exists(destination_folder):
            os.makedirs(destination_folder)

        # Stream the download so large images are not held in memory all at once
        response = requests.get(url, stream=True)
        response.raise_for_status()

        with open(os.path.join(destination_folder, filename), 'wb') as file:
            for chunk in response.iter_content(8192):
                file.write(chunk)

    BASE_DIR = 'dataset'

    for entry in data_json:
        data_split = entry['Data Split']
        if data_split not in ['training', 'validation']:  # we are skipping 'test' for now
            continue

        image_url = entry['Labeled Data']
        label = entry['Label']['classifications'][0]['answer']['value']

        destination_folder = os.path.join(BASE_DIR, data_split, label)
        filename = os.path.basename(image_url)

        download_and_save_image(image_url, destination_folder, filename)
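
    # After this loop, images land in the directory layout that Keras's
    # flow_from_directory expects (illustrative paths):
    #   dataset/training/<label_name>/<image_file>
    #   dataset/validation/<label_name>/<image_file>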

    """#Train Model"""

    import tensorflow as tf
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    from tensorflow.keras.applications import MobileNetV2
    from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
    from tensorflow.keras.models import Model
    from tensorflow.keras.optimizers import Adam

    TRAIN_DIR = 'dataset/training'
    VALIDATION_DIR = 'dataset/validation'
    IMG_HEIGHT, IMG_WIDTH = 224, 224  # default size for MobileNetV2
    BATCH_SIZE = 32

    train_datagen = ImageDataGenerator(
        rescale=1./255,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest'
    )

    validation_datagen = ImageDataGenerator(rescale=1./255)

    train_ds = train_datagen.flow_from_directory(
        TRAIN_DIR,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=BATCH_SIZE,
        class_mode='categorical'
    )

    validation_ds = validation_datagen.flow_from_directory(
        VALIDATION_DIR,
        target_size=(IMG_HEIGHT, IMG_WIDTH),
        batch_size=BATCH_SIZE,
        class_mode='categorical'
    )

    base_model = MobileNetV2(input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                             include_top=False,
                             weights='imagenet')

    # Freeze the base model
    for layer in base_model.layers:
        layer.trainable = False

    # Create custom classification head
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(train_ds.num_classes, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    model.compile(optimizer=Adam(learning_rate=0.0001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
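
    # Optional, not in the original notebook: on small datasets, early stopping
    # can guard against overfitting. A hedged sketch:
    # callbacks = [tf.keras.callbacks.EarlyStopping(
    #     monitor='val_loss', patience=3, restore_best_weights=True)]
    # ...then pass callbacks=callbacks to model.fit below.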

    st.write("training")
    history = model.fit(
        train_ds,
        validation_data=validation_ds,
        epochs=EPOCHS
    )

    """Run Inference on Model run Datarows"""
    st.write('running Inference')

    import numpy as np
    import requests
    from tensorflow.keras.preprocessing import image
    from PIL import Image
    from io import BytesIO
    # Fetch the image from the URL
    def load_image_from_url(img_url, target_size=(224, 224)):
        response = requests.get(img_url)
        img = Image.open(BytesIO(response.content))
        img = img.resize(target_size)
        img_array = image.img_to_array(img)
        return np.expand_dims(img_array, axis=0)
    def make_prediction(img_url):
    # Image URL
        img_url = img_url

        # Load and preprocess the image
        img_data = load_image_from_url(img_url)
        img_data = img_data / 255.0  # Normalize the image data to [0,1]

        # Make predictions
        predictions = model.predict(img_data)
        predicted_class = np.argmax(predictions[0])

        # Retrieve the confidence score (probability) for the predicted class
        confidence = predictions[0][predicted_class]

        # Map the predicted class index to its corresponding label
        class_map = train_ds.class_indices
        inverse_map = {v: k for k, v in class_map.items()}
        predicted_label = inverse_map[predicted_class]

        return predicted_label, confidence
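
    # Usage sketch (hypothetical URL, for illustration only):
    #   predicted_label, confidence = make_prediction("https://example.com/cat.jpg")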

    from tensorflow.errors import InvalidArgumentError

    ontology = client.get_ontology(ontology_id)
    label_list = []
    for datarow in model_run.export_labels(download=True):
        try:
            label, confidence = make_prediction(datarow['Labeled Data'])
        except InvalidArgumentError as e:
            print(f"InvalidArgumentError: {e}. Skipping this data row.")
            continue  # Skip to the next data row if the image cannot be processed
        # Wrap the prediction as a radio classification answer
        radio_answer = ClassificationAnswer(name=label, confidence=confidence)
        radio_prediction = ClassificationAnnotation(
            name=ontology.classifications()[0].instructions,
            value=Radio(answer=radio_answer)
        )
        label_prediction = Label(
            data=ImageData(uid=datarow['DataRow ID']),
            annotations=[radio_prediction]
        )
        label_list.append(label_prediction)

    prediction_import = model_run.add_predictions(
        name="prediction_upload_job_" + str(uuid.uuid4()),
        predictions=label_list)

    prediction_import.wait_until_done()

    if prediction_import.errors == []:
        return "Model trained and inference ran successfully"
    else:
        return f"Prediction upload finished with errors: {prediction_import.errors}"

st.title("Enter the applicable IDs and keys below")
api_key = st.text_input("Enter your API key:", type="password")
model_run_id = st.text_input("Enter your model run ID:")
ontology_id = st.text_input("Enter your ontology ID:")

if st.button("Train and run inference"):
    st.write('Starting up...')
    # Only proceed if every field is filled in
    if api_key and model_run_id and ontology_id:
        result = train_and_inference(api_key, ontology_id, model_run_id)
        st.write(result)
    else:
        st.warning("Please enter all keys.")