import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
import numpy as np
import os
from tensorflow.keras.preprocessing import image
# Define image size and batch size
IMG_SIZE = 224
BATCH_SIZE = 32
# Define train and validation directories
# flow_from_directory expects one subfolder per class, so these point at the
# parent 'train'/'val' directories rather than the single 'burrito' class
# folder; raw strings keep Windows backslashes (e.g. '\t') from being read
# as escape sequences
train_dir = r'm2rncvif2arzs1w3q44gfn\images.cv_m2rncvif2arzs1w3q44gfn\data\train'
val_dir = r'm2rncvif2arzs1w3q44gfn\images.cv_m2rncvif2arzs1w3q44gfn\data\val'
# Use ImageDataGenerator for data augmentation
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
# Generate batches of augmented data from directories
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
val_generator = val_datagen.flow_from_directory(
    val_dir,
    target_size=(IMG_SIZE, IMG_SIZE),
    batch_size=BATCH_SIZE,
    class_mode='categorical')
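# Sanity check (optional addition): Keras assigns class indices
# alphabetically by folder name, and the prediction loop at the bottom of
# this script depends on that ordering, so print the mapping to confirm
# which softmax column corresponds to 'burrito'.
print(train_generator.class_indices)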
# Define the model architecture
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_SIZE, IMG_SIZE, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
# Compile the model with categorical crossentropy loss and Adam optimizer
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
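# Optional addition: print a layer-by-layer summary to verify the
# architecture and parameter counts before training starts.
model.summary()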
# Define the number of training and validation steps per epoch
train_steps_per_epoch = train_generator.samples // BATCH_SIZE
val_steps_per_epoch = val_generator.samples // BATCH_SIZE
# Train the model (model.fit accepts generators directly; fit_generator
# is deprecated in TensorFlow 2)
history = model.fit(
    train_generator,
    steps_per_epoch=train_steps_per_epoch,
    epochs=10,
    validation_data=val_generator,
    validation_steps=val_steps_per_epoch)
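# Optional sketch: save the trained model so inference can run later
# without retraining ('burrito_classifier.h5' is a placeholder filename,
# not part of the original script).
model.save('burrito_classifier.h5')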
# Path to directory with burrito images
dir_path = r'm2rncvif2arzs1w3q44gfn\images.cv_m2rncvif2arzs1w3q44gfn\data\test\burrito'
# Loop through all images in the directory
for img_file in os.listdir(dir_path):
    # Load and preprocess the image the same way as the training data
    img_path = os.path.join(dir_path, img_file)
    img = image.load_img(img_path, target_size=(IMG_SIZE, IMG_SIZE))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array /= 255.0
    # Make a prediction
    prediction = model.predict(img_array)
    # Column order follows train_generator.class_indices (alphabetical by
    # folder name); this assumes column 1 is the 'burrito' class
    if prediction[0][0] > prediction[0][1]:
        print('{}: Not a burrito'.format(img_file))
    else:
        print('{}: Burrito!'.format(img_file))