# Training script: builds and trains a small Keras CNN image classifier
# from class-labelled sub-folders under Data/, then saves it to Model/.
import os
import time

import tensorflow as tf
from keras import layers, models
from keras.callbacks import EarlyStopping
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.losses import CategoricalCrossentropy
from keras.models import Sequential
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
# Step 1: Configuration and data generators.
path = 'Data'                      # root folder: one sub-folder per class
output = 'Model/keras_model.h5'    # where the trained model is written
start_time = time.time()


def count_sub_folders(directory: str) -> int:
    """Return the number of immediate sub-directories of *directory*.

    Each sub-folder is one class, so this is the size of the softmax head.
    """
    return sum(1 for entry in os.scandir(directory) if entry.is_dir())


# BUG FIX: the augmentation arguments were orphaned — the
# `ImageDataGenerator(` call itself was missing (syntax error).
# One generator does rescaling + augmentation with a 20% validation split.
datagen = ImageDataGenerator(
    rescale=1. / 255,
    validation_split=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
)
# NOTE(review): unused — the validation split below comes from `datagen`,
# not from this generator. Kept for backward compatibility; consider removing.
test_datagen = ImageDataGenerator(rescale=1. / 255)
# Step 2: Label the data — build training / validation iterators from the
# folder structure (class labels are inferred from sub-folder names).
# BUG FIX: the original `train_set` call was unclosed and both calls lacked
# the directory argument and a target_size matching the model's 300x300 input.
train_set = datagen.flow_from_directory(
    path,
    target_size=(300, 300),   # must match the model's input_shape
    batch_size=32,
    class_mode='categorical',
    subset='training',
)
test_set = datagen.flow_from_directory(
    path,
    target_size=(300, 300),
    batch_size=32,
    class_mode='categorical',
    subset='validation',
)
# Step 4: Build the model — a plain conv stack followed by a dense head.
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(300, 300, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
# BUG FIX: Flatten was missing, so the Dense layers were applied per spatial
# position instead of on a single feature vector per image.
model.add(Flatten())
model.add(Dense(units=256, activation='relu'))
# One softmax unit per class folder found under `path`.
model.add(Dense(units=count_sub_folders(path), activation='softmax'))
# (removed: bare line numbers 64-99 pasted from the source listing — scrape artifact, not code)
# NOTE(review): unused experimental MobileNet-style model builder, kept
# commented out for reference — delete once it is no longer needed.
# def createLayers(input_shape=(224, 224, 3)):
# inputs = tf.keras.Input(shape=input_shape)
#
# x = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name='block_1_expand')(inputs)
# x = layers.BatchNormalization(name='block_1_expand_BN')(x)
# x = layers.ReLU(6., name='block_1_expand_relu')(x)
#
# x = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name='block_1_depthwise')(x)
# x = layers.BatchNormalization(name='block_1_depthwise_BN')(x)
# x = layers.ReLU(6., name='block_1_depthwise_relu')(x)
#
# x = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name='block_1_project')(x)
# x = layers.BatchNormalization(name='block_1_project_BN')(x)
#
# for i in range(2,5):
# x1 = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name=f'block_{i}_expand')(x)
# x1 = layers.BatchNormalization(name=f'block_{i}_expand_BN')(x1)
# x1 = layers.ReLU(6., name=f'block_{i}_expand_relu')(x1)
#
# x1 = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name=f'block_{i}_depthwise')(x1)
# x1 = layers.BatchNormalization(name=f'block_{i}_depthwise_BN')(x1)
# x1 = layers.ReLU(6., name=f'block_{i}_depthwise_relu')(x1)
#
# x1 = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name=f'block_{i}_project')(x1)
# x1 = layers.BatchNormalization(name=f'block_{i}_project_BN')(x1)
#
# x = layers.Add(name=f'block_{i}_add')([x, x1])
#
# x = tf.keras.layers.GlobalAveragePooling2D()(x)
# outputs = tf.keras.layers.Dense(count_sub_folders(path), activation='softmax')(x)
# model = models.Model(inputs, outputs, name='testModel')
#
# return model
#
#
# model = createLayers()
# Step 5: Compile — softmax output, so probabilities (from_logits=False).
model.compile(optimizer=Adam(learning_rate=0.001),
              loss=CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

# Step 6: Train, stopping early when validation loss plateaus.
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
# BUG FIX: early_stopping was constructed but never passed to fit().
model.fit(train_set, validation_data=test_set, epochs=10, callbacks=[early_stopping])

# Step 7: Evaluate the model on the held-out validation split.
loss, accuracy = model.evaluate(test_set)
print(f'Test loss: {loss}, Test accuracy: {accuracy}')

# Step 8: Save the trained model.
# BUG FIX: `output` was defined at the top of the script but the model was
# never saved, so a full training run produced no artifact.
os.makedirs(os.path.dirname(output), exist_ok=True)
model.save(output)

end_time = time.time()
execute_time = (end_time - start_time) / 60  # minutes
# BUG FIX: execute_time was computed but never reported.
print(f'Training time: {execute_time:.2f} minutes')