Commit 6684f82a authored by Michiel_VE

made so code can be trained on GPU

+ update README with environment installation instructions
parent b44a602a
1 merge request: !1 made so code can be trained on GPU
@@ -2,4 +2,6 @@
 venv
 Data
-Data_test
\ No newline at end of file
+Data_test
+Model
+.ipynb_checkpoints
\ No newline at end of file
%% Cell type:code id:62c6c5c3-d2ba-4e79-b533-828c3083e8ea tags:
``` python
import time

import tensorflow as tf
from tensorflow.keras import layers, models  # used only by the commented-out createLayers sketch below
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator

from Func.getSubFolders import count_sub_folders
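# count_sub_folders is a project-local helper (Func/getSubFolders.py); from its
# use below it appears to count the class subfolders in `path`, which sets the
# number of output units (6 in the recorded run).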
path = 'Data'
output = 'Model/keras_model.h5'
start_time = time.time()
# The memory-growth setup below raised a runtime error on this machine, so it is disabled:
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# print("Num GPUs Available: ", len(physical_devices))
# if len(physical_devices) > 0:
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
# else:
# print("No GPU available. Using CPU.")
# Step 1: Load and Preprocess Images
datagen = ImageDataGenerator(
rescale=1. / 255,
validation_split=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
)
# Rescale-only generator; currently unused in this run (see the sketch below)
test_datagen = ImageDataGenerator(rescale=1. / 255)
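# Because both subsets below are drawn from the augmenting `datagen`, the
# validation images are augmented as well. A minimal sketch of the usual
# alternative, an unaugmented validation split (`val_datagen` is a hypothetical
# name; this is NOT what the recorded run used):
# val_datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.2)
# test_set = val_datagen.flow_from_directory(
#     path, target_size=(224, 224), batch_size=32,
#     class_mode='categorical', subset='validation', shuffle=False
# )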
# Step 2: Label the Data (labels are inferred from the subfolder names)
train_set = datagen.flow_from_directory(
path,
target_size=(224, 224),
batch_size=32,
class_mode='categorical',
subset='training'
)
test_set = datagen.flow_from_directory(
path,
target_size=(224, 224),
batch_size=32,
class_mode='categorical',
subset='validation'
)
# Step 3: Build the Model
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=count_sub_folders(path), activation='softmax'))
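# Shape check: each 3x3 conv (valid padding) trims 2 px and each 2x2 pool halves
# the map: 224 -> 222 -> 111 -> 109 -> 54 -> 52 -> 26 -> 24 -> 12, so Flatten
# yields 12 * 12 * 256 = 36864 features, matching the summary in the output below.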
# Experimental alternative (unused): MobileNetV2-style inverted-residual blocks.
# def createLayers(input_shape=(224, 224, 3)):
# inputs = tf.keras.Input(shape=input_shape)
#
# x = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name='block_1_expand')(inputs)
# x = layers.BatchNormalization(name='block_1_expand_BN')(x)
# x = layers.ReLU(6., name='block_1_expand_relu')(x)
#
# x = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name='block_1_depthwise')(x)
# x = layers.BatchNormalization(name='block_1_depthwise_BN')(x)
# x = layers.ReLU(6., name='block_1_depthwise_relu')(x)
#
# x = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name='block_1_project')(x)
# x = layers.BatchNormalization(name='block_1_project_BN')(x)
#
# for i in range(2,5):
# x1 = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name=f'block_{i}_expand')(x)
# x1 = layers.BatchNormalization(name=f'block_{i}_expand_BN')(x1)
# x1 = layers.ReLU(6., name=f'block_{i}_expand_relu')(x1)
#
# x1 = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name=f'block_{i}_depthwise')(x1)
# x1 = layers.BatchNormalization(name=f'block_{i}_depthwise_BN')(x1)
# x1 = layers.ReLU(6., name=f'block_{i}_depthwise_relu')(x1)
#
# x1 = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name=f'block_{i}_project')(x1)
# x1 = layers.BatchNormalization(name=f'block_{i}_project_BN')(x1)
#
# x = layers.Add(name=f'block_{i}_add')([x, x1])
#
# x = tf.keras.layers.GlobalAveragePooling2D()(x)
# outputs = tf.keras.layers.Dense(count_sub_folders(path), activation='softmax')(x)
# model = models.Model(inputs, outputs, name='testModel')
#
# return model
#
#
# model = createLayers()
# Step 4: Compile the Model
model.compile(optimizer=Adam(learning_rate=0.001),
              loss=CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
# Step 5: Train the Model
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
model.fit(train_set, validation_data=test_set, epochs=10, callbacks=[early_stopping])
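# With epochs=10 and patience=5, EarlyStopping can first trigger at epoch 6;
# restore_best_weights=True rolls the model back to the epoch with the lowest
# val_loss before evaluation and saving.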
# Step 6: Evaluate the Model
loss, accuracy = model.evaluate(test_set)
print(f'Test loss: {loss}, Test accuracy: {accuracy}')
# Save the trained model
model.save(output)
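# Note: recent TF/Keras releases treat HDF5 (.h5) as a legacy format and
# recommend the native one, e.g. model.save('Model/keras_model.keras');
# .h5 is kept here to match the recorded run.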
end_time = time.time()
execute_time = (end_time - start_time) / 60
model.summary()
# Print the result
print(f"It took: {execute_time:0.2f} minutes")
```
%% Output
Found 1529 images belonging to 6 classes.
Found 380 images belonging to 6 classes.
Epoch 1/10
48/48 [==============================] - 29s 470ms/step - loss: 1.0985 - accuracy: 0.5284 - val_loss: 0.7411 - val_accuracy: 0.6868
Epoch 2/10
48/48 [==============================] - 20s 417ms/step - loss: 0.5369 - accuracy: 0.7757 - val_loss: 0.5103 - val_accuracy: 0.7921
Epoch 3/10
48/48 [==============================] - 20s 417ms/step - loss: 0.2837 - accuracy: 0.8875 - val_loss: 0.8421 - val_accuracy: 0.7105
Epoch 4/10
48/48 [==============================] - 20s 417ms/step - loss: 0.1952 - accuracy: 0.9228 - val_loss: 0.2502 - val_accuracy: 0.8947
Epoch 5/10
48/48 [==============================] - 20s 419ms/step - loss: 0.1007 - accuracy: 0.9647 - val_loss: 0.0922 - val_accuracy: 0.9632
Epoch 6/10
48/48 [==============================] - 19s 404ms/step - loss: 0.0690 - accuracy: 0.9719 - val_loss: 0.1608 - val_accuracy: 0.9316
Epoch 7/10
48/48 [==============================] - 19s 406ms/step - loss: 0.0707 - accuracy: 0.9745 - val_loss: 0.1129 - val_accuracy: 0.9500
Epoch 8/10
48/48 [==============================] - 19s 401ms/step - loss: 0.0674 - accuracy: 0.9791 - val_loss: 0.0428 - val_accuracy: 0.9868
Epoch 9/10
48/48 [==============================] - 19s 400ms/step - loss: 0.0260 - accuracy: 0.9922 - val_loss: 0.0619 - val_accuracy: 0.9711
Epoch 10/10
48/48 [==============================] - 20s 404ms/step - loss: 0.0142 - accuracy: 0.9961 - val_loss: 0.0146 - val_accuracy: 0.9947
12/12 [==============================] - 4s 329ms/step - loss: 0.0094 - accuracy: 0.9974
Test loss: 0.009386303834617138, Test accuracy: 0.9973683953285217
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 222, 222, 32) 896
max_pooling2d (MaxPooling2D (None, 111, 111, 32) 0
)
conv2d_1 (Conv2D) (None, 109, 109, 64) 18496
max_pooling2d_1 (MaxPooling (None, 54, 54, 64) 0
2D)
conv2d_2 (Conv2D) (None, 52, 52, 128) 73856
max_pooling2d_2 (MaxPooling (None, 26, 26, 128) 0
2D)
conv2d_3 (Conv2D) (None, 24, 24, 256) 295168
max_pooling2d_3 (MaxPooling (None, 12, 12, 256) 0
2D)
flatten (Flatten) (None, 36864) 0
dense (Dense) (None, 256) 9437440
dense_1 (Dense) (None, 128) 32896
dense_2 (Dense) (None, 6) 774
=================================================================
Total params: 9,859,526
Trainable params: 9,859,526
Non-trainable params: 0
_________________________________________________________________
It took: 3.55 minutes
%% Cell type:code id:67803a21-51f7-4bec-98fc-4efa8f75bab9 tags:
``` python
```