{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "62c6c5c3-d2ba-4e79-b533-828c3083e8ea", "metadata": { "is_executing": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Num GPUs Available: 1\n", "Found 1529 images belonging to 6 classes.\n", "Found 380 images belonging to 6 classes.\n", "Epoch 1/50\n", "96/96 [==============================] - 28s 232ms/step - loss: 1.0307 - accuracy: 0.5520 - val_loss: 0.6397 - val_accuracy: 0.7658\n", "Epoch 2/50\n", "96/96 [==============================] - 21s 220ms/step - loss: 0.4214 - accuracy: 0.8457 - val_loss: 0.2777 - val_accuracy: 0.8895\n", "Epoch 3/50\n", "96/96 [==============================] - 21s 222ms/step - loss: 0.1863 - accuracy: 0.9346 - val_loss: 0.2255 - val_accuracy: 0.9105\n", "Epoch 4/50\n", "96/96 [==============================] - 21s 216ms/step - loss: 0.1238 - accuracy: 0.9575 - val_loss: 0.1356 - val_accuracy: 0.9579\n", "Epoch 5/50\n", "96/96 [==============================] - 21s 217ms/step - loss: 0.0868 - accuracy: 0.9647 - val_loss: 0.2071 - val_accuracy: 0.9237\n", "Epoch 6/50\n", "96/96 [==============================] - 21s 217ms/step - loss: 0.0541 - accuracy: 0.9817 - val_loss: 0.4429 - val_accuracy: 0.8974\n", "Epoch 7/50\n", "96/96 [==============================] - 21s 215ms/step - loss: 0.0515 - accuracy: 0.9797 - val_loss: 0.2518 - val_accuracy: 0.9026\n", "Epoch 8/50\n", "96/96 [==============================] - 21s 219ms/step - loss: 0.0409 - accuracy: 0.9869 - val_loss: 0.0942 - val_accuracy: 0.9632\n", "Epoch 9/50\n", "96/96 [==============================] - 20s 210ms/step - loss: 0.0573 - accuracy: 0.9856 - val_loss: 0.2120 - val_accuracy: 0.9421\n", "Epoch 10/50\n", "96/96 [==============================] - 20s 209ms/step - loss: 0.0591 - accuracy: 0.9804 - val_loss: 0.0389 - val_accuracy: 0.9842\n", "Epoch 11/50\n", "96/96 [==============================] - 20s 205ms/step - loss: 0.0145 - accuracy: 0.9948 - val_loss: 0.0340 - val_accuracy: 0.9921\n", "Epoch 12/50\n", "96/96 [==============================] - 20s 205ms/step - loss: 0.0280 - accuracy: 0.9869 - val_loss: 0.2500 - val_accuracy: 0.9263\n", "Epoch 13/50\n", "96/96 [==============================] - 20s 208ms/step - loss: 0.0361 - accuracy: 0.9876 - val_loss: 0.0801 - val_accuracy: 0.9711\n", "Epoch 14/50\n", "96/96 [==============================] - 20s 209ms/step - loss: 0.0388 - accuracy: 0.9856 - val_loss: 0.0241 - val_accuracy: 0.9974\n", "Epoch 15/50\n", "96/96 [==============================] - 20s 207ms/step - loss: 0.0052 - accuracy: 0.9980 - val_loss: 0.0155 - val_accuracy: 0.9947\n", "Epoch 16/50\n", "96/96 [==============================] - 21s 215ms/step - loss: 0.0367 - accuracy: 0.9902 - val_loss: 0.0824 - val_accuracy: 0.9658\n", "Epoch 17/50\n", "96/96 [==============================] - 20s 204ms/step - loss: 0.0461 - accuracy: 0.9830 - val_loss: 0.1926 - val_accuracy: 0.9553\n", "Epoch 18/50\n", "96/96 [==============================] - 21s 214ms/step - loss: 0.0307 - accuracy: 0.9908 - val_loss: 0.0641 - val_accuracy: 0.9789\n", "Epoch 19/50\n", "96/96 [==============================] - 20s 209ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.0305 - val_accuracy: 0.9947\n", "Epoch 20/50\n", "96/96 [==============================] - 20s 208ms/step - loss: 0.0060 - accuracy: 0.9980 - val_loss: 0.0251 - val_accuracy: 0.9895\n", "24/24 [==============================] - 4s 167ms/step - loss: 0.0236 - accuracy: 0.9868\n", "Test loss: 0.02361617051064968, Test accuracy: 
0.9868420958518982\n", "Model: \"sequential\"\n", "_________________________________________________________________\n", " Layer (type) Output Shape Param # \n", "=================================================================\n", " conv2d (Conv2D) (None, 222, 222, 32) 896 \n", " \n", " max_pooling2d (MaxPooling2D (None, 111, 111, 32) 0 \n", " ) \n", " \n", " conv2d_1 (Conv2D) (None, 109, 109, 64) 18496 \n", " \n", " max_pooling2d_1 (MaxPooling (None, 54, 54, 64) 0 \n", " 2D) \n", " \n", " conv2d_2 (Conv2D) (None, 52, 52, 128) 73856 \n", " \n", " max_pooling2d_2 (MaxPooling (None, 26, 26, 128) 0 \n", " 2D) \n", " \n", " conv2d_3 (Conv2D) (None, 24, 24, 256) 295168 \n", " \n", " max_pooling2d_3 (MaxPooling (None, 12, 12, 256) 0 \n", " 2D) \n", " \n", " flatten (Flatten) (None, 36864) 0 \n", " \n", " dense (Dense) (None, 256) 9437440 \n", " \n", " dense_1 (Dense) (None, 128) 32896 \n", " \n", " dense_2 (Dense) (None, 6) 774 \n", " \n", "=================================================================\n", "Total params: 9,859,526\n", "Trainable params: 9,859,526\n", "Non-trainable params: 0\n", "_________________________________________________________________\n", "It took: 7.04 minutes\n" ] } ], "source": [ "import time\n", "from keras.models import Sequential\n", "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n", "from keras.preprocessing.image import ImageDataGenerator\n", "from keras.losses import CategoricalCrossentropy\n", "from tensorflow.keras.callbacks import EarlyStopping\n", "from tensorflow.keras.optimizers import Adam\n", "import tensorflow as tf\n", "\n", "from Func.getSubFolders import count_sub_folders\n", "from Func.DrawImages import plot_images\n", "\n", "\n", "path = 'Data'\n", "output = 'Model/keras_model.h5'\n", "start_time = time.time()\n", "\n", "physical_devices = tf.config.list_physical_devices('GPU')\n", "print(\"Num GPUs Available: \", len(physical_devices))\n", "if len(physical_devices) > 0:\n", " try:\n", " # Set memory growth to true\n", " for device in physical_devices:\n", " tf.config.experimental.set_memory_growth(device, True)\n", " except RuntimeError as e:\n", " print('runtime gpu', e)\n", "else:\n", " print(\"No GPU available. Using CPU.\")\n", "\n", "\n", "# Step 1: Load and Preprocess Images\n", "datagen = ImageDataGenerator(\n", " rescale=1. 
/ 255,\n",
"    validation_split=0.2,\n",
"    width_shift_range=0.2,\n",
"    height_shift_range=0.2,\n",
"    shear_range=0.2,\n",
"    zoom_range=0.2,\n",
")\n",
"\n",
"# Step 2: Label the Data\n",
"# Note: a single generator serves both subsets, so the augmentation above is also\n",
"# applied to the 'validation' split, which is used as the test set further below.\n",
"train_set = datagen.flow_from_directory(\n",
"    path,\n",
"    target_size=(224, 224),\n",
"    batch_size=16,\n",
"    class_mode='categorical',\n",
"    subset='training'\n",
")\n",
"\n",
"test_set = datagen.flow_from_directory(\n",
"    path,\n",
"    target_size=(224, 224),\n",
"    batch_size=16,\n",
"    class_mode='categorical',\n",
"    subset='validation'\n",
")\n",
"\n",
"# Optionally draw a sample batch of images\n",
"# images, labels = next(train_set)\n",
"# class_indices = test_set.class_indices\n",
"# plot_images(images, labels, class_indices)\n",
"\n",
"# Step 3: Build the Model\n",
"model = Sequential()\n",
"model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3), activation='relu'))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"\n",
"model.add(Conv2D(64, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"\n",
"model.add(Conv2D(128, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"\n",
"model.add(Conv2D(256, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D((2, 2)))\n",
"\n",
"model.add(Flatten())\n",
"model.add(Dense(units=256, activation='relu'))\n",
"model.add(Dense(units=128, activation='relu'))\n",
"model.add(Dense(units=count_sub_folders(path), activation='softmax'))\n",
"\n",
"# Alternative model definition (unused): MobileNetV2-style expand/depthwise/project\n",
"# blocks with residual connections; would require `from tensorflow.keras import layers, models`.\n",
"# def createLayers(input_shape=(224, 224, 3)):\n",
"#     inputs = tf.keras.Input(shape=input_shape)\n",
"#\n",
"#     x = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name='block_1_expand')(inputs)\n",
"#     x = layers.BatchNormalization(name='block_1_expand_BN')(x)\n",
"#     x = layers.ReLU(6., name='block_1_expand_relu')(x)\n",
"#\n",
"#     x = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name='block_1_depthwise')(x)\n",
"#     x = layers.BatchNormalization(name='block_1_depthwise_BN')(x)\n",
"#     x = layers.ReLU(6., name='block_1_depthwise_relu')(x)\n",
"#\n",
"#     x = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name='block_1_project')(x)\n",
"#     x = layers.BatchNormalization(name='block_1_project_BN')(x)\n",
"#\n",
"#     for i in range(2, 5):\n",
"#         x1 = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name=f'block_{i}_expand')(x)\n",
"#         x1 = layers.BatchNormalization(name=f'block_{i}_expand_BN')(x1)\n",
"#         x1 = layers.ReLU(6., name=f'block_{i}_expand_relu')(x1)\n",
"#\n",
"#         x1 = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name=f'block_{i}_depthwise')(x1)\n",
"#         x1 = layers.BatchNormalization(name=f'block_{i}_depthwise_BN')(x1)\n",
"#         x1 = layers.ReLU(6., name=f'block_{i}_depthwise_relu')(x1)\n",
"#\n",
"#         x1 = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name=f'block_{i}_project')(x1)\n",
"#         x1 = layers.BatchNormalization(name=f'block_{i}_project_BN')(x1)\n",
"#\n",
"#         x = layers.Add(name=f'block_{i}_add')([x, x1])\n",
"#\n",
"#     x = tf.keras.layers.GlobalAveragePooling2D()(x)\n",
"#     outputs = tf.keras.layers.Dense(count_sub_folders(path), activation='softmax')(x)\n",
"#     model = models.Model(inputs, outputs, name='testModel')\n",
"#\n",
"#     return model\n",
"#\n",
"# model = createLayers()\n",
"\n",
"# Step 4: Compile the Model\n",
"model.compile(optimizer=Adam(learning_rate=0.001),\n",
"              loss=CategoricalCrossentropy(from_logits=False),\n",
"              metrics=['accuracy'])\n",
"\n",
"# Step 5: Train the Model\n",
"early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
"\n",
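"# Optional sketch, not part of the run recorded above: a ModelCheckpoint callback\n",
"# could sit next to EarlyStopping to persist the best weights during training.\n",
"# The 'Model/checkpoint.h5' path is only an illustrative choice.\n",
"# checkpoint = tf.keras.callbacks.ModelCheckpoint(\n",
"#     'Model/checkpoint.h5',\n",
"#     monitor='val_loss',\n",
"#     save_best_only=True\n",
"# )\n",
"# model.fit would then receive callbacks=[early_stopping, checkpoint].\n",
"\n",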
"model.fit(train_set, validation_data=test_set, epochs=50, callbacks=[early_stopping])\n",
"\n",
"# Step 6: Evaluate the Model\n",
"loss, accuracy = model.evaluate(test_set)\n",
"print(f'Test loss: {loss}, Test accuracy: {accuracy}')\n",
"\n",
"# Step 7: Save the trained model\n",
"model.save(output)\n",
"end_time = time.time()\n",
"\n",
"execute_time = (end_time - start_time) / 60\n",
"\n",
"model.summary()\n",
"\n",
"# Print the elapsed time\n",
"print(f\"It took: {execute_time:0.2f} minutes\")\n"
] }, { "cell_type": "code", "execution_count": null, "id": "ec102c63-b7d9-4f22-9f3e-a6bd12995a4f", "metadata": {}, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": null, "id": "17105ae9-d877-488e-ad4d-aac7a9a15680", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.19" } }, "nbformat": 4, "nbformat_minor": 5 }