{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "62c6c5c3-d2ba-4e79-b533-828c3083e8ea",
   "metadata": {
    "is_executing": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Num GPUs Available:  1\n",
      "Physical devices cannot be modified after being initialized\n",
      "Found 1529 images belonging to 6 classes.\n",
      "Found 380 images belonging to 6 classes.\n",
      "Epoch 1/10\n",
      "24/24 [==============================] - 21s 851ms/step - loss: 1.1108 - accuracy: 0.5134 - val_loss: 0.8370 - val_accuracy: 0.6842\n",
      "Epoch 2/10\n",
      "24/24 [==============================] - 21s 862ms/step - loss: 0.6156 - accuracy: 0.7613 - val_loss: 0.4063 - val_accuracy: 0.8447\n",
      "Epoch 3/10\n",
      "24/24 [==============================] - 22s 915ms/step - loss: 0.2704 - accuracy: 0.9006 - val_loss: 0.2283 - val_accuracy: 0.9316\n",
      "Epoch 4/10\n",
      "24/24 [==============================] - 22s 909ms/step - loss: 0.1532 - accuracy: 0.9477 - val_loss: 0.1145 - val_accuracy: 0.9579\n",
      "Epoch 5/10\n",
      "24/24 [==============================] - 22s 905ms/step - loss: 0.1324 - accuracy: 0.9516 - val_loss: 0.2525 - val_accuracy: 0.9026\n",
      "Epoch 6/10\n",
      "24/24 [==============================] - 22s 912ms/step - loss: 0.0737 - accuracy: 0.9719 - val_loss: 0.0519 - val_accuracy: 0.9816\n",
      "Epoch 7/10\n",
      "24/24 [==============================] - 23s 952ms/step - loss: 0.0663 - accuracy: 0.9797 - val_loss: 0.0544 - val_accuracy: 0.9842\n",
      "Epoch 8/10\n",
      "24/24 [==============================] - 22s 918ms/step - loss: 0.0482 - accuracy: 0.9817 - val_loss: 0.0489 - val_accuracy: 0.9711\n",
      "Epoch 9/10\n",
      "24/24 [==============================] - 22s 928ms/step - loss: 0.0240 - accuracy: 0.9908 - val_loss: 0.0337 - val_accuracy: 0.9868\n",
      "Epoch 10/10\n",
      "24/24 [==============================] - 22s 911ms/step - loss: 0.0224 - accuracy: 0.9922 - val_loss: 0.0411 - val_accuracy: 0.9895\n",
      "6/6 [==============================] - 4s 717ms/step - loss: 0.0078 - accuracy: 1.0000\n",
      "Test loss: 0.007844111882150173, Test accuracy: 1.0\n",
      "Model: \"sequential_32\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " conv2d_128 (Conv2D)         (None, 222, 222, 32)      896       \n",
      "                                                                 \n",
      " max_pooling2d_128 (MaxPooli  (None, 111, 111, 32)     0         \n",
      " ng2D)                                                           \n",
      "                                                                 \n",
      " conv2d_129 (Conv2D)         (None, 109, 109, 64)      18496     \n",
      "                                                                 \n",
      " max_pooling2d_129 (MaxPooli  (None, 54, 54, 64)       0         \n",
      " ng2D)                                                           \n",
      "                                                                 \n",
      " conv2d_130 (Conv2D)         (None, 52, 52, 128)       73856     \n",
      "                                                                 \n",
      " max_pooling2d_130 (MaxPooli  (None, 26, 26, 128)      0         \n",
      " ng2D)                                                           \n",
      "                                                                 \n",
      " conv2d_131 (Conv2D)         (None, 24, 24, 256)       295168    \n",
      "                                                                 \n",
      " max_pooling2d_131 (MaxPooli  (None, 12, 12, 256)      0         \n",
      " ng2D)                                                           \n",
      "                                                                 \n",
      " flatten_32 (Flatten)        (None, 36864)             0         \n",
      "                                                                 \n",
      " dense_96 (Dense)            (None, 256)               9437440   \n",
      "                                                                 \n",
      " dense_97 (Dense)            (None, 128)               32896     \n",
      "                                                                 \n",
      " dense_98 (Dense)            (None, 6)                 774       \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 9,859,526\n",
      "Trainable params: 9,859,526\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "It took: 3.74 minutes\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n",
    "from keras.preprocessing.image import ImageDataGenerator\n",
    "from keras.losses import CategoricalCrossentropy\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "import tensorflow as tf\n",
    "\n",
    "from Func.getSubFolders import count_sub_folders\n",
    "\n",
    "\n",
    "path = 'Data'\n",
    "output = 'Model/keras_model.h5'\n",
    "start_time = time.time()\n",
    "\n",
    "physical_devices = tf.config.list_physical_devices('GPU')\n",
    "print(\"Num GPUs Available: \", len(physical_devices))\n",
    "if len(physical_devices) > 0:\n",
    "    try:\n",
    "        # Set memory growth to true\n",
    "        for device in physical_devices:\n",
    "            tf.config.experimental.set_memory_growth(device, True)\n",
    "        \n",
    "        # Optionally, you can set a memory limit if necessary\n",
    "        # tf.config.set_logical_device_configuration(\n",
    "        #     physical_devices[0],\n",
    "        #     [tf.config.LogicalDeviceConfiguration(memory_limit=4096)])  # Set to 4GB\n",
    "    except RuntimeError as e:\n",
    "        print(e)\n",
    "else:\n",
    "    print(\"No GPU available. Using CPU.\")\n",
    "\n",
    "\n",
    "# Step 1: Load and Preprocess Images\n",
    "datagen = ImageDataGenerator(\n",
    "    rescale=1. / 255,\n",
    "    validation_split=0.2,\n",
    "    width_shift_range=0.2,\n",
    "    height_shift_range=0.2,\n",
    "    shear_range=0.2,\n",
    "    zoom_range=0.2,\n",
    ")\n",
    "\n",
    "test_datagen = ImageDataGenerator(rescale=1. / 255)\n",
    "\n",
    "\n",
    "# Step 2: Label the Data\n",
    "train_set = datagen.flow_from_directory(\n",
    "    path,\n",
    "    target_size=(300, 300),\n",
    "    batch_size=64,\n",
    "    class_mode='categorical',\n",
    "    subset='training'\n",
    ")\n",
    "\n",
    "test_set = datagen.flow_from_directory(\n",
    "    path,\n",
    "    target_size=(300, 300),\n",
    "    batch_size=64,\n",
    "    class_mode='categorical',\n",
    "    subset='validation'\n",
    ")\n",
    "\n",
    "# draw Images\n",
    "# images, labels = next(test_set)\n",
    "\n",
    "# class_indices = test_set.class_indices\n",
    "# plot_images(images, labels, class_indices)\n",
    "\n",
    "\n",
    "\n",
    "# Step 4: Build the Model\n",
    "model = Sequential()\n",
    "model.add(Conv2D(32, (3, 3), input_shape=(300, 300, 3), activation='relu'))\n",
    "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
    "\n",
    "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D((2, 2)))\n",
    "\n",
    "model.add(Conv2D(128, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D((2, 2)))\n",
    "\n",
    "model.add(Conv2D(256, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D((2, 2)))\n",
    "\n",
    "model.add(Flatten())\n",
    "model.add(Dense(units=256, activation='relu'))\n",
    "model.add(Dense(units=128, activation='relu'))\n",
    "model.add(Dense(units=count_sub_folders(path), activation='softmax'))\n",
    "# def createLayers(input_shape=(224, 224, 3)):\n",
    "#     inputs = tf.keras.Input(shape=input_shape)\n",
    "#\n",
    "#     x = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name='block_1_expand')(inputs)\n",
    "#     x = layers.BatchNormalization(name='block_1_expand_BN')(x)\n",
    "#     x = layers.ReLU(6., name='block_1_expand_relu')(x)\n",
    "#\n",
    "#     x = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name='block_1_depthwise')(x)\n",
    "#     x = layers.BatchNormalization(name='block_1_depthwise_BN')(x)\n",
    "#     x = layers.ReLU(6., name='block_1_depthwise_relu')(x)\n",
    "#\n",
    "#     x = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name='block_1_project')(x)\n",
    "#     x = layers.BatchNormalization(name='block_1_project_BN')(x)\n",
    "#\n",
    "#     for i in range(2,5):\n",
    "#         x1 = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name=f'block_{i}_expand')(x)\n",
    "#         x1 = layers.BatchNormalization(name=f'block_{i}_expand_BN')(x1)\n",
    "#         x1 = layers.ReLU(6., name=f'block_{i}_expand_relu')(x1)\n",
    "#\n",
    "#         x1 = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name=f'block_{i}_depthwise')(x1)\n",
    "#         x1 = layers.BatchNormalization(name=f'block_{i}_depthwise_BN')(x1)\n",
    "#         x1 = layers.ReLU(6., name=f'block_{i}_depthwise_relu')(x1)\n",
    "#\n",
    "#         x1 = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name=f'block_{i}_project')(x1)\n",
    "#         x1 = layers.BatchNormalization(name=f'block_{i}_project_BN')(x1)\n",
    "#\n",
    "#         x = layers.Add(name=f'block_{i}_add')([x, x1])\n",
    "#\n",
    "#     x = tf.keras.layers.GlobalAveragePooling2D()(x)\n",
    "#     outputs = tf.keras.layers.Dense(count_sub_folders(path), activation='softmax')(x)\n",
    "#     model = models.Model(inputs, outputs, name='testModel')\n",
    "#\n",
    "#     return model\n",
    "#\n",
    "#\n",
    "# model = createLayers()\n",
    "\n",
    "# Compile the Model after pruning\n",
    "model.compile(optimizer=Adam(learning_rate=0.001),\n",
    "              loss=CategoricalCrossentropy(from_logits=False),\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "# Step 6: Train the Model\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
    "\n",
    "model.fit(train_set, validation_data=test_set, epochs=10, callbacks=early_stopping)\n",
    "\n",
    "# Step 7: Evaluate the Model\n",
    "loss, accuracy = model.evaluate(test_set)\n",
    "print(f'Test loss: {loss}, Test accuracy: {accuracy}')\n",
    "\n",
    "# Save the trained model\n",
    "model.save(output)\n",
    "end_time = time.time()\n",
    "\n",
    "execute_time = (end_time - start_time) / 60\n",
    "\n",
    "model.summary()\n",
    "\n",
    "# Print the result\n",
    "print(f\"It took: {execute_time:0.2f} minutes\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec102c63-b7d9-4f22-9f3e-a6bd12995a4f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}