{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "62c6c5c3-d2ba-4e79-b533-828c3083e8ea",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 1529 images belonging to 6 classes.\n",
      "Found 380 images belonging to 6 classes.\n",
      "Epoch 1/10\n",
      "48/48 [==============================] - 29s 470ms/step - loss: 1.0985 - accuracy: 0.5284 - val_loss: 0.7411 - val_accuracy: 0.6868\n",
      "Epoch 2/10\n",
      "48/48 [==============================] - 20s 417ms/step - loss: 0.5369 - accuracy: 0.7757 - val_loss: 0.5103 - val_accuracy: 0.7921\n",
      "Epoch 3/10\n",
      "48/48 [==============================] - 20s 417ms/step - loss: 0.2837 - accuracy: 0.8875 - val_loss: 0.8421 - val_accuracy: 0.7105\n",
      "Epoch 4/10\n",
      "48/48 [==============================] - 20s 417ms/step - loss: 0.1952 - accuracy: 0.9228 - val_loss: 0.2502 - val_accuracy: 0.8947\n",
      "Epoch 5/10\n",
      "48/48 [==============================] - 20s 419ms/step - loss: 0.1007 - accuracy: 0.9647 - val_loss: 0.0922 - val_accuracy: 0.9632\n",
      "Epoch 6/10\n",
      "48/48 [==============================] - 19s 404ms/step - loss: 0.0690 - accuracy: 0.9719 - val_loss: 0.1608 - val_accuracy: 0.9316\n",
      "Epoch 7/10\n",
      "48/48 [==============================] - 19s 406ms/step - loss: 0.0707 - accuracy: 0.9745 - val_loss: 0.1129 - val_accuracy: 0.9500\n",
      "Epoch 8/10\n",
      "48/48 [==============================] - 19s 401ms/step - loss: 0.0674 - accuracy: 0.9791 - val_loss: 0.0428 - val_accuracy: 0.9868\n",
      "Epoch 9/10\n",
      "48/48 [==============================] - 19s 400ms/step - loss: 0.0260 - accuracy: 0.9922 - val_loss: 0.0619 - val_accuracy: 0.9711\n",
      "Epoch 10/10\n",
      "48/48 [==============================] - 20s 404ms/step - loss: 0.0142 - accuracy: 0.9961 - val_loss: 0.0146 - val_accuracy: 0.9947\n",
      "12/12 [==============================] - 4s 329ms/step - loss: 0.0094 - accuracy: 0.9974\n",
      "Test loss: 0.009386303834617138, Test accuracy: 0.9973683953285217\n",
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      " Layer (type)                Output Shape              Param #   \n",
      "=================================================================\n",
      " conv2d (Conv2D)             (None, 222, 222, 32)      896       \n",
      "                                                                 \n",
      " max_pooling2d (MaxPooling2D  (None, 111, 111, 32)     0         \n",
      " )                                                               \n",
      "                                                                 \n",
      " conv2d_1 (Conv2D)           (None, 109, 109, 64)      18496     \n",
      "                                                                 \n",
      " max_pooling2d_1 (MaxPooling  (None, 54, 54, 64)       0         \n",
      " 2D)                                                             \n",
      "                                                                 \n",
      " conv2d_2 (Conv2D)           (None, 52, 52, 128)       73856     \n",
      "                                                                 \n",
      " max_pooling2d_2 (MaxPooling  (None, 26, 26, 128)      0         \n",
      " 2D)                                                             \n",
      "                                                                 \n",
      " conv2d_3 (Conv2D)           (None, 24, 24, 256)       295168    \n",
      "                                                                 \n",
      " max_pooling2d_3 (MaxPooling  (None, 12, 12, 256)      0         \n",
      " 2D)                                                             \n",
      "                                                                 \n",
      " flatten (Flatten)           (None, 36864)             0         \n",
      "                                                                 \n",
      " dense (Dense)               (None, 256)               9437440   \n",
      "                                                                 \n",
      " dense_1 (Dense)             (None, 128)               32896     \n",
      "                                                                 \n",
      " dense_2 (Dense)             (None, 6)                 774       \n",
      "                                                                 \n",
      "=================================================================\n",
      "Total params: 9,859,526\n",
      "Trainable params: 9,859,526\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "It took: 3.55 minutes\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense\n",
    "from keras.preprocessing.image import ImageDataGenerator\n",
    "from keras.losses import CategoricalCrossentropy\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "import tensorflow as tf\n",
    "from keras import layers, models\n",
    "\n",
    "from Func.getSubFolders import count_sub_folders\n",
    "\n",
    "path = 'Data'\n",
    "output = 'Model/keras_model.h5'\n",
    "start_time = time.time()\n",
    "\n",
    "# runtime error\n",
    "# physical_devices = tf.config.experimental.list_physical_devices('GPU')\n",
    "# print(\"Num GPUs Available: \", len(physical_devices))\n",
    "# if len(physical_devices) > 0:\n",
    "#     tf.config.experimental.set_memory_growth(physical_devices[0], True)\n",
    "# else:\n",
    "#     print(\"No GPU available. Using CPU.\")\n",
    "\n",
    "\n",
    "# Step 1: Load and Preprocess Images\n",
    "datagen = ImageDataGenerator(\n",
    "    rescale=1. / 255,\n",
    "    validation_split=0.2,\n",
    "    width_shift_range=0.2,\n",
    "    height_shift_range=0.2,\n",
    "    shear_range=0.2,\n",
    "    zoom_range=0.2,\n",
    ")\n",
    "\n",
    "test_datagen = ImageDataGenerator(rescale=1. / 255)\n",
    "\n",
    "# Step 2: Label the Data\n",
    "train_set = datagen.flow_from_directory(\n",
    "    path,\n",
    "    target_size=(224, 224),\n",
    "    batch_size=32,\n",
    "    class_mode='categorical',\n",
    "    subset='training'\n",
    ")\n",
    "\n",
    "test_set = datagen.flow_from_directory(\n",
    "    path,\n",
    "    target_size=(224, 224),\n",
    "    batch_size=32,\n",
    "    class_mode='categorical',\n",
    "    subset='validation'\n",
    ")\n",
    "\n",
    "# Step 4: Build the Model\n",
    "model = Sequential()\n",
    "model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3), activation='relu'))\n",
    "model.add(MaxPooling2D(pool_size=(2, 2)))\n",
    "\n",
    "model.add(Conv2D(64, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D((2, 2)))\n",
    "\n",
    "model.add(Conv2D(128, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D((2, 2)))\n",
    "\n",
    "model.add(Conv2D(256, (3, 3), activation='relu'))\n",
    "model.add(MaxPooling2D((2, 2)))\n",
    "\n",
    "model.add(Flatten())\n",
    "model.add(Dense(units=256, activation='relu'))\n",
    "model.add(Dense(units=128, activation='relu'))\n",
    "model.add(Dense(units=count_sub_folders(path), activation='softmax'))\n",
    "# def createLayers(input_shape=(224, 224, 3)):\n",
    "#     inputs = tf.keras.Input(shape=input_shape)\n",
    "#\n",
    "#     x = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name='block_1_expand')(inputs)\n",
    "#     x = layers.BatchNormalization(name='block_1_expand_BN')(x)\n",
    "#     x = layers.ReLU(6., name='block_1_expand_relu')(x)\n",
    "#\n",
    "#     x = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name='block_1_depthwise')(x)\n",
    "#     x = layers.BatchNormalization(name='block_1_depthwise_BN')(x)\n",
    "#     x = layers.ReLU(6., name='block_1_depthwise_relu')(x)\n",
    "#\n",
    "#     x = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name='block_1_project')(x)\n",
    "#     x = layers.BatchNormalization(name='block_1_project_BN')(x)\n",
    "#\n",
    "#     for i in range(2,5):\n",
    "#         x1 = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name=f'block_{i}_expand')(x)\n",
    "#         x1 = layers.BatchNormalization(name=f'block_{i}_expand_BN')(x1)\n",
    "#         x1 = layers.ReLU(6., name=f'block_{i}_expand_relu')(x1)\n",
    "#\n",
    "#         x1 = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name=f'block_{i}_depthwise')(x1)\n",
    "#         x1 = layers.BatchNormalization(name=f'block_{i}_depthwise_BN')(x1)\n",
    "#         x1 = layers.ReLU(6., name=f'block_{i}_depthwise_relu')(x1)\n",
    "#\n",
    "#         x1 = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name=f'block_{i}_project')(x1)\n",
    "#         x1 = layers.BatchNormalization(name=f'block_{i}_project_BN')(x1)\n",
    "#\n",
    "#         x = layers.Add(name=f'block_{i}_add')([x, x1])\n",
    "#\n",
    "#     x = tf.keras.layers.GlobalAveragePooling2D()(x)\n",
    "#     outputs = tf.keras.layers.Dense(count_sub_folders(path), activation='softmax')(x)\n",
    "#     model = models.Model(inputs, outputs, name='testModel')\n",
    "#\n",
    "#     return model\n",
    "#\n",
    "#\n",
    "# model = createLayers()\n",
    "\n",
    "# Compile the Model after pruning\n",
    "model.compile(optimizer=Adam(learning_rate=0.001),\n",
    "              loss=CategoricalCrossentropy(from_logits=False),\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "# Step 6: Train the Model\n",
    "early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
    "\n",
    "model.fit(train_set, validation_data=test_set, epochs=10, callbacks=early_stopping)\n",
    "\n",
    "# Step 7: Evaluate the Model\n",
    "loss, accuracy = model.evaluate(test_set)\n",
    "print(f'Test loss: {loss}, Test accuracy: {accuracy}')\n",
    "\n",
    "# Save the trained model\n",
    "model.save(output)\n",
    "end_time = time.time()\n",
    "\n",
    "execute_time = (end_time - start_time) / 60\n",
    "\n",
    "model.summary()\n",
    "\n",
    "# Print the result\n",
    "print(f\"It took: {execute_time:0.2f} minutes\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67803a21-51f7-4bec-98fc-4efa8f75bab9",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}