"id": "62c6c5c3-d2ba-4e79-b533-828c3083e8ea",
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Found 4706 images belonging to 7 classes.\n",
"Found 1172 images belonging to 7 classes.\n",
"294/294 [==============================] - 63s 211ms/step - loss: 1.9757 - accuracy: 0.2465 - val_loss: 1.5139 - val_accuracy: 0.5300 - lr: 1.0000e-04\n",
"294/294 [==============================] - 62s 209ms/step - loss: 1.3091 - accuracy: 0.4876 - val_loss: 0.9494 - val_accuracy: 0.7115 - lr: 1.0000e-04\n",
"294/294 [==============================] - 62s 211ms/step - loss: 0.8478 - accuracy: 0.6616 - val_loss: 0.6259 - val_accuracy: 0.8116 - lr: 1.0000e-04\n",
"294/294 [==============================] - 62s 210ms/step - loss: 0.5243 - accuracy: 0.7959 - val_loss: 0.3788 - val_accuracy: 0.9092 - lr: 1.0000e-04\n",
"294/294 [==============================] - 62s 210ms/step - loss: 0.3400 - accuracy: 0.8804 - val_loss: 0.2694 - val_accuracy: 0.9315 - lr: 1.0000e-04\n",
"294/294 [==============================] - 62s 211ms/step - loss: 0.2558 - accuracy: 0.9072 - val_loss: 0.2527 - val_accuracy: 0.9392 - lr: 1.0000e-07\n",
"294/294 [==============================] - 62s 209ms/step - loss: 0.2518 - accuracy: 0.9113 - val_loss: 0.2496 - val_accuracy: 0.9289 - lr: 1.0000e-10\n",
"294/294 [==============================] - 62s 210ms/step - loss: 0.2543 - accuracy: 0.9158 - val_loss: 0.2648 - val_accuracy: 0.9289 - lr: 1.0000e-13\n",
"294/294 [==============================] - 61s 209ms/step - loss: 0.2648 - accuracy: 0.9066 - val_loss: 0.2575 - val_accuracy: 0.9332 - lr: 1.0000e-16\n",
"294/294 [==============================] - 62s 211ms/step - loss: 0.2514 - accuracy: 0.9092 - val_loss: 0.2234 - val_accuracy: 0.9426 - lr: 1.0000e-19\n",
"Epoch 11/30\n",
"294/294 [==============================] - 62s 210ms/step - loss: 0.2404 - accuracy: 0.9230 - val_loss: 0.2438 - val_accuracy: 0.9392 - lr: 1.0000e-21\n",
"Epoch 12/30\n",
"294/294 [==============================] - 62s 211ms/step - loss: 0.2337 - accuracy: 0.9198 - val_loss: 0.2526 - val_accuracy: 0.9375 - lr: 1.0000e-23\n",
"Epoch 13/30\n",
"294/294 [==============================] - 62s 210ms/step - loss: 0.2602 - accuracy: 0.9064 - val_loss: 0.2423 - val_accuracy: 0.9384 - lr: 1.0000e-25\n",
"Epoch 14/30\n",
"294/294 [==============================] - 62s 212ms/step - loss: 0.2494 - accuracy: 0.9119 - val_loss: 0.2510 - val_accuracy: 0.9341 - lr: 1.0000e-27\n",
"Epoch 15/30\n",
"294/294 [==============================] - 62s 212ms/step - loss: 0.2637 - accuracy: 0.9098 - val_loss: 0.2397 - val_accuracy: 0.9401 - lr: 1.0000e-29\n",
"74/74 [==============================] - 14s 190ms/step - loss: 0.2511 - accuracy: 0.9334\n",
"Test loss: 0.2510630190372467, Test accuracy: 0.9334471225738525\n",
"Model: \"sequential_3\"\n",
"_________________________________________________________________\n",
" Layer (type) Output Shape Param # \n",
"=================================================================\n",
" dense_15 (Dense) (None, 1024) 25691136 \n",
" \n",
"=================================================================\n",
"Total params: 41,095,751\n",
"Trainable params: 26,381,063\n",
"_________________________________________________________________\n",
"Last build: 10/12/24\n",
"It took: 15.76 minutes\n"
]
}
],
"source": [
"import time\n",
"from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n",
"from keras.preprocessing.image import ImageDataGenerator\n",
"from keras.losses import CategoricalCrossentropy\n",
"from tensorflow.keras.callbacks import EarlyStopping,LearningRateScheduler\n",
"from tensorflow.keras.optimizers import Adam\n",
"import tensorflow as tf\n",
"from tensorflow.keras import layers, models\n",
"from tensorflow.keras.applications import VGG16\n",
"\n",
"from Func.getSubFolders import count_sub_folders\n",
"path = 'Data'\n",
"output = 'Model/keras_model.h5'\n",
"start_time = time.time()\n",
"\n",
"physical_devices = tf.config.list_physical_devices('GPU')\n",
"print(\"Num GPUs Available: \", len(physical_devices))\n",
"if len(physical_devices) > 0:\n",
" try:\n",
" # Set memory growth to true\n",
" for device in physical_devices:\n",
" tf.config.experimental.set_memory_growth(device, True)\n",
" except RuntimeError as e:\n",
"else:\n",
" print(\"No GPU available. Using CPU.\")\n",
"def lr_schedule(epoch, lr):\n",
" if epoch < 5:\n",
" return lr\n",
" elif epoch < 10:\n",
" return lr * 0.001\n",
" else:\n",
" return lr * 0.01\n",
"\n",
"\n",
"\n",
"# Step 1: Load and Preprocess Images\n",
"datagen = ImageDataGenerator(\n",
" rescale=1. / 255,\n",
" validation_split=0.2,\n",
" width_shift_range=0.2,\n",
" height_shift_range=0.2,\n",
" shear_range=0.2,\n",
" zoom_range=0.2,\n",
"# Step 2: Label the Data\n",
"train_set = datagen.flow_from_directory(\n",
" path,\n",
" target_size=(224, 224),\n",
" batch_size=16,\n",
" class_mode='categorical',\n",
" subset='training'\n",
")\n",
"\n",
"test_set = datagen.flow_from_directory(\n",
" path,\n",
" target_size=(224, 224),\n",
" batch_size=16,\n",
" class_mode='categorical',\n",
" subset='validation'\n",
")\n",
"\n",
"#class_indices = test_set.class_indices\n",
"#plot_images(images, labels, class_indices)\n",
"base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3)) # EXPLAINNNNN\n",
"base_model.trainable = False\n",
"# Step 4: Build the Model\n",
"\n",
"model = Sequential([\n",
" base_model,\n",
" Flatten(),\n",
" Dense(units=1024, activation='relu'),\n",
" Dropout(0.5),\n",
" Dense(units=512, activation='relu'),\n",
" Dropout(0.5),\n",
" Dense(units=256, activation='relu'),\n",
" Dropout(0.5),\n",
" Dense(units=128, activation='relu'),\n",
" Dropout(0.5),\n",
" Dense(units=count_sub_folders(path), activation='softmax')\n",
"])\n",
"# Compile the Model\n",
"model.compile(optimizer=Adam(learning_rate=0.0001),\n",
" loss=CategoricalCrossentropy(from_logits=False),\n",
" metrics=['accuracy'])\n",
"\n",
"# Step 6: Train the Model\n",
"early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
"lr_scheduler = LearningRateScheduler(lr_schedule)\n",
"\n",
"model.fit(\n",
" train_set,\n",
" steps_per_epoch=train_set.samples // train_set.batch_size,\n",
" validation_data=test_set,\n",
" validation_steps=test_set.samples // test_set.batch_size,\n",
" epochs=30,\n",
" callbacks=[early_stopping, lr_scheduler]\n",
")\n",
"\n",
"# Step 7: Evaluate the Model\n",
"loss, accuracy = model.evaluate(test_set)\n",
"print(f'Test loss: {loss}, Test accuracy: {accuracy}')\n",
"\n",
"# Save the trained model\n",
"model.save(output)\n",
"end_time = time.time()\n",
"\n",
"execute_time = (end_time - start_time) / 60\n",
"\n",
"model.summary()\n",
"\n",
"# Print the result\n",
"print(\"Last build:\", datetime.datetime.now().strftime(\"%d/%m/%y\"))\n",
"print(f\"It took: {execute_time:0.2f} minutes\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.19"
}
},
"nbformat": 4,
"nbformat_minor": 5
}