diff --git a/Func/getSubFolders.py b/Func/getSubFolders.py
new file mode 100644
index 0000000000000000000000000000000000000000..217d43f512c4ea586a2c48bc63d5f1a77551e810
--- /dev/null
+++ b/Func/getSubFolders.py
@@ -0,0 +1,14 @@
+import os
+
+
+def count_sub_folders(folder_path):
+    """Return the number of immediate sub-directories of *folder_path*.
+
+    Returns 0 (after printing a warning) when the folder does not exist,
+    so callers expecting an int -- e.g. Dense(units=...) -- never get None.
+    """
+    if not os.path.exists(folder_path):
+        print(f"The folder {folder_path} does not exist.")
+        return 0
+
+    return sum(1 for f in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path, f)))
diff --git a/Model/Model.h5 b/Model/Model.h5
new file mode 100644
index 0000000000000000000000000000000000000000..9ceec6f509698fd6b74f9a0e202fc98c3940daf6
Binary files /dev/null and b/Model/Model.h5 differ
diff --git a/main.py b/main.py
index e16c5f557a519752a10d03737792dac165165f83..4df15358d2aac4a09d2c385e88f66bf55dd86edf 100644
--- a/main.py
+++ b/main.py
@@ -11,7 +11,8 @@ size = 300
 
capture = cv2.VideoCapture(0)
 detector = HandDetector(maxHands=1)
-classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")
+classifier = Classifier("Model/Model.h5", "Model/labels.txt")
+# classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")  # previous model path, kept for quick rollback
 letters = GetLabels("Model/labels.txt")
 
 
@@ -47,6 +48,7 @@ while True:
             prediction, index = GetPrediction(classifier, imgFixed)
 
         print(prediction)
+        print(index)
 
         cv2.putText(imgOutput, letters[index], (x, y), cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 0, 255), 2)
 
diff --git a/trainModel.py b/trainModel.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e459adbe4339fc72ae5c18c85ffbecafe650684
--- /dev/null
+++ b/trainModel.py
@@ -0,0 +1,55 @@
+from keras.models import Sequential
+from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
+from keras.preprocessing.image import ImageDataGenerator
+from Func.getSubFolders import count_sub_folders
+
+# Step 1: Load and Preprocess Images
+# You can use ImageDataGenerator for on-the-fly data augmentation and normalization
+train_datagen = ImageDataGenerator(
+    rescale=1. / 255,
+    shear_range=0.2,
+    zoom_range=0.2,
+    horizontal_flip=True
+)
+
+test_datagen = ImageDataGenerator(rescale=1. / 255)
+
+# Step 2: Label the Data
+# Labels are inferred from the sub-folder names of Data/ (one folder per class)
+train_set = train_datagen.flow_from_directory(
+    'Data/',
+    target_size=(224, 224),  # image size
+    batch_size=32,  # batch size
+    class_mode='categorical'  # multiple folders in Data -> one-hot labels
+)
+
+test_set = test_datagen.flow_from_directory(
+    'Data',
+    target_size=(224, 224),
+    batch_size=32,
+    class_mode='categorical'  # Use categorical for multiple classes
+)
+
+# NOTE(review): test_set reads the same 'Data' directory as train_set -- no held-out split; confirm intended.
+
+# Step 4: Build the Model
+model = Sequential()
+model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3), activation='relu'))
+model.add(MaxPooling2D(pool_size=(2, 2)))
+model.add(Flatten())
+model.add(Dense(units=128, activation='relu'))
+model.add(Dense(units=count_sub_folders('Data'), activation='softmax'))
+
+# Step 5: Compile the Model
+model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])  # one-hot labels from class_mode='categorical' need categorical_crossentropy (sparse_* expects integer labels)
+
+# Step 6: Train the Model
+# model.fit(train_set, epochs=25, validation_data=test_set)
+
+# Step 7: Evaluate the Model
+# loss, accuracy = model.evaluate(test_set)
+# print(f'Test loss: {loss}, Test accuracy: {accuracy}')
+
+# Step 8: Make Predictions
+
+# Save the trained model
+# model.save('Model/Model.h5')