diff --git a/.gitignore b/.gitignore
index 5f55171f1b5fa37ed641a1435403681f7832622f..fc422ea2af93a5a461a8250c8e88ac27ddcf559c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 .idea
+.ipynb_checkpoints
 
 venv
 Data
 Data_test
-Model/*.h5
\ No newline at end of file
+Model/keras_model.h5
\ No newline at end of file
diff --git a/Func/ClassifierModule.py b/Func/ClassifierModule.py
new file mode 100644
index 0000000000000000000000000000000000000000..484f66262bd91ad0131d01070dc64acdab26f7b5
--- /dev/null
+++ b/Func/ClassifierModule.py
@@ -0,0 +1,34 @@
+import keras
+import numpy as np
+import cv2
+
+
+class Classifier:
+
+    def __init__(self, model_path, label_path):
+        self.model_path = model_path
+
+        np.set_printoptions(suppress=True)
+
+        self.model = keras.models.load_model(self.model_path)
+        self.data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
+        self.labels_path = label_path
+
+        if self.labels_path:
+            with open(self.labels_path, "r") as label_file:
+                self.list_labels = [line.strip() for line in label_file]
+        else:
+            print("No Labels Found")
+
+    def getPrediction(self, img):
+        img_resized = cv2.resize(img, (224, 224))
+        image_array = np.asarray(img_resized)
+        normalized_image_array = image_array.astype(np.float32) / 255.0  # match the 1/255 rescaling used when training keras_model.h5
+
+        self.data[0] = normalized_image_array
+
+        prediction = self.model.predict(self.data)
+        index_val = np.argmax(prediction)
+
+        return list(prediction[0]), index_val
+
diff --git a/Func/CloseProgram.py b/Func/CloseProgram.py
index 3109b6e62a86acfbc5006bcd528acac050df6d5c..c0718008506f92b145b947c79237c71e001569ce 100644
--- a/Func/CloseProgram.py
+++ b/Func/CloseProgram.py
@@ -5,5 +5,3 @@ def Close(cap):
     cap.release()
     cv2.destroyAllWindows()
     print('closing')
-
-
diff --git a/Func/DrawImages.py b/Func/DrawImages.py
new file mode 100644
index 0000000000000000000000000000000000000000..10734d7f66d05d8ea02c53a4c753446b07aa7877
--- /dev/null
+++ b/Func/DrawImages.py
@@ -0,0 +1,20 @@
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+def plot_images(images, labels, class_indices):
+    num_images = len(images)
+    grid_size = int(np.ceil(np.sqrt(num_images)))
+
+    plt.figure()
+    for i in range(num_images):
+        plt.subplot(grid_size, grid_size, i + 1)
+        img = images[i]
+        plt.imshow(img)
+
+        label = np.argmax(labels[i])
+        label_name = list(class_indices.keys())[list(class_indices.values()).index(label)]
+        plt.title(label_name)
+        plt.axis('off')
+    plt.tight_layout()
+    plt.show()
\ No newline at end of file
diff --git a/Func/Helpers.py b/Func/Helpers.py
index a096e33848f725397fb9ecce8f39dfaa3acc3594..8bcef42641c0b6a7616815caf0bb08d807d12a28 100644
--- a/Func/Helpers.py
+++ b/Func/Helpers.py
@@ -21,7 +21,7 @@ def CalculateHeight(size, h, w, imgCropped, imgFixed):
 
 
 def GetPrediction(classifier, imgFixed):
-    prediction, index = classifier.getPrediction(imgFixed, draw=False)
+    prediction, index = classifier.getPrediction(imgFixed)
 
     return prediction, index
 
@@ -35,4 +35,4 @@ def GetLabels(path):
                 letter = parts[1]
                 letters.append(letter)
 
-    return letters
+    return letters
\ No newline at end of file
diff --git a/Func/__pycache__/ClassifierModule.cpython-310.pyc b/Func/__pycache__/ClassifierModule.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61972d14ababbf865e495115e19a2eeaf4a8429f
Binary files /dev/null and b/Func/__pycache__/ClassifierModule.cpython-310.pyc differ
diff --git a/Func/__pycache__/CloseProgram.cpython-310.pyc b/Func/__pycache__/CloseProgram.cpython-310.pyc
index 193e77a6f8fe784366b7dd6541c45f009831ca5e..f82a12ecd273f4928c117988f81d4c9fbf961397 100644
Binary files a/Func/__pycache__/CloseProgram.cpython-310.pyc and b/Func/__pycache__/CloseProgram.cpython-310.pyc differ
diff --git a/Func/__pycache__/DrawImages.cpython-39.pyc b/Func/__pycache__/DrawImages.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e4a809e8c792365f5148b5d78d958cb7f391da6
Binary files /dev/null and b/Func/__pycache__/DrawImages.cpython-39.pyc differ
diff --git a/Func/__pycache__/Helpers.cpython-310.pyc b/Func/__pycache__/Helpers.cpython-310.pyc
index 18d4dc8cffb888a52a6190b06248d4f5d9a5da1d..1b51f2d7f1d969276a1bf72146c8139be77e961d 100644
Binary files a/Func/__pycache__/Helpers.cpython-310.pyc and b/Func/__pycache__/Helpers.cpython-310.pyc differ
diff --git a/Func/__pycache__/Helpers.cpython-39.pyc b/Func/__pycache__/Helpers.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..801f41dd2a8b1ae149515d6ad5c343e8129bbd70
Binary files /dev/null and b/Func/__pycache__/Helpers.cpython-39.pyc differ
diff --git a/Func/__pycache__/getSubFolders.cpython-39.pyc b/Func/__pycache__/getSubFolders.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85ffb0b59cf861a08ef538a2833dc54e1f02f8eb
Binary files /dev/null and b/Func/__pycache__/getSubFolders.cpython-39.pyc differ
diff --git a/Func/getSubFolders.py b/Func/getSubFolders.py
index 217d43f512c4ea586a2c48bc63d5f1a77551e810..a2dabc591b56c183ab80df6f1df2f0e2c3e44443 100644
--- a/Func/getSubFolders.py
+++ b/Func/getSubFolders.py
@@ -9,4 +9,4 @@ def count_sub_folders(folder_path):
     sub_folders = [f for f in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path, f))]
     num_sub_folders = len(sub_folders)
 
-    return num_sub_folders
\ No newline at end of file
+    return num_sub_folders
diff --git a/Model/keras_model.h5 b/Model/keras_model_pohja.h5
similarity index 79%
rename from Model/keras_model.h5
rename to Model/keras_model_pohja.h5
index 0169f5a80825b3b126671903fdf5fa711e782af3..40969d8808ef5914aa2f5b55e5fe5d97bdd5091f 100644
Binary files a/Model/keras_model.h5 and b/Model/keras_model_pohja.h5 differ
diff --git a/Model/labels.txt b/Model/labels.txt
index ca427253d6f818ccda5af95a99305e035f85b407..5fbcfab644bb75991252a4d2f6ce35c41d649e4d 100644
--- a/Model/labels.txt
+++ b/Model/labels.txt
@@ -2,4 +2,5 @@
 1 B
 2 C
 3 D
-4 E
\ No newline at end of file
+4 E
+5 F
\ No newline at end of file
diff --git a/README.md b/README.md
index 4683ba620955161a996aea40c18cd706c6386aac..379176b923965621a49211dde712b501336a7a08 100644
--- a/README.md
+++ b/README.md
@@ -2,91 +2,59 @@
 
 
 
-## Getting started
-
-To make it easy for you to get started with GitLab, here's a list of recommended next steps.
-
-Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
-
-## Add your files
-
-- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
-- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
-
+## Environment
+This project uses a Miniconda environment. To set up your own environment, install the following:
 ```
-cd existing_repo
-git remote add origin https://gitlab.labranet.jamk.fi/AC4908/thesis-idea.git
-git branch -M main
-git push -uf origin main
+conda create --name "env name" python=3.9 -y
 ```
+```
+conda activate "env name"
+```
+```
+conda install -c conda-forge cudatoolkit=11.2 cudnn=8.1.0 -y
+```
+```
+pip install "tensorflow<2.11"
+pip install jupyter notebook
+pip install "keras"
+conda install pillow
+conda install scipy
+```
+> Note: the *-y* flag is optional.
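+
+To confirm that TensorFlow can actually see the GPU after these installs, you can run the same check used in *trainModel.ipynb*:
+```
+import tensorflow as tf
+print("Num GPUs Available:", len(tf.config.list_physical_devices('GPU')))
+```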
 
-## Integrate with your tools
-
-- [ ] [Set up project integrations](https://gitlab.labranet.jamk.fi/AC4908/thesis-idea/-/settings/integrations)
-
-## Collaborate with your team
-
-- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
-- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
-- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
-- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
-- [ ] [Set auto-merge](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
-
-## Test and Deploy
-
-Use the built-in continuous integration in GitLab.
-
-- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
-- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
-- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
-- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
-- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
-
-***
-
-# Editing this README
-
-When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
-
-## Suggestions for a good README
-Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
-
-## Name
-Choose a self-explaining name for your project.
-
-## Description
-Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
-
-## Badges
-On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
-
-## Visuals
-Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
+<br>
 
-## Installation
-Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
+```
+jupyter notebook --notebook-dir='your working directory'
+```
+> Again, *--notebook-dir='your working directory'* is optional if you have configured a notebook directory beforehand.
 
-## Usage
-Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
+### Reopening the environment
+After opening the Anaconda Prompt, use the following commands to return to your environment.
+#### List all created environments
+```
+conda info --envs
+```
 
-## Support
-Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
+#### Opening commands
+```
+conda activate "env name"
+jupyter notebook --notebook-dir='your working directory'
+```
 
-## Roadmap
-If you have ideas for releases in the future, it is a good idea to list them in the README.
 
-## Contributing
-State if you are open to contributions and what your requirements are for accepting them.
+#### Drawing images
+To verify which images are used, you can draw them with matplotlib, as shown in the example below.
 
-For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
+```
+conda install matplotlib
+```
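+
+For example, to preview one training batch (a sketch assuming the *train_set* and *test_set* generators created in *trainModel.ipynb*):
+```
+from Func.DrawImages import plot_images
+
+# take one batch of images and their one-hot labels from the generator
+images, labels = next(train_set)
+plot_images(images, labels, test_set.class_indices)
+```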
 
-You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
+#### Flask
+Flask is used to display the video stream in a web app.
 
-## Authors and acknowledgment
-Show your appreciation to those who have contributed to the project.
+```
+pip install Flask
+```
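+
+With Flask installed, the stream can be started from the project root (app.py uses the Flask development server, which defaults to port 5000):
+```
+python app.py
+```
+Then open http://127.0.0.1:5000/ in a browser; the page embeds the MJPEG feed served from /video.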
 
-## License
-For open source projects, say how it is licensed.
 
-## Project status
-If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
diff --git a/SaveData.py b/SaveData.py
index 41620787bc765e4f5f66ed71bb354fc692e32b16..4b2fd5e97842de07fd123f3161a42ed44b35a1e9 100644
--- a/SaveData.py
+++ b/SaveData.py
@@ -9,7 +9,7 @@ from cvzone.HandTrackingModule import HandDetector
 
 offset = 20
 size = 300
-folder = "Data/E"
+folder = "Data/F"
 counter = 0
 
 cap = cv2.VideoCapture(0)
@@ -23,7 +23,7 @@ while True:
     success, img = cap.read()
 
     if not success:
-        print('can\'t read image')
+        print('can\'t read image', cap)
         break
 
     hands, img = detector.findHands(img)
@@ -49,11 +49,16 @@ while True:
     cv2.imshow("img", img)
     key = cv2.waitKey(1)
 
+
     if key == ord('p'):
         Close(cap)
         break
 
     if key == ord('s'):
-        counter += 1
-        cv2.imwrite(f'{folder}/Image_{time.time()}.jpg', imgFixed)
-        print(counter)
+        try:
+            filename = f'{folder}/Image_{time.time()}.jpg'
+            cv2.imwrite(filename, imgFixed)
+            counter += 1
+            print(filename, counter)
+        except Exception as e:
+            print(f"Error saving image: {e}")
diff --git a/__pycache__/main.cpython-310.pyc b/__pycache__/main.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b089001421bc66c30e96a1ad2ddda056508c9f2b
Binary files /dev/null and b/__pycache__/main.cpython-310.pyc differ
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..458e999206623d9ee49e29a0aef8acbcb215ab32
--- /dev/null
+++ b/app.py
@@ -0,0 +1,27 @@
+from flask import Flask, render_template, Response
+from main import process_frame
+
+app = Flask(__name__)
+
+
+def generate():
+    while True:
+        frame = process_frame()
+        if frame is None:
+            break
+        yield (b'--frame\r\n'
+               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
+
+
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+
+@app.route('/video')
+def video_feed():
+    return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')
+
+
+if __name__ == '__main__':
+    app.run(debug=True)
diff --git a/main.py b/main.py
index 4df15358d2aac4a09d2c385e88f66bf55dd86edf..5b37e0d0f671935234e222ea8eae5ac5d3dd9b6e 100644
--- a/main.py
+++ b/main.py
@@ -1,9 +1,9 @@
-import cv2
 import numpy as np
+import cv2
 from cvzone.HandTrackingModule import HandDetector
-from cvzone.ClassificationModule import Classifier
 
-from Func.CloseProgram import Close
+from Func.ClassifierModule import Classifier
+
 from Func.Helpers import *
 
 offset = 20
@@ -11,22 +11,23 @@ size = 300
 
 capture = cv2.VideoCapture(0)
 detector = HandDetector(maxHands=1)
-classifier = Classifier("Model/Model.h5", "Model/labels.txt")
-# classifier = Classifier("Model/keras_model.h5", "Model/labels.txt")
-letters = GetLabels("Model/labels.txt")
+model_path = "Model/keras_model.h5"
+label_path = "Model/labels.txt"
 
+classifier = Classifier(model_path, label_path)
+letters = GetLabels(label_path)
 
 if not capture.isOpened():
     print("Failed to open video capture.")
     exit()
 
-while True:
+
+def process_frame():
     success, img = capture.read()
     imgOutput = img.copy()
 
     if not success:
-        print('can\'t read image')
-        break
+        return None
 
     hands, img = detector.findHands(img)
 
@@ -47,14 +48,8 @@ while True:
             CalculateHeight(size, h, w, imgCropped, imgFixed)
             prediction, index = GetPrediction(classifier, imgFixed)
 
-        print(prediction)
-        print(index)
-
         cv2.putText(imgOutput, letters[index], (x, y), cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 0, 255), 2)
 
-    cv2.imshow("img", imgOutput)
-    key = cv2.waitKey(1)
-
-    if key == ord('p'):
-        Close(capture)
-        break
+    _, buffer = cv2.imencode('.jpg', imgOutput)
+    frame = buffer.tobytes()
+    return frame
diff --git a/templates/index.html b/templates/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..f715c11bbfd1f7ecec43f155c61b340bcb99b8d0
--- /dev/null
+++ b/templates/index.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <title>Hand Gesture Recognition</title>
+</head>
+<body>
+    <h1>Hand Gesture Recognition</h1>
+    <img src="{{ url_for('video_feed') }}" alt="Video Feed">
+</body>
+</html>
diff --git a/trainModel.ipynb b/trainModel.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..657f4cb17323ec22d3840ed623fcbd26d5926d4c
--- /dev/null
+++ b/trainModel.ipynb
@@ -0,0 +1,241 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "62c6c5c3-d2ba-4e79-b533-828c3083e8ea",
+   "metadata": {
+    "is_executing": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Num GPUs Available:  1\n",
+      "Found 4458 images belonging to 6 classes.\n",
+      "Found 1111 images belonging to 6 classes.\n",
+      "Epoch 1/30\n",
+      "278/278 [==============================] - 60s 213ms/step - loss: 1.8360 - accuracy: 0.2733 - val_loss: 1.3567 - val_accuracy: 0.4556 - lr: 1.0000e-04\n",
+      "Epoch 2/30\n",
+      "278/278 [==============================] - 59s 213ms/step - loss: 1.2122 - accuracy: 0.5000 - val_loss: 0.8902 - val_accuracy: 0.8062 - lr: 1.0000e-04\n",
+      "Epoch 3/30\n",
+      "278/278 [==============================] - 59s 214ms/step - loss: 0.7822 - accuracy: 0.6952 - val_loss: 0.6691 - val_accuracy: 0.8469 - lr: 1.0000e-04\n",
+      "Epoch 4/30\n",
+      "278/278 [==============================] - 59s 213ms/step - loss: 0.4722 - accuracy: 0.8253 - val_loss: 0.4470 - val_accuracy: 0.9004 - lr: 1.0000e-04\n",
+      "Epoch 5/30\n",
+      "278/278 [==============================] - 59s 214ms/step - loss: 0.2882 - accuracy: 0.9070 - val_loss: 0.3356 - val_accuracy: 0.9167 - lr: 1.0000e-04\n",
+      "Epoch 6/30\n",
+      "278/278 [==============================] - 59s 214ms/step - loss: 0.2105 - accuracy: 0.9397 - val_loss: 0.3678 - val_accuracy: 0.9176 - lr: 1.0000e-07\n",
+      "Epoch 7/30\n",
+      "278/278 [==============================] - 59s 212ms/step - loss: 0.2022 - accuracy: 0.9372 - val_loss: 0.3469 - val_accuracy: 0.9149 - lr: 1.0000e-10\n",
+      "Epoch 8/30\n",
+      "278/278 [==============================] - 59s 212ms/step - loss: 0.1898 - accuracy: 0.9433 - val_loss: 0.3509 - val_accuracy: 0.9149 - lr: 1.0000e-13\n",
+      "Epoch 9/30\n",
+      "278/278 [==============================] - 59s 212ms/step - loss: 0.1995 - accuracy: 0.9412 - val_loss: 0.3384 - val_accuracy: 0.9221 - lr: 1.0000e-16\n",
+      "Epoch 10/30\n",
+      "278/278 [==============================] - 59s 213ms/step - loss: 0.1918 - accuracy: 0.9464 - val_loss: 0.3548 - val_accuracy: 0.9139 - lr: 1.0000e-19\n",
+      "70/70 [==============================] - 12s 169ms/step - loss: 0.3298 - accuracy: 0.9289\n",
+      "Test loss: 0.3297525942325592, Test accuracy: 0.9288929104804993\n",
+      "Model: \"sequential_4\"\n",
+      "_________________________________________________________________\n",
+      " Layer (type)                Output Shape              Param #   \n",
+      "=================================================================\n",
+      " vgg16 (Functional)          (None, 7, 7, 512)         14714688  \n",
+      "                                                                 \n",
+      " flatten_4 (Flatten)         (None, 25088)             0         \n",
+      "                                                                 \n",
+      " dense_20 (Dense)            (None, 1024)              25691136  \n",
+      "                                                                 \n",
+      " dropout_16 (Dropout)        (None, 1024)              0         \n",
+      "                                                                 \n",
+      " dense_21 (Dense)            (None, 512)               524800    \n",
+      "                                                                 \n",
+      " dropout_17 (Dropout)        (None, 512)               0         \n",
+      "                                                                 \n",
+      " dense_22 (Dense)            (None, 256)               131328    \n",
+      "                                                                 \n",
+      " dropout_18 (Dropout)        (None, 256)               0         \n",
+      "                                                                 \n",
+      " dense_23 (Dense)            (None, 128)               32896     \n",
+      "                                                                 \n",
+      " dropout_19 (Dropout)        (None, 128)               0         \n",
+      "                                                                 \n",
+      " dense_24 (Dense)            (None, 6)                 774       \n",
+      "                                                                 \n",
+      "=================================================================\n",
+      "Total params: 41,095,622\n",
+      "Trainable params: 26,380,934\n",
+      "Non-trainable params: 14,714,688\n",
+      "_________________________________________________________________\n",
+      "Last build: 29/07/24\n",
+      "It took: 10.10 minutes\n"
+     ]
+    }
+   ],
+   "source": [
+    "import time\n",
+    "import datetime\n",
+    "from keras.models import Sequential\n",
+    "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization\n",
+    "from keras.preprocessing.image import ImageDataGenerator\n",
+    "from keras.losses import CategoricalCrossentropy\n",
+    "from tensorflow.keras.callbacks import EarlyStopping,LearningRateScheduler\n",
+    "from tensorflow.keras.optimizers import Adam\n",
+    "import tensorflow as tf\n",
+    "from tensorflow.keras import layers, models\n",
+    "from tensorflow.keras.applications import VGG16\n",
+    "\n",
+    "from Func.getSubFolders import count_sub_folders\n",
+    "from Func.DrawImages import plot_images\n",
+    "\n",
+    "\n",
+    "path = 'Data'\n",
+    "output = 'Model/keras_model.h5'\n",
+    "start_time = time.time()\n",
+    "\n",
+    "physical_devices = tf.config.list_physical_devices('GPU')\n",
+    "print(\"Num GPUs Available: \", len(physical_devices))\n",
+    "if len(physical_devices) > 0:\n",
+    "    try:\n",
+    "        # Set memory growth to true\n",
+    "        for device in physical_devices:\n",
+    "            tf.config.experimental.set_memory_growth(device, True)\n",
+    "    except RuntimeError as e:\n",
+    "        print('runtime gpu', e)\n",
+    "else:\n",
+    "    print(\"No GPU available. Using CPU.\")\n",
+    "\n",
+    "\n",
+    "def lr_schedule(epoch, lr):\n",
+    "    if epoch < 5:\n",
+    "        return lr\n",
+    "    elif epoch < 10:\n",
+    "        return lr * 0.001\n",
+    "    else:\n",
+    "        return lr * 0.01\n",
+    "\n",
+    "\n",
+    "\n",
+    "# Step 1: Load and Preprocess Images\n",
+    "datagen = ImageDataGenerator(\n",
+    "    rescale=1. / 255,\n",
+    "    validation_split=0.2,\n",
+    "    width_shift_range=0.2,\n",
+    "    height_shift_range=0.2,\n",
+    "    shear_range=0.2,\n",
+    "    zoom_range=0.2,\n",
+    "    fill_mode='nearest',\n",
+    ")\n",
+    "\n",
+    "\n",
+    "# Step 2: Label the Data\n",
+    "train_set = datagen.flow_from_directory(\n",
+    "    path,\n",
+    "    target_size=(224, 224),\n",
+    "    batch_size=16,\n",
+    "    class_mode='categorical',\n",
+    "    subset='training'\n",
+    ")\n",
+    "\n",
+    "test_set = datagen.flow_from_directory(\n",
+    "    path,\n",
+    "    target_size=(224, 224),\n",
+    "    batch_size=16,\n",
+    "    class_mode='categorical',\n",
+    "    subset='validation'\n",
+    ")\n",
+    "\n",
+    "# draw Images\n",
+    "#images, labels = next(train_set)\n",
+    "\n",
+    "#class_indices = test_set.class_indices\n",
+    "#plot_images(images, labels, class_indices)\n",
+    "\n",
+    "base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3)) # EXPLAINNNNN\n",
+    "base_model.trainable = False\n",
+    "\n",
+    "# Step 4: Build the Model\n",
+    "\n",
+    "model = Sequential([\n",
+    "    base_model,\n",
+    "    Flatten(),\n",
+    "    Dense(units=1024, activation='relu'),\n",
+    "    Dropout(0.5),\n",
+    "    Dense(units=512, activation='relu'),\n",
+    "    Dropout(0.5),\n",
+    "    Dense(units=256, activation='relu'),\n",
+    "    Dropout(0.5),\n",
+    "    Dense(units=128, activation='relu'),\n",
+    "    Dropout(0.5),\n",
+    "    Dense(units=count_sub_folders(path), activation='softmax')\n",
+    "])\n",
+    "\n",
+    "\n",
+    "\n",
+    "# Compile the Model\n",
+    "model.compile(optimizer=Adam(learning_rate=0.0001),\n",
+    "              loss=CategoricalCrossentropy(from_logits=False),\n",
+    "              metrics=['accuracy'])\n",
+    "\n",
+    "# Step 6: Train the Model\n",
+    "early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n",
+    "lr_scheduler = LearningRateScheduler(lr_schedule)\n",
+    "\n",
+    "model.fit(\n",
+    "    train_set,\n",
+    "    steps_per_epoch=train_set.samples // train_set.batch_size,\n",
+    "    validation_data=test_set,\n",
+    "    validation_steps=test_set.samples // test_set.batch_size,\n",
+    "    epochs=30,\n",
+    "    callbacks=[early_stopping, lr_scheduler]\n",
+    ")\n",
+    "\n",
+    "# Step 7: Evaluate the Model\n",
+    "loss, accuracy = model.evaluate(test_set)\n",
+    "print(f'Test loss: {loss}, Test accuracy: {accuracy}')\n",
+    "\n",
+    "# Save the trained model\n",
+    "model.save(output)\n",
+    "end_time = time.time()\n",
+    "\n",
+    "execute_time = (end_time - start_time) / 60\n",
+    "\n",
+    "model.summary()\n",
+    "\n",
+    "# Print the result\n",
+    "print(\"Last build:\", datetime.datetime.now().strftime(\"%d/%m/%y\"))\n",
+    "print(f\"It took: {execute_time:0.2f} minutes\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "121f1652-29e2-4fdc-b78b-b28d84965cad",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.19"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/trainModel.py b/trainModel.py
index 47a1da4ae475bf3331f5a1dc506ade22cc1e5014..9e819418de803d869c6506012aef4dfdd2c7c18d 100644
--- a/trainModel.py
+++ b/trainModel.py
@@ -1,55 +1,112 @@
+import time
 from keras.models import Sequential
 from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
 from keras.preprocessing.image import ImageDataGenerator
 from keras.losses import CategoricalCrossentropy
+from keras.callbacks import EarlyStopping
+from keras.optimizers import Adam
+import tensorflow as tf
+from keras import layers, models
 
 from Func.getSubFolders import count_sub_folders
 
-path = 'Data_test'
-output = 'Model/pruned.h5'
+path = 'Data'
+output = 'Model/keras_model.h5'
+start_time = time.time()
 
 # Step 1: Load and Preprocess Images
-train_datagen = ImageDataGenerator(
+datagen = ImageDataGenerator(
     rescale=1. / 255,
+    validation_split=0.2,
+    width_shift_range=0.2,
+    height_shift_range=0.2,
     shear_range=0.2,
     zoom_range=0.2,
-    horizontal_flip=True
 )
 
 test_datagen = ImageDataGenerator(rescale=1. / 255)
 
 # Step 2: Label the Data
-train_set = train_datagen.flow_from_directory(
+train_set = datagen.flow_from_directory(
     path,
     target_size=(224, 224),
     batch_size=32,
-    class_mode='categorical'
+    class_mode='categorical',
+    subset='training'
 )
 
-test_set = test_datagen.flow_from_directory(
+test_set = datagen.flow_from_directory(
     path,
     target_size=(224, 224),
     batch_size=32,
-    class_mode='categorical'
+    class_mode='categorical',
+    subset='validation'
 )
 
 # Step 4: Build the Model
 model = Sequential()
 model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3), activation='relu'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
+
+model.add(Conv2D(64, (3, 3), activation='relu'))
+model.add(MaxPooling2D((2, 2)))
+
+model.add(Conv2D(128, (3, 3), activation='relu'))
+model.add(MaxPooling2D((2, 2)))
+
+model.add(Conv2D(256, (3, 3), activation='relu'))
+model.add(MaxPooling2D((2, 2)))
+
 model.add(Flatten())
+model.add(Dense(units=256, activation='relu'))
 model.add(Dense(units=128, activation='relu'))
 model.add(Dense(units=count_sub_folders(path), activation='softmax'))
-
+# def createLayers(input_shape=(224, 224, 3)):
+#     inputs = tf.keras.Input(shape=input_shape)
+#
+#     x = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name='block_1_expand')(inputs)
+#     x = layers.BatchNormalization(name='block_1_expand_BN')(x)
+#     x = layers.ReLU(6., name='block_1_expand_relu')(x)
+#
+#     x = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name='block_1_depthwise')(x)
+#     x = layers.BatchNormalization(name='block_1_depthwise_BN')(x)
+#     x = layers.ReLU(6., name='block_1_depthwise_relu')(x)
+#
+#     x = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name='block_1_project')(x)
+#     x = layers.BatchNormalization(name='block_1_project_BN')(x)
+#
+#     for i in range(2,5):
+#         x1 = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name=f'block_{i}_expand')(x)
+#         x1 = layers.BatchNormalization(name=f'block_{i}_expand_BN')(x1)
+#         x1 = layers.ReLU(6., name=f'block_{i}_expand_relu')(x1)
+#
+#         x1 = layers.DepthwiseConv2D((3, 3), padding='same', use_bias=False, name=f'block_{i}_depthwise')(x1)
+#         x1 = layers.BatchNormalization(name=f'block_{i}_depthwise_BN')(x1)
+#         x1 = layers.ReLU(6., name=f'block_{i}_depthwise_relu')(x1)
+#
+#         x1 = layers.Conv2D(8, (1, 1), padding='same', use_bias=False, name=f'block_{i}_project')(x1)
+#         x1 = layers.BatchNormalization(name=f'block_{i}_project_BN')(x1)
+#
+#         x = layers.Add(name=f'block_{i}_add')([x, x1])
+#
+#     x = tf.keras.layers.GlobalAveragePooling2D()(x)
+#     outputs = tf.keras.layers.Dense(count_sub_folders(path), activation='softmax')(x)
+#     model = models.Model(inputs, outputs, name='testModel')
+#
+#     return model
+#
+#
+# model = createLayers()
 
 # Compile the Model after pruning
-model.compile(optimizer='adam',
+model.compile(optimizer=Adam(learning_rate=0.001),
               loss=CategoricalCrossentropy(from_logits=False),
               metrics=['accuracy'])
 
 # Step 6: Train the Model
-model.fit(train_set, epochs=10, validation_data=test_set)
+early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
 
+model.fit(train_set, validation_data=test_set, epochs=10, callbacks=[early_stopping])
 
 # Step 7: Evaluate the Model
 loss, accuracy = model.evaluate(test_set)
@@ -57,3 +114,11 @@ print(f'Test loss: {loss}, Test accuracy: {accuracy}')
 
 # Save the trained model
 model.save(output)
+end_time = time.time()
+
+execute_time = (end_time - start_time) / 60
+
+model.summary()
+
+# Print the result
+print(f"It took: {execute_time:0.2f} minutes")