import cv2
import numpy as np
from cvzone.HandTrackingModule import HandDetector
from Func.ClassifierModule import Classifier

from Func.CloseProgram import Close
from Func.Helpers import *

offset = 20  # padding in pixels added around the detected hand bounding box
size = 300   # side length of the square canvas the hand crop is placed onto

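# Webcam capture, single-hand detector, and the trained classifier with its label list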
capture = cv2.VideoCapture(0)
detector = HandDetector(maxHands=1)
model_path = "Model/keras_model.h5"
label_path = "Model/labels.txt"

classifier = Classifier(model_path, label_path)
letters = GetLabels(label_path)

if not capture.isOpened():
    print("Failed to open video capture.")
    exit()

while True:
    success, img = capture.read()

    if not success:
        print("Can't read image from the camera.")
        break

    imgOutput = img.copy()  # clean copy used for the annotated output window

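    # Detect up to one hand; findHands also draws the detected landmarks on img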
    hands, img = detector.findHands(img)

    if hands:
        hand = hands[0]
        x, y, w, h = hand['bbox']

        # Square canvas for the classifier and a padded crop of the hand; the start
        # indices are clamped at 0 so a hand near the frame edge can't wrap the slice
        imgFixed = np.ones((size, size, 3), np.uint8)
        imgCropped = img[max(0, y - offset):y + h + offset, max(0, x - offset):x + w + offset]

        aspectRatio = h / w

        # Fit the longer side of the hand to the square canvas, then classify the result
        if aspectRatio > 1:
            CalculateWidth(size, h, w, imgCropped, imgFixed)
        else:
            CalculateHeight(size, h, w, imgCropped, imgFixed)

        cv2.imshow("imgFixed", imgFixed)
        prediction, index = GetPrediction(classifier, imgFixed)

        print('pred', prediction)
        print('index', index)

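        # Overlay the predicted letter at the top-left corner of the hand's bounding box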
        cv2.putText(imgOutput, letters[index], (x, y), cv2.FONT_HERSHEY_TRIPLEX, 2, (255, 0, 255), 2)

    cv2.imshow("img main", imgOutput)
    key = cv2.waitKey(1)

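    # Press 'p' to clean up (via Close) and exit the loop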
    if key == ord('p'):
        Close(capture)
        break