Commit b84214c9 authored by Michiel_VE

setup pruning

parent 8ff30ecd
.gitignore
 .idea
 venv
 Data
+Data_test
+Model/*.h5
\ No newline at end of file
File added
 from keras.models import Sequential
 from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
 from keras.preprocessing.image import ImageDataGenerator
+from keras.losses import CategoricalCrossentropy
 from Func.getSubFolders import count_sub_folders

+path = 'Data_test'
+output = 'Model/pruned.h5'
+
 # Step 1: Load and Preprocess Images
-# You can use ImageDataGenerator for on-the-fly data augmentation and normalization
 train_datagen = ImageDataGenerator(
     rescale=1. / 255,
     shear_range=0.2,
@@ -15,41 +19,41 @@ train_datagen = ImageDataGenerator(
 test_datagen = ImageDataGenerator(rescale=1. / 255)

 # Step 2: Label the Data
-# Assume you have two classes: 'cat' and 'dog'
 train_set = train_datagen.flow_from_directory(
-    'Data/',
-    target_size=(224, 224),  # image size
-    batch_size=32,  # batch size
-    class_mode='categorical'  # multiple folders in Data
+    path,
+    target_size=(224, 224),
+    batch_size=32,
+    class_mode='categorical'
 )

 test_set = test_datagen.flow_from_directory(
-    'Data',
+    path,
     target_size=(224, 224),
     batch_size=32,
-    class_mode='categorical'  # Use categorical for multiple classes
+    class_mode='categorical'
 )

 # Step 4: Build the Model
 model = Sequential()
 model.add(Conv2D(32, (3, 3), input_shape=(224, 224, 3), activation='relu'))
 model.add(MaxPooling2D(pool_size=(2, 2)))
 model.add(Flatten())
 model.add(Dense(units=128, activation='relu'))
-model.add(Dense(units=count_sub_folders('Data'), activation='softmax'))
+model.add(Dense(units=count_sub_folders(path), activation='softmax'))

-# Step 5: Compile the Model
-model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
+# Compile the Model after pruning
+model.compile(optimizer='adam',
+              loss=CategoricalCrossentropy(from_logits=False),
+              metrics=['accuracy'])

 # Step 6: Train the Model
-# model.fit(train_set, epochs=25, validation_data=test_set)
+model.fit(train_set, epochs=10, validation_data=test_set)

-# Step 7: Evaluate the Model
-# loss, accuracy = model.evaluate(test_set)
-# print(f'Test loss: {loss}, Test accuracy: {accuracy}')
-
-# Step 8: Make Predictions
+# Step 7: Evaluate the Model
+loss, accuracy = model.evaluate(test_set)
+print(f'Test loss: {loss}, Test accuracy: {accuracy}')

 # Save the trained model
-# model.save('Model/Model.h5')
+model.save(output)
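
Note: the commit message says "setup pruning", but the diff above only switches the data path, the loss, and the output file to Model/pruned.h5; no pruning API is actually wired in yet. Below is a minimal sketch of how magnitude pruning could be layered onto this script with the TensorFlow Model Optimization toolkit (tfmot). The tfmot import, the 50% target sparsity, and the 1000-step schedule are illustrative assumptions, not part of the commit; the sketch reuses model, train_set, test_set, CategoricalCrossentropy, and output from the script above.

import tensorflow_model_optimization as tfmot

# Wrap the existing Sequential model; weights are driven toward 50% sparsity
# over the first 1000 training steps (both values are illustrative).
pruning_schedule = tfmot.sparsity.keras.PolynomialDecay(
    initial_sparsity=0.0,
    final_sparsity=0.5,
    begin_step=0,
    end_step=1000,
)
pruned_model = tfmot.sparsity.keras.prune_low_magnitude(
    model, pruning_schedule=pruning_schedule
)

# The wrapped model must be recompiled before training.
pruned_model.compile(optimizer='adam',
                     loss=CategoricalCrossentropy(from_logits=False),
                     metrics=['accuracy'])

# UpdatePruningStep advances the pruning schedule once per batch.
pruned_model.fit(train_set,
                 epochs=10,
                 validation_data=test_set,
                 callbacks=[tfmot.sparsity.keras.UpdatePruningStep()])

# Drop the pruning wrappers so the saved .h5 is a plain Keras model.
final_model = tfmot.sparsity.keras.strip_pruning(pruned_model)
final_model.save(output)

strip_pruning is used before saving so the exported file contains an ordinary Keras model with the pruned weights zeroed out; the size benefit only shows up after compression or a sparse-aware export.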