-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcnn_model.py
More file actions
108 lines (79 loc) · 2.8 KB
/
cnn_model.py
File metadata and controls
108 lines (79 loc) · 2.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
# Part 1 - Building the CNN
#importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense, Dropout
from keras import optimizers
# Initialise the sequential CNN architecture.
classifier = Sequential()

# First convolution + pooling stack.
# NOTE(review): in Keras' positional API, Convolution2D(32, 3, (2, 2))
# means filters=32, kernel_size=3, strides=(2, 2); the keyword arguments
# below make that explicit.  Verify strides=(2, 2) (rather than a
# kernel_size of (3, 3)) was really what was intended here.
classifier.add(Convolution2D(filters=32, kernel_size=3, strides=(2, 2),
                             input_shape=(64, 64, 3), activation='relu'))
# NOTE(review): pool_size=(1, 1) performs no downsampling at all — confirm intent.
classifier.add(MaxPooling2D(pool_size=(1, 1)))

# Second convolution + pooling stack.
classifier.add(Convolution2D(filters=32, kernel_size=3, strides=(2, 2),
                             activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Third convolution + pooling stack.
classifier.add(Convolution2D(filters=32, kernel_size=3, strides=(2, 2),
                             activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Flatten the feature maps, then classify through fully connected layers.
classifier.add(Flatten())
classifier.add(Dense(256, activation='relu'))
classifier.add(Dropout(0.5))                     # regularisation against overfitting
classifier.add(Dense(26, activation='softmax'))  # 26 output classes

# Compile with plain SGD and categorical cross-entropy.
classifier.compile(
    optimizer=optimizers.SGD(learning_rate=0.01),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Part 2 - Streaming the images into the network
from keras.preprocessing.image import ImageDataGenerator

# Training images: rescale to [0, 1] plus light augmentation.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)

# Test images: rescaling only — no augmentation at evaluation time.
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Directory iterators yielding (images, one-hot labels) batches.
training_set = train_datagen.flow_from_directory(
    'mydata/training_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='categorical',
)
test_set = test_datagen.flow_from_directory(
    'mydata/test_set',
    target_size=(64, 64),
    batch_size=32,
    class_mode='categorical',
)
# Part 3 - Training, saving, and inspecting the model
import h5py  # kept: keras relies on h5py to write HDF5 (.h5) model files

# fit() returns a History object; its .history dict holds per-epoch metrics.
model = classifier.fit(
    training_set,
    steps_per_epoch=800,    # NOTE(review): usually len(training_set) — confirm
    epochs=5,
    validation_data=test_set,
    validation_steps=6500,  # NOTE(review): likely far exceeds the test set — confirm
)

# Persist the full model (architecture + weights) to disk.
classifier.save('Trained_model_ddm.h5')

# Bug fix: summary() prints its table itself and returns None, so the
# original print(classifier.summary()) emitted a spurious trailing "None".
classifier.summary()

# Optional: visualise the training curves recorded in model.history.
# (Modern Keras uses the metric keys 'accuracy'/'val_accuracy'; older
# releases used 'acc'/'val_acc' as in the original commented code.)
# import matplotlib.pyplot as plt
# plt.plot(model.history['accuracy'])
# plt.plot(model.history['val_accuracy'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# plt.plot(model.history['loss'])
# plt.plot(model.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()