import gc
import itertools
import time

import numpy as np
# Set seed for reproducibility
np.random.seed(1337)

import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras import backend as be
from keras.callbacks import CSVLogger
from keras.layers import Input, LSTM, Dense, RNN, SimpleRNN, GRU, Dropout
from keras.models import Model, Sequential
from keras.utils import to_categorical
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.utils import shuffle

from variational_autoencoder import transform_data_with_VAE, VAE
### Training
def create_model(learn_rate=0.001, clip_value=1, cell_type='LSTM', num_units=100,
                 dropout=False, add_conv=False, input_dim=None, num_filters=32,
                 kernel_size=10, pool_size=5, stride_size=4):
    """Build the classifier. `cell_type` and `num_units` only apply to the
    commented-out recurrent block below; with it disabled, the model is a
    two-layer Conv1D stack followed by a softmax over the 4 classes."""
    model = Sequential()
    if add_conv:
        model.add(keras.layers.Conv1D(num_filters, kernel_size, input_shape=input_dim,
                                      strides=stride_size, activation='relu'))
        if dropout:
            model.add(Dropout(0.5))
        model.add(keras.layers.BatchNormalization())
        # input_shape is only meaningful on the first layer, so it is omitted here
        model.add(keras.layers.Conv1D(num_filters, kernel_size,
                                      strides=stride_size, activation='relu'))
        if dropout:
            model.add(Dropout(0.5))
        model.add(keras.layers.BatchNormalization())
        model.add(keras.layers.MaxPool1D(pool_size=pool_size))
    # if cell_type == 'LSTM':
    #     model.add(keras.layers.CuDNNLSTM(num_units, input_shape=input_dim))
    # elif cell_type == 'GRU':
    #     model.add(keras.layers.CuDNNGRU(num_units, input_shape=input_dim))
    # if dropout:
    #     model.add(Dropout(0.5))
    # model.add(keras.layers.BatchNormalization())
    model.add(keras.layers.Flatten())
    model.add(Dense(4, activation="softmax"))
    # Use the clip_value argument instead of a hardcoded clipvalue=1
    optimizer = keras.optimizers.RMSprop(lr=learn_rate, clipvalue=clip_value)
    model.compile(loss="categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=["accuracy"])
    return model
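
# Minimal usage sketch (shapes are illustrative, not taken from the saved data):
#   m = create_model(learn_rate=0.01, dropout=True, add_conv=True,
#                    input_dim=(1000, 22), num_filters=32,
#                    kernel_size=16, stride_size=4, pool_size=4)
#   m.summary()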
def crop_data_aug(X, y, crop):
    """Sliding-window crop augmentation: every length-`crop` window along the
    time axis of each trial becomes a new sample with the original label.
    Expects channels-first data of shape (N, C, T), i.e. before the axis
    swap done in transform_data()."""
    crop_size = crop
    original_train_X = X
    original_train_y = y
    N, C, T = original_train_X.shape  # trials, channels, time
    print("Original Data:", original_train_X.shape, original_train_y.shape)
    crops_per_sample = T - crop_size + 1
    cropped_train_X = np.zeros((N * crops_per_sample, C, crop_size))
    cropped_train_y = np.zeros(N * crops_per_sample)
    for n in np.arange(N):
        for t in np.arange(crops_per_sample):
            idx = n * crops_per_sample + t
            cropped_train_X[idx] = original_train_X[n, :, t:t + crop_size]
            cropped_train_y[idx] = original_train_y[n]
    print("Cropped Data:", cropped_train_X.shape, cropped_train_y.shape)
    return cropped_train_X, cropped_train_y
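
# Shape check (hypothetical toy sizes): 2 trials of 3 channels x 5 time steps
# with crop=4 give 2 * (5 - 4 + 1) = 4 cropped samples of shape (3, 4):
#   Xc, yc = crop_data_aug(np.random.randn(2, 3, 5), np.array([0, 1]), crop=4)
#   assert Xc.shape == (4, 3, 4) and yc.shape == (4,)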
def transform_data(X, y, crop=False):
    """Swap to (trials, time, channels), keep the 22 EEG channels (dropping the
    3 EOG channels), and one-hot encode the labels. `crop` is currently unused."""
    X = np.swapaxes(X, 1, 2)
    print("Swapped axes:", X.shape)
    X = X[:, :, :22]
    print("Removed EOG channels:", X.shape)
    # Encode output labels: the raw labels are the event codes 769-772
    print("Raw labels:", y[0:10])
    y = y - 769
    print("Fixed:", y[0:10])
    y = to_categorical(y, 4)
    print("Categorical one-hot encoding:\n", y[0:3])
    return (X, y)
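
# Label sketch (hypothetical values): event code 770 maps to class 1 and then
# to the one-hot vector [0, 1, 0, 0]:
#   Xt, yt = transform_data(np.random.randn(4, 25, 100),
#                           np.array([769, 770, 771, 772]))
#   assert Xt.shape == (4, 100, 22) and (yt[1] == [0, 1, 0, 0]).all()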
if __name__ == '__main__':
    logfile = 'training_vae.log'
    X_test = np.load("X_test.npy")
    y_test = np.load("y_test.npy")
    person_train_valid = np.load("person_train_valid.npy")
    X_train_valid = np.load("X_train_valid.npy")
    y_train_valid = np.load("y_train_valid.npy")
    person_test = np.load("person_test.npy")
    print('Training/Valid data shape: {}'.format(X_train_valid.shape))
    print('Test data shape: {}'.format(X_test.shape))
    print('Training/Valid target shape: {}'.format(y_train_valid.shape))
    print('Test target shape: {}'.format(y_test.shape))
    print('Person train/valid shape: {}'.format(person_train_valid.shape))
    print('Person test shape: {}'.format(person_test.shape))

    X_train, X_valid, y_train, y_valid = train_test_split(X_train_valid, y_train_valid, test_size=0.2)
    X_train, y_train = transform_data(X_train, y_train)
    X_valid, y_valid = transform_data(X_valid, y_valid)
    X_test, y_test = transform_data(X_test, y_test)

    use_vae = True
    if use_vae:
        print("Transforming data using VAE")
        vae = VAE(X_train.shape[2], 64, 3)
        vae.load_weights("vae_mlp_eeg_weights.h5")
        X_train = transform_data_with_VAE(vae, X_train)
        X_valid = transform_data_with_VAE(vae, X_valid)
        X_test = transform_data_with_VAE(vae, X_test)

    # Replicate the training set 6x and shuffle (cheap augmentation for a small N)
    X_train, y_train = shuffle(np.concatenate([X_train] * 6),
                               np.concatenate([y_train] * 6))
    input_dim = X_train.shape[1:]

    lr_scheduler = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1)
    early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=20, verbose=1)
    csv_logger = CSVLogger(logfile)
    print("DATA READY FOR TRAINING!")

    start = time.time()
    model = create_model(learn_rate=0.01, cell_type='LSTM', num_units=64, dropout=True,
                         add_conv=True, input_dim=input_dim, num_filters=32,
                         pool_size=4, kernel_size=16, stride_size=4)
    history = model.fit(X_train, y_train, epochs=250, batch_size=128,
                        validation_data=(X_valid, y_valid), verbose=1,
                        callbacks=[lr_scheduler, csv_logger, early_stopping])
    print("Trained in {:.3f}s".format(time.time() - start))
    print("acc max: {:.3f} last: {:.3f}".format(max(history.history['acc']), history.history['acc'][-1]))
    print("val_acc max: {:.3f} last: {:.3f}".format(max(history.history['val_acc']), history.history['val_acc'][-1]))
    print("\nTEST SET accuracy:")
    print(model.evaluate(X_test, y_test))
    model.save_weights("best_weights_vae.h5")
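
    # Optional sketch: plot learning curves using the matplotlib import above
    # (left commented out so the script can run headless):
    #   plt.plot(history.history['acc'], label='train')
    #   plt.plot(history.history['val_acc'], label='valid')
    #   plt.xlabel('epoch'); plt.ylabel('accuracy'); plt.legend()
    #   plt.savefig('training_curves_vae.png')  # hypothetical output filename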
    # --- Hyperparameter grid search (disabled; kept for reference) ---
    # lrs = [0.01, 0.03, 0.001, 0.003]  # also tried: 0.0003, 0.0001
    # num_hidden_dim = [64]
    # cell_types = ['LSTM', 'GRU']
    # use_dropout = [True]
    # pool_sizes = [2, 4, 8]
    # kernel_sizes = [4, 8, 16, 32]
    # stride_sizes = [2, 4, 8]
    # filter_sizes = [32, 64]
    # results = dict()
    # for key in itertools.product(lrs, num_hidden_dim, cell_types, use_dropout,
    #                              pool_sizes, kernel_sizes, stride_sizes, filter_sizes):
    #     lr, h_dim, c_t, d, pool_size, kernel_size, ss, num_filters = key
    #     start = time.time()
    #     model = create_model(learn_rate=lr, cell_type=c_t, num_units=h_dim, dropout=d,
    #                          add_conv=True, input_dim=input_dim, num_filters=num_filters,
    #                          pool_size=pool_size, kernel_size=kernel_size, stride_size=ss)
    #     print("Params: {}".format(key))
    #     try:
    #         history = model.fit(X_train, y_train, epochs=15, batch_size=128,
    #                             validation_data=(X_valid, y_valid), verbose=0,
    #                             callbacks=[lr_scheduler])
    #         print("acc max: {:.3f} last: {:.3f}".format(max(history.history['acc']), history.history['acc'][-1]))
    #         print("val_acc max: {:.3f} last: {:.3f}".format(max(history.history['val_acc']), history.history['val_acc'][-1]))
    #         results[key] = history.history
    #     except:
    #         results[key] = "EXCEPTION"
    #         print("Failed")
    #     print("Trained in {0:.3f}\n".format(time.time() - start))
    # import pickle
    # f = open("dict.pkl", "wb+")
    # pickle.dump(results, f)
    # f.close()
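
    # To inspect the pickled grid-search results later (sketch; assumes dict.pkl
    # was written by the loop above):
    #   import pickle
    #   with open("dict.pkl", "rb") as f:
    #       results = pickle.load(f)
    #   valid = {k: v for k, v in results.items() if v != "EXCEPTION"}
    #   best = max(valid, key=lambda k: max(valid[k]['val_acc']))
    #   print(best, max(valid[best]['val_acc']))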