-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathML-Recipe-Classification-Test.py
More file actions
116 lines (80 loc) · 3.16 KB
/
ML-Recipe-Classification-Test.py
File metadata and controls
116 lines (80 loc) · 3.16 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
#!/usr/bin/env python
# coding: utf-8
import csv
import numpy as np
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout, Flatten
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import model_from_json
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import EarlyStopping
import os
# Silence TensorFlow C++ logging (3 = errors only).
# NOTE(review): this env var normally must be set BEFORE TensorFlow is first
# imported; keras was already imported above, so it may have limited effect here.
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
import warnings
# Suppress UserWarning noise (e.g. from deprecated keras import paths above).
warnings.simplefilter("ignore", UserWarning)
# fix random seed for reproducibility
np.random.seed(7)
# Root directory containing the data/, Models/ and Results/ subdirectories.
ROOT_PATH = './'
def load_data(path, name_of_data):
    """Load one dataset split from ``<path>data/``.

    Parameters
    ----------
    path : str
        Root directory; must end with a path separator (e.g. ``'./'``),
        because it is concatenated with ``'data/'`` directly.
    name_of_data : str
        Split name. ``<name>.txt`` holds one text record per line; for any
        split other than ``"test"``, ``<name>.labels`` holds one integer
        label per line.

    Returns
    -------
    list[str]
        The text records, when ``name_of_data == "test"``.
    tuple[list[str], list[int]]
        ``(records, labels)`` for every other split.
    """
    def _read_lines(filename):
        # Plain line iteration replaces the original csv.reader with
        # delimiter="\n": csv quoting could mangle lines containing quote
        # characters, and a blank line produced an empty row that crashed
        # on row[0] with IndexError. Blank lines are now skipped.
        with open(path + 'data/' + filename) as fh:
            return [line.rstrip('\n') for line in fh if line.strip()]

    data = _read_lines(name_of_data + '.txt')
    if name_of_data == "test":
        return data
    labels = [int(lab) for lab in _read_lines(name_of_data + '.labels')]
    return data, labels
# Load the training split (texts + integer labels) and the unlabeled test split.
X_train, y_train = load_data(ROOT_PATH, "train")
X_test = load_data(ROOT_PATH, "test")
print("Train Records: ", len(X_train), len(y_train))
print("Test Records: ", len(X_test))
# Fit the vocabulary on the TRAINING texts only (top 10k words, lowercased),
# then map the test texts through it so token ids match training-time ids.
# NOTE(review): this assumes train.txt is identical to the data the saved
# model was trained on — otherwise the word index will not line up; confirm.
tokenizer = Tokenizer(num_words=10000, lower = True)
tokenizer.fit_on_texts(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
# Adding 1 because of reserved 0 index
vocab_size = len(tokenizer.word_index) + 1
# truncate and pad input sequences
max_review_length = 1000
# Post-pad/truncate every test sequence to exactly 1000 tokens.
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length, padding='post')
X_test = np.array(X_test)
print(X_test.dtype)
print(X_test.shape)
print("Vocab size =", vocab_size)
# Index selecting which saved model checkpoint to evaluate.
MODEL_INDEX = 35
modelname='PR2_' + str(MODEL_INDEX)
# Rebuild the architecture from its JSON description, then restore weights.
json_file = open(ROOT_PATH + 'Models/_' + modelname + '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(filepath = ROOT_PATH + 'Models/_' + modelname + '.hdf5')
print("Loaded " + modelname +" from disk")
opt = Adam(learning_rate=0.001)
# evaluate loaded model on test data
# Compile is required before predict/evaluate with metrics; loss/optimizer are
# unused at inference time beyond satisfying the API.
loaded_model.compile(loss= 'categorical_crossentropy',
              optimizer = opt,
              metrics=['accuracy'])
# NOTE(review): the same padded matrix is fed twice, implying a two-input
# model (e.g. twin branches over the same sequence) — confirm against the
# saved architecture JSON.
predicted_labels = loaded_model.predict([X_test,X_test])
# argmax over the class-probability axis gives 0-based class indices.
final_predictions = np.argmax(predicted_labels, axis=-1)
print(final_predictions.shape)
# Shift to 1-based ids — presumably the label files use labels starting at 1
# (load_data reads them as-is); verify against train.labels.
final_predictions = final_predictions + 1
# Persist the 1-based predicted labels, one per line, under Results/.
results_path = ROOT_PATH + "Results/" + modelname + "_Results.txt"
with open(results_path, 'w', newline='') as out_file:
    result_writer = csv.writer(out_file, delimiter=' ')
    for prediction in final_predictions:
        result_writer.writerow([prediction])
print("File saved at ", results_path)