-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpredict_model_chem.py
More file actions
123 lines (95 loc) · 2.94 KB
/
predict_model_chem.py
File metadata and controls
123 lines (95 loc) · 2.94 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import json
from numpy import array
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# Load the two input datasets. Schema assumed from usage below — TODO confirm:
#   chem_data[fips][year]    -> list of chemistry feature values
#   drought_data[fips][year] -> drought label(s) for that county-year
with open('chem_2.json') as f:
    chem_data = json.load(f)
with open('drought.json') as f:
    drought_data = json.load(f)
# Accumulators for the train (< 2015) and test (>= 2015) splits.
train_data = []
train_labels = []
test_data = []
test_labels = []
# Join drought labels with chemistry features into feature rows, splitting
# by year: before 2015 -> training set, 2015 and later -> test set.
counter = 0  # number of successfully joined (fips, year) rows
for fip in drought_data:
    # BUG FIX: the original inner loop was `for year in drought_data`, which
    # iterated FIPS keys (not this county's years). That skipped almost every
    # valid row and could raise KeyError at drought_data[fip][year].
    for year in drought_data[fip]:
        # Keep only county-years present in both datasets.
        if fip not in chem_data or year not in chem_data[fip]:
            continue
        counter += 1
        # Feature vector: [fips_code, *chemistry features for that year].
        row = [int(fip)]
        row.extend(chem_data[fip][year])
        label = drought_data[fip][year]
        if int(year) < 2015:
            train_data.append(row)
            train_labels.append(label)
        else:
            test_data.append(row)
            test_labels.append(label)
# Convert the accumulated lists to NumPy arrays for normalization/training.
train_data = array(train_data)
train_labels = array(train_labels)
test_data = array(test_data)
test_labels = array(test_labels)
print(test_data)
# Standardize features using statistics computed on the TRAINING split only,
# so no information from the test set leaks into preprocessing.
mean = train_data.mean(axis=0)
std = train_data.std(axis=0)
# ROBUSTNESS FIX: a zero-variance column (e.g. the FIPS feature when only one
# county joined) would otherwise produce divide-by-zero NaNs/infs.
std[std == 0] = 1.0
train_data = (train_data - mean) / std
test_data = (test_data - mean) / std
print(test_data)
#print(train_data)
#print(test_data)
def get_length(map_data):
    """Return the feature-vector length of the first populated entry in a
    ``{fips: {year: [features]}}`` mapping (implicitly None if empty)."""
    for by_year in map_data.values():
        for features in by_year.values():
            return len(features)
# Input width: 1 (the FIPS code column) + number of chemistry features.
dim = 1 + get_length(chem_data)
def build_model():
    """Construct and compile the regression network.

    Two ReLU hidden layers of width ``dim`` feed a 6-unit linear output;
    the model is trained with RMSProp on mean squared error and also
    reports mean absolute error.
    """
    net = keras.Sequential()
    net.add(keras.layers.Dense(dim, activation=tf.nn.relu,
                               input_shape=(train_data.shape[1],)))
    net.add(keras.layers.Dense(dim, activation=tf.nn.relu))
    net.add(keras.layers.Dense(6))
    # NOTE(review): tf.train.RMSPropOptimizer is the TF1-era API; this script
    # presumably targets TensorFlow 1.x — confirm before upgrading.
    net.compile(loss='mse',
                optimizer=tf.train.RMSPropOptimizer(0.001),
                metrics=['mae'])
    return net
model = build_model()
# Train for up to 500 epochs, but stop early once validation loss has not
# improved for 20 consecutive epochs.
EPOCHS = 500
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
class PrintDot(keras.callbacks.Callback):
    """Minimal progress indicator: one dot per epoch, with a line break
    every 100 epochs."""

    def on_epoch_end(self, epoch, logs):
        if epoch % 100 == 0:
            print('')
        print('.', end='')
# Fit with a 20% validation split; verbose=0 suppresses Keras's built-in
# progress bar in favor of PrintDot's dot-per-epoch output.
history = model.fit(train_data, train_labels, epochs=EPOCHS,
                    validation_split=0.2, verbose=0,
                    callbacks=[early_stop, PrintDot()])
model.summary()
def plot_history(history):
    """Plot training and validation mean-absolute-error curves over epochs.

    NOTE(review): the '[1000$]' axis label looks copied from a housing-price
    tutorial — confirm the actual unit of the drought labels.
    """
    epochs = history.epoch
    metrics = history.history
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [1000$]')
    plt.plot(epochs, np.array(metrics['mean_absolute_error']),
             label='Train Loss')
    plt.plot(epochs, np.array(metrics['val_mean_absolute_error']),
             label='Val loss')
    plt.legend()
    plt.show()
plot_history(history)
# Predict on the held-out (>= 2015) split and flatten to a 1-D vector.
# NOTE(review): the model's output layer is Dense(6), so predict() yields 6
# values per row — verify that test_labels has a matching shape before the
# elementwise subtraction below.
test_predictions = model.predict(test_data).flatten()
# Scatter of predictions vs. ground truth; points on the y = x diagonal
# drawn below are perfect predictions.
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [1000$]')
plt.ylabel('Predictions [1000$]')
plt.axis('equal')
plt.xlim(plt.xlim())
plt.ylim(plt.ylim())
plt.plot([-100, 100], [-100, 100])
plt.show()
# Histogram of signed prediction errors.
error = test_predictions - test_labels.flatten()
plt.hist(error, bins = 50)
plt.xlabel("Prediction Error [1000$]")
plt.ylabel("Count")
plt.show()