-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathPerceptronLearningAlgorithm.py
More file actions
108 lines (82 loc) · 2.29 KB
/
PerceptronLearningAlgorithm.py
File metadata and controls
108 lines (82 loc) · 2.29 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import numpy as np
import pandas as pd
import random
def eval_perceptron(inp, w):
    """Perceptron output for input vector `inp` under weights `w`.

    Returns the sign (+1, -1, or 0) of the dot product of the two
    vectors, truncated to the shorter of the two sequences.
    """
    activation = sum(a * b for a, b in zip(inp, w))
    if activation > 0:
        return 1
    if activation < 0:
        return -1
    return 0
def mult(x, y):
    """Dot product of sequences `x` and `y` as a float.

    Pairs elements with `zip`, so the result covers only the
    overlapping prefix of the two sequences; an empty overlap
    yields 0.0.
    """
    # Start the sum at 0.0 to keep the float accumulation of the
    # original left-to-right loop.
    return sum((a * b for a, b in zip(x, y)), 0.0)
def smult(a, x):
    """Return a new list equal to `x` scaled element-wise by scalar `a`."""
    return [a * value for value in x]
def add(x, y):
    """Element-wise sum of sequences `x` and `y` as a new list.

    Truncates to the shorter sequence (zip semantics).
    """
    return [u + v for u, v in zip(x, y)]
def sgn(x):
    """Sign function: +1 for positive, -1 for negative, 0 for zero."""
    if x > 0:
        return 1
    if x < 0:
        return -1
    return 0
def conv(df, head):
    """Recode column `head` of `df` in place as float64 category codes.

    Each distinct value in the column is replaced by its category
    index (0..n-1, in the category's default sorted order), stored
    as float64.  Returns None; mutates `df`.
    """
    # The original assigned `df[head].cat.categories = range(...)` —
    # that mutates a temporary Series accessor (chained assignment)
    # and the categories setter has been removed in modern pandas.
    # `cat.codes` yields the identical 0..n-1 coding directly.
    df[head] = df[head].astype('category').cat.codes.astype('float64')
# --- Load the bank-marketing data set -------------------------------
headers = ["age", "job", "marital", "education", "default", "balance", "housing", "loan", "contact", "day", "month", "duration", "campaign", "pdays", "previous", "poutcome", "y"]
df = pd.read_csv('bank.csv', names=headers)
total_records = len(df)

# Columns to recode as numeric category codes before training.
c_h = ["job", "marital", "education", "default", "housing", "loan", "contact", "month", "duration", "previous", "poutcome"]
# Explicit loop: the original used `map(...)` purely for its side
# effects, which is a silent no-op on Python 3 (map is lazy there).
for col in c_h:
    conv(df, col)

# Recode the target column to perceptron labels {-1.0, +1.0}.
# `rename_categories` replaces the removed `.cat.categories = [-1, 1]`
# accessor assignment while producing the same values.
df["y"] = df["y"].astype('category').cat.rename_categories([-1, 1]).astype('float64')

# 50/50 train/test split; fixed random_state for reproducibility.
# (The original also built an unused `np.random.randn(...) <= 0.5`
# mask — dead code, dropped.)
df_tr = df.sample(frac=0.5, random_state=627)
df_te = df.drop(df_tr.index)
tar_tr = df_tr.pop('y')
tar_te = df_te.pop('y')

# Mean-centred min-max normalisation: (x - mean) / (max - min).
# The original read `df - mean / (max - min)`, which — by operator
# precedence — divided only the mean, leaving the features unscaled.
norm_tr = (df_tr - df_tr.mean()) / (df_tr.max() - df_tr.min())
l_tr = norm_tr.values.tolist()
t_tr = tar_tr.tolist()
norm_te = (df_te - df_te.mean()) / (df_te.max() - df_te.min())
l_te = norm_te.values.tolist()
t_te = tar_te.tolist()

inputs = l_tr
outputs = t_tr
learning_rate = 0.1
n_inputs = len(inputs[0])
epochs = 100

# Initialise weights uniformly in [-1, 1) with a fixed seed so runs
# are repeatable.
r = random.Random(21)
w = [r.random() * 2 - 1 for _ in range(n_inputs)]
print(w)

# Test-set accuracy (%) before any training, as a baseline.
score = sum(1.0 for inp, out in zip(l_te, t_te) if out == eval_perceptron(inp, w))
print(score * 100 / len(t_te))

# Perceptron learning rule: w <- w + eta * (target - prediction) * x,
# applied online for a fixed number of epochs.
for _ in range(epochs):
    for inp, out in zip(inputs, outputs):
        pred = eval_perceptron(inp, w)
        w = add(w, smult(learning_rate * (out - pred), inp))
print(w)

# Accuracy (%) after training on the held-out test set, then on the
# training set itself.
score = sum(1.0 for inp, out in zip(l_te, t_te) if out == eval_perceptron(inp, w))
print(score * 100 / len(t_te))
score = sum(1.0 for inp, out in zip(l_tr, t_tr) if out == eval_perceptron(inp, w))
print(score * 100 / len(t_tr))