DTree.py
import numpy as np
class DTree(object):
"""This is basic implementation of decision tree.
As of now it only works on data that has descrete feature values.
If any feature of input data has continuos value then this will not work.
TODO: Add support for continuous values as well.
"""
def __init__(self):
pass
    def make_tree(self, data, classes, feature_names=None, maxlevel=-1, level=0, forest=0):
        num_data = data.shape[0]
        num_features = data.shape[1]
        if num_data == 0:
            # No points reached this node; there is no class to report.
            return None
        new_classes = np.unique(classes)
        frequency = np.zeros(len(new_classes), dtype=float)
        total_entropy = 0
        total_gini = 0
        for classIndex, aclass in enumerate(new_classes):
            frequency[classIndex] = np.sum(classes == aclass)
            total_entropy += self.calc_entropy(frequency[classIndex] / num_data)
            # Gini term is the squared class probability, (p)**2, not p / n**2.
            total_gini += (frequency[classIndex] / num_data) ** 2
        total_gini = 1 - total_gini
        # Majority class at this node, used as the fallback prediction.
        default = int(new_classes[frequency.argmax()])
        if num_features == 0 or (maxlevel >= 0 and level > maxlevel):
            # Out of features, or past the depth limit: predict the majority class.
            return default
elif len(new_classes) == 1:
return int(new_classes[0])
else:
            gain = np.zeros(num_features)
            ggain = np.zeros(num_features)
            # np.arange rather than range, so the feature set can be shuffled
            # in place (range objects are not shuffleable in Python 3).
            featureSet = np.arange(num_features)
            if forest != 0:
                # Random-forest mode: consider only `forest` randomly chosen features.
                np.random.shuffle(featureSet)
                featureSet = featureSet[:forest]
            for feature in featureSet:
                g, gg = self.calc_info_gain(data, classes, feature)
                gain[feature] = total_entropy - g
                ggain[feature] = total_gini - gg
            # Split on the feature with the highest information gain (the Gini
            # gain is computed alongside but not used to pick the split).
            best_feature = int(gain.argmax())
f_values = np.unique(data[:, best_feature])
tree = {best_feature: {}}
            for value in f_values:
                points = np.where(data[:, best_feature] == value)[0]
                # Drop the consumed feature's column, so feature indices inside
                # the subtree refer to this reduced feature space.
                new_data = np.concatenate(
                    [data[points, :best_feature], data[points, best_feature+1:]], axis=1)
                new_data_classes = classes[points]
                subtree = self.make_tree(
                    new_data, new_data_classes, maxlevel=maxlevel, level=level + 1,
                    forest=forest)
                tree[best_feature][value] = subtree
            return tree
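    # The tree built above is a nested dict: each internal node maps a feature
    # index to a {feature value: subtree-or-class-label} dict, e.g. (with
    # hypothetical values) {0: {'sunny': 1, 'rainy': {1: {...}}}}.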
    def classify(self, tree, data):
        if isinstance(tree, dict):
            # Each internal node is a single-key dict; dict.keys() is not
            # indexable in Python 3, so take the key via an iterator.
            feature = next(iter(tree))
            value = data[feature]
            # Drop the consumed feature before recursing, mirroring the column
            # removal in make_tree, so subtree indices line up with the data.
            rest = np.concatenate([data[:feature], data[feature + 1:]])
            return self.classify(tree[feature][value], rest)
        else:
            return tree
def calc_entropy(self, p):
if p != 0:
return -1 * p * np.log2(p)
else:
return 0
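    # Worked example: for a 50/50 two-class split, each class contributes
    # calc_entropy(0.5) = -0.5 * log2(0.5) = 0.5 bits, so the node's total
    # entropy is 1 bit; a pure node (p = 1) contributes -1 * log2(1) = 0 bits.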
    def calc_info_gain(self, data, classes, feature, continuous=False):
        # Flatten so the comparisons below behave the same for (n,) and (n, 1) labels.
        classes = np.asarray(classes).ravel()
        f_classes = np.unique(classes)
        if not continuous:
            f_values = np.unique(data[:, feature])
            # Class counts per feature value; converted to probabilities below.
            entropy = np.zeros((len(f_values), len(f_classes)), dtype=float)
            for valueIndex in range(len(f_values)):
                points = np.where(data[:, feature] == f_values[valueIndex])[0]
                # Reshape to a column so the comparison broadcasts to
                # (num_points, num_classes) before summing over points.
                entropy[valueIndex, :] = np.sum(
                    classes[points].reshape(-1, 1) == f_classes, axis=0)
        else:
            # Continuous feature: binary split at the mean.
            mean = np.mean(data[:, feature], axis=0)
            entropy = np.zeros((2, len(f_classes)), dtype=float)
            points = np.where(data[:, feature] <= mean)[0]
            entropy[0, :] = np.sum(classes[points].reshape(-1, 1) == f_classes, axis=0)
            points = np.where(data[:, feature] > mean)[0]
            entropy[1, :] = np.sum(classes[points].reshape(-1, 1) == f_classes, axis=0)
        points_per_value = entropy.sum(axis=1)
        # Guard against empty value bins: their probabilities stay 0 and their
        # weight below is 0 anyway, so dividing by 1 instead of 0 is harmless.
        safe_counts = np.where(points_per_value == 0, 1, points_per_value)
        entropy /= safe_counts.reshape(-1, 1)
        # Mask log2(0) by substituting 1, since the matching probability is 0
        # and 0 * log2(1) = 0.
        logp = entropy.copy()
        logp[logp == 0] = 1
        logp = np.log2(logp)
        ggain = np.sum(entropy ** 2, axis=1)
        entropy = -1 * entropy * logp
        # Average the per-bin entropy / Gini terms, weighted by bin size.
        gain = np.sum(entropy.sum(axis=1) * points_per_value / data.shape[0])
        ggain = np.sum(ggain * points_per_value / data.shape[0])
        return gain, 1 - ggain
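

# Minimal usage sketch (not part of the original class): a tiny hypothetical
# dataset with two discrete features, where class 1 occurs only when both
# features are 1. Builds a tree and classifies one of the training points.
if __name__ == "__main__":
    data = np.array([[0, 0],
                     [0, 1],
                     [1, 0],
                     [1, 1]])
    classes = np.array([0, 0, 0, 1])  # hypothetical labels
    dt = DTree()
    tree = dt.make_tree(data, classes)
    print(tree)                        # nested dict of feature splits
    print(dt.classify(tree, data[3]))  # expected: 1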