-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path perceptron.py
More file actions
99 lines (76 loc) · 2.99 KB
/
perceptron.py
File metadata and controls
99 lines (76 loc) · 2.99 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
# NOTE: the commented-out block below is an earlier draft, superseded by the
# implementation at the bottom of the file; kept for reference only.
# import numpy as np
# # Sigmoid Activation function
# def sigmoid(x):
# return 1 / (1 + np.exp(-x))
# # Derivative of sigmoid for backpropagation
# def sigmoid_derivative(x):
# return x * (1 - x)
# # Perceptron Class
# class SingleLayerPerceptron:
# def __init__(self, input_size, output_size, learning_rate=0.01):
# self.input_size = input_size
# self.output_size = output_size
# self.learning_rate = learning_rate
# self.weights = np.random.randn(input_size, output_size) # Random initial weights
# self.bias = np.random.randn(output_size) # Bias initialized randomly
# # Feedforward process
# def forward(self, X):
# self.input = X
# self.output = sigmoid(np.dot(self.input, self.weights) + self.bias)
# return self.output
# # Training the Perceptron using Backpropagation
# def train(self, X, Y, epochs=1000):
# for epoch in range(epochs):
# # Forward pass
# output = self.forward(X)
# # Calculate the error
# error = Y - output
# # Backpropagation (Gradient Descent)
# d_output = error * sigmoid_derivative(output)
# self.weights += np.dot(self.input.T, d_output) * self.learning_rate
# self.bias += np.sum(d_output, axis=0) * self.learning_rate
# if epoch % 100 == 0: # Print error every 100 epochs
# print(f"Epoch {epoch}, Error: {np.mean(np.abs(error))}")
# # Predict using trained model
# def predict(self, X):
# return np.round(self.forward(X))
# # Example usage
# if __name__ == "__main__":
# # Sample data (AND gate)
# X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# # Outputs (AND gate result)
# Y = np.array([[0],[0], [0], [1]])
# # Create a perceptron instance
# perceptron = SingleLayerPerceptron(input_size=2, output_size=1, learning_rate=0.1)
# # Train the model
# perceptron.train(X, Y, epochs=1000)
# # Test the model
# predictions = perceptron.predict(X)
# print(f"Predictions:\n{predictions}")
import numpy as np
def sigmoid(x):
    """Logistic activation: squashes any real input into the open interval (0, 1)."""
    decay = np.exp(-x)
    return 1 / (1 + decay)
def sigmoid_deriv(x):
    """Sigmoid derivative, written in terms of the sigmoid's OUTPUT x (not its input)."""
    complement = 1 - x
    return x * complement
# Simple perceptron class
class Perceptron:
    """Single-layer perceptron with one sigmoid output unit.

    Weights and bias are initialized uniformly at random in [0, 1).
    The input dimension is configurable (defaults to 2 for backward
    compatibility with the original hard-coded logic-gate setup).
    """

    def __init__(self, n_inputs=2):
        # One weight per input feature, single output column.
        self.w = np.random.rand(n_inputs, 1)
        self.b = np.random.rand(1)

    def train(self, X, Y, lr=0.1, epochs=1000):
        """Fit the weights to (X, Y) by full-batch gradient descent.

        X: array of shape (n_samples, n_inputs).
        Y: array of shape (n_samples, 1) with 0/1 targets.
        lr: learning rate; epochs: number of passes over the batch.
        Prints the mean absolute error every 100 epochs.
        """
        for epoch in range(epochs):
            # Forward pass through the single sigmoid layer.
            z = sigmoid(np.dot(X, self.w) + self.b)
            error = Y - z
            # Delta rule: scale the error by the sigmoid's local slope.
            update = error * sigmoid_deriv(z)
            self.w += np.dot(X.T, update) * lr
            self.b += np.sum(update) * lr
            if epoch % 100 == 0:
                print("Epoch", epoch, "Error:", round(np.mean(abs(error)), 4))

    def predict(self, X):
        """Return hard 0/1 predictions by rounding the sigmoid output."""
        return np.round(sigmoid(np.dot(X, self.w) + self.b))
# AND gate: enumerate the full two-bit truth table and its targets.
X = np.array([[a, b] for a in (0, 1) for b in (0, 1)])
Y = np.array([[int(a and b)] for a in (0, 1) for b in (0, 1)])
# Fit a fresh perceptron on the four examples and show its outputs.
p = Perceptron()
p.train(X, Y)
print("Predictions:\n", p.predict(X))