import numpy as np
class Perceptron:
    """A simple perceptron classifier for binary (0/1) labels."""

    def __init__(self, learning_rate=0.01, n_iterations=1000):
        self.learning_rate = learning_rate
        self.n_iterations = n_iterations
        self.weights = None
        self.bias = None

    def fit(self, X, y):
        n_samples, n_features = X.shape

        # Initialize weights and bias to zero
        self.weights = np.zeros(n_features)
        self.bias = 0.0

        # This implementation assumes y contains 0/1 labels for binary
        # classification. If your labels are -1/+1, convert them first,
        # e.g. y = np.where(y > 0, 1, 0) (see the to_binary_labels helper
        # sketched after this class).
        for _ in range(self.n_iterations):
            for idx, x_i in enumerate(X):
                # Linear combination of inputs, weights, and bias
                linear_output = np.dot(x_i, self.weights) + self.bias

                # Step activation: the classic perceptron outputs 0 or 1
                # (a sigmoid would give a smoother variant if desired)
                prediction = 1 if linear_output >= 0 else 0

                # Perceptron update rule: weights and bias move only when
                # the prediction is wrong, i.e. the error term is non-zero
                update = self.learning_rate * (y[idx] - prediction)
                self.weights += update * x_i
                self.bias += update

    def predict(self, X):
        linear_output = np.dot(X, self.weights) + self.bias
        # Apply the step activation to map scores to 0/1 class labels
        return np.where(linear_output >= 0, 1, 0)
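

# Two small helpers sketched here as assumptions (they are not part of the
# original class). to_binary_labels performs the -1/+1 -> 0/1 label
# conversion suggested in the fit comments; sigmoid is the smoother
# activation those comments mention. Note that thresholding sigmoid(z) at
# 0.5 is equivalent to thresholding z at 0, so predictions are unchanged.
def to_binary_labels(y):
    """Map sign-coded labels (e.g. -1/+1) to the 0/1 encoding fit expects."""
    return np.where(np.asarray(y) > 0, 1, 0)


def sigmoid(z):
    """Logistic sigmoid, a smooth alternative to the hard step activation."""
    return 1.0 / (1.0 + np.exp(-z))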


# Example usage: train the perceptron on a simple AND gate
if __name__ == "__main__":
    # Truth table inputs and targets for AND
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([0, 0, 0, 1])

    perceptron = Perceptron(learning_rate=0.1, n_iterations=100)
    perceptron.fit(X, y)

    print("Weights:", perceptron.weights)
    print("Bias:", perceptron.bias)

    test_X = np.array([[0, 0], [0, 1], [1, 0], [1, 1], [0.5, 0.5]])
    predictions = perceptron.predict(test_X)
    print("Predictions for test_X:", predictions)