Building a neural network from scratch
# First, code up the activation function and its derivative
import numpy as np

# Activation function: the logistic sigmoid
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# Derivative of the sigmoid, expressed in terms of its output s = sigmoid(x).
# The training loop below passes in the already-activated prediction, so this
# form avoids applying the sigmoid twice.
def sigmoid_derivative(s):
    return s * (1 - s)
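A quick sanity check can confirm the two functions behave as expected (the values below are illustrative, not from the original post):

print(sigmoid(0.0))             # 0.5, since the sigmoid is centred at zero
print(sigmoid_derivative(0.5))  # 0.25, the derivative written in terms of the output s = 0.5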
# Loss function (in this case, we use Mean Squared Error)
def mse(y_true, y_pred):
    return ((y_true - y_pred) ** 2).mean()
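As a quick check (illustrative values, not part of the original code), a perfect prediction should give a loss of zero, and the loss should grow quadratically with the error:

print(mse(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 0.0
print(mse(np.array([1.0, 0.0]), np.array([0.5, 0.5])))  # 0.25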
# Forward pass: a single linear layer followed by a sigmoid activation
def forward_pass(X, weights, bias):
    weighted_sum = np.dot(X, weights) + bias
    y_pred = sigmoid(weighted_sum)
    return y_pred
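For a single sample with two inputs and one output, the forward pass returns one value in (0, 1). The inputs and weights below are made-up values just to illustrate the shapes:

example_X = np.array([[0.5, -1.0]])  # one sample, two features
example_w = np.zeros((2, 1))         # zero weights, so the weighted sum is just the bias
example_b = np.array([0.0])
print(forward_pass(example_X, example_w, example_b))  # [[0.5]] because sigmoid(0) = 0.5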
def train(X, y, weights, bias, learning_rate, epochs):
    for epoch in range(epochs):
        # Forward pass and loss
        y_pred = forward_pass(X, weights, bias)
        loss = mse(y, y_pred)

        # Backpropagation: push the error back through the sigmoid, then
        # through the linear layer to get the weight and bias updates
        error = y - y_pred
        grad = error * sigmoid_derivative(y_pred)
        weights += np.dot(X.T, grad) * learning_rate
        bias += np.sum(grad) * learning_rate

        if epoch % 3 == 0:
            print(f'Loss at epoch {epoch}: {loss}')
    return weights, bias
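To convince yourself that the analytic update really follows the loss, a finite-difference check is a useful sketch. The helper below (numerical_grad is a hypothetical name, not part of the original tutorial) estimates the gradient of the MSE with respect to each weight; train() steps in the opposite direction of this gradient, scaled by a constant positive factor, which is plain gradient descent up to that constant.

def numerical_grad(X, y, weights, bias, eps=1e-5):
    # Finite-difference estimate of d(MSE)/d(weights), one entry at a time
    grads = np.zeros_like(weights)
    for i in range(weights.shape[0]):
        for j in range(weights.shape[1]):
            w_plus = weights.copy()
            w_plus[i, j] += eps
            w_minus = weights.copy()
            w_minus[i, j] -= eps
            loss_plus = mse(y, forward_pass(X, w_plus, bias))
            loss_minus = mse(y, forward_pass(X, w_minus, bias))
            grads[i, j] = (loss_plus - loss_minus) / (2 * eps)
    return grads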
# Step 5: Train the neural network
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) # Input data
y = np.array([[0], [1], [1], [0]]) # True values (XOR operation)
input_dim = 2
output_dim = 1
weights = np.random.rand(input_dim, output_dim)
bias = np.random.rand(output_dim)
learning_rate = 0.1
epochs = 25
weights, bias = train(X, y, weights, bias, learning_rate, epochs)
Loss at epoch 0: 1.8820313527298902
Loss at epoch 3: 0.937545769754812
Loss at epoch 6: 0.675328492716165
Loss at epoch 9: 0.5824603296822226
Loss at epoch 12: 0.5424027005646788
Loss at epoch 15: 0.5229053367337908
Loss at epoch 18: 0.5127186642724431
Loss at epoch 21: 0.507172796108184
Loss at epoch 24: 0.5040808546626228
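Note that the loss levels off instead of approaching zero. This is expected: a single layer of weights followed by a sigmoid can only draw one linear decision boundary, and XOR is not linearly separable. Inspecting the trained predictions (a minimal sketch, not part of the original output) makes this visible:

print(forward_pass(X, weights, bias))  # four predictions; a single-layer model cannot map them to [0, 1, 1, 0]

Solving XOR properly requires at least one hidden layer, which is the usual next step when extending this network.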