import numpy as np
# Parameters
# Hyperparameters for the perceptron-style training loop below.
learning_rate: float = 0.1  # step size applied to every weight correction
max_iterations: int = 1000  # epochs to attempt before declaring failure
# Activation function with thresholding
def activation_function(weighted_sum, threshold=0.5):
    """Hard step activation: 1 when weighted_sum is strictly above threshold, else 0."""
    return int(weighted_sum > threshold)
# Inputs for XOR truth table
inputs = np.array(
    [
        [0, 0],
        [0, 1],
        [1, 0],
        [1, 1],
    ]
)
targets = [0, 1, 1, 0]  # XOR Truth Table

# Random initial weights, drawn uniformly from [0, 1).
# Input->hidden layout: [w11, w12, b1, w21, w22, b2]
# (3 weights per hidden neuron: 2 inputs + 1 bias).
weights_input_to_hidden = np.random.rand(6)
# Hidden->output layout: [v1, v2, b_out] (2 hidden outputs + 1 bias).
weights_hidden_to_output = np.random.rand(3)
# Training loop
# NOTE(review): this is a perceptron-style update pushed through a hard step
# activation, not true backpropagation — activation_function has no usable
# gradient, and the input->hidden update below is gated by each hidden
# neuron's own 0/1 output (a 0 output freezes that neuron's weights for the
# sample). Convergence on XOR therefore depends on the random
# initialization and frequently fails within max_iterations.
epoch = 0
network_trained = False
while epoch < max_iterations:
    epoch += 1
    all_correct = True  # Flag to track if all outputs are correct this epoch
    for input_vector, target in zip(inputs, targets):
        bias = 1  # Bias input
        # Augmented input [x1, x2, bias], built once per sample
        # (previously np.append was recomputed four times per sample).
        augmented = np.append(input_vector, bias)

        # Forward pass: each hidden neuron owns 3 weights (2 inputs + bias).
        N1_hidden = activation_function(np.dot(augmented, weights_input_to_hidden[:3]))
        N2_hidden = activation_function(np.dot(augmented, weights_input_to_hidden[3:]))

        # Combine hidden neuron outputs and pass through to output neuron.
        N4_input = (N1_hidden * weights_hidden_to_output[0] +
                    N2_hidden * weights_hidden_to_output[1] +
                    bias * weights_hidden_to_output[2])
        N4 = activation_function(N4_input)  # Final output

        # Perceptron error: -1, 0 or +1.
        error = target - N4

        # Weight updates only when the prediction was wrong.
        if error != 0:
            all_correct = False
            # Update weights for hidden -> output.
            weights_hidden_to_output[0] += learning_rate * error * N1_hidden
            weights_hidden_to_output[1] += learning_rate * error * N2_hidden
            weights_hidden_to_output[2] += learning_rate * error * bias
            # Update weights for input -> hidden (gated by each hidden output).
            weights_input_to_hidden[:3] += learning_rate * error * N1_hidden * augmented
            weights_input_to_hidden[3:] += learning_rate * error * N2_hidden * augmented
    if all_correct:
        network_trained = True
        break
# Results
# Report whether an epoch with zero errors was reached before the cap.
outcome = (
    f"The network learned the XOR truth table correctly after {epoch} iterations."
    if network_trained
    else f"The network failed to learn the XOR truth table after {epoch} iterations."
)
print(outcome)
# Testing the trained network
# Re-run the forward pass over all four XOR patterns with the final weights.
print("\nTesting the trained network:")
for input_vector, target in zip(inputs, targets):
    bias = 1
    # Augmented input [x1, x2, bias], shared by both hidden neurons
    # (previously np.append was recomputed for each neuron).
    augmented = np.append(input_vector, bias)

    # Forward pass (for both hidden neurons).
    N1_hidden = activation_function(np.dot(augmented, weights_input_to_hidden[:3]))
    N2_hidden = activation_function(np.dot(augmented, weights_input_to_hidden[3:]))

    # Combine hidden neuron outputs and pass through to output neuron.
    N4_input = (N1_hidden * weights_hidden_to_output[0] +
                N2_hidden * weights_hidden_to_output[1] +
                bias * weights_hidden_to_output[2])
    N4 = activation_function(N4_input)
    print(f"Input: {input_vector}, Target: {target}, Output: {N4}")