import numpy as np
# Parameters
# The activation is a *band* (window) threshold: a neuron fires only when its
# weighted input falls strictly between these two bounds.
lower_threshold = 0.8
upper_threshold = 1.2
learning_rate = 0.1     # step size for the perceptron-style weight updates
max_iterations = 1000   # training-epoch budget before giving up
# Training data (XOR problem)
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = np.array([0, 1, 1, 0])
# Initialize weights for connections: Input -> Hidden, Hidden -> Output
# NOTE: np.random.rand is unseeded, so each run starts from different weights
# and may or may not converge.
weights_input_hidden = np.random.rand(2, 2) # 2 input neurons, 2 hidden neurons
weights_hidden_output = np.random.rand(2) # 2 hidden neurons to 1 output neuron
# Function to apply the threshold activation
def activation_function(weighted_sum, lower=None, upper=None):
    """Band (window) threshold activation.

    The neuron fires (returns 1) only when ``weighted_sum`` lies strictly
    inside the open interval ``(lower, upper)``; otherwise it returns 0.

    Args:
        weighted_sum: Pre-activation value (dot product of inputs and weights).
        lower: Lower bound of the firing window. When None (the default, which
            keeps the original call signature working), falls back to the
            module-level ``lower_threshold``.
        upper: Upper bound of the firing window. When None, falls back to the
            module-level ``upper_threshold``.

    Returns:
        int: 1 if the neuron fires, 0 otherwise.
    """
    if lower is None:
        lower = lower_threshold
    if upper is None:
        upper = upper_threshold
    return 1 if lower < weighted_sum < upper else 0
# ---------------------------------------------------------------------------
# Training loop: sweep the four XOR examples each epoch, nudging weights on
# every misclassification, until a full epoch passes with no errors or the
# iteration budget is exhausted.
# ---------------------------------------------------------------------------
epoch = 0
network_trained = False
start_weights_input_hidden = weights_input_hidden.copy()
start_weights_hidden_output = weights_hidden_output.copy()
for epoch in range(1, max_iterations + 1):
    all_correct = True  # assume success until an example is misclassified
    for input_vector, target in zip(inputs, targets):
        # Forward pass: input layer -> hidden layer -> output neuron.
        hidden_input = np.dot(input_vector, weights_input_hidden)
        hidden_output = np.array([activation_function(h) for h in hidden_input])
        final_input = np.dot(hidden_output, weights_hidden_output)
        output = activation_function(final_input)
        error = target - output
        if error == 0:
            continue  # example already correct; no weight change needed
        all_correct = False
        # Hidden -> Output weights: perceptron-style correction scaled by
        # each hidden neuron's (binary) activation.
        weights_hidden_output += learning_rate * error * hidden_output
        # Input -> Hidden weights: only the columns feeding hidden neurons
        # that actually fired receive a correction.
        for j in range(2):
            if hidden_output[j] > 0:
                weights_input_hidden[:, j] += learning_rate * error * input_vector
    if all_correct:
        network_trained = True
        break  # every example classified correctly — stop early
# ---------------------------------------------------------------------------
# Report: training outcome, predictions on every XOR example, and the weight
# matrices before vs. after training.
# ---------------------------------------------------------------------------
if network_trained:
    print(f"The network learned XOR correctly after {epoch} iterations.")
else:
    print(f"The network did not learn XOR correctly after {epoch} iterations.")
print("\nTesting the trained network:")
for input_vector, target in zip(inputs, targets):
    # Re-run the forward pass with the final weights.
    hidden_activations = np.array(
        [activation_function(h) for h in np.dot(input_vector, weights_input_hidden)]
    )
    prediction = activation_function(np.dot(hidden_activations, weights_hidden_output))
    print(f"Input: {input_vector}, Target: {target}, Output: {prediction}")
# Dump all four weight matrices with their labels.
for label, matrix in (
    ("Initial Weights (Input -> Hidden)", start_weights_input_hidden),
    ("Initial Weights (Hidden -> Output)", start_weights_hidden_output),
    ("Final Weights (Input -> Hidden)", weights_input_hidden),
    ("Final Weights (Hidden -> Output)", weights_hidden_output),
):
    print(f"\n{label}:")
    print(matrix)