import numpy as np

# Band-threshold activation limits: a neuron fires (outputs 1) only when its
# weighted input falls strictly inside (lower_threshold, upper_threshold).
lower_threshold = 0.8
upper_threshold = 1.2

# Step size for the perceptron-style weight updates.
learning_rate = 0.1

# XOR truth table: all 4 combinations of 2 binary inputs and their targets.
inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
targets = [0, 1, 1, 0]

# Training budget and bookkeeping.
max_iterations = 1000
epoch = 0
network_trained = False

# Small random start weights; layout is [w_N1, w_N2, w_bias] for the hidden
# neuron and [w_N3, w_bias] for the output neuron.
weights_input_to_hidden = np.random.rand(3) * 0.5   # 2 inputs + bias (3 weights)
weights_hidden_to_output = np.random.rand(2) * 0.5  # 1 hidden neuron + bias (2 weights)

# Training loop
while epoch < max_iterations:
    epoch += 1
    all_correct = True  # Flag to check if all outputs are correct

    # Work on copies during the epoch; they are committed back below.
    current_weights_input_to_hidden = weights_input_to_hidden.copy()
    current_weights_hidden_to_output = weights_hidden_to_output.copy()

    # Loop through each training example
    for input_vector, target in zip(inputs, targets):
        N1, N2 = input_vector
        bias = 1  # Bias input

        # Forward pass, hidden neuron (N3): weighted sum + band threshold.
        N3_input = np.dot(np.array([N1, N2, bias]), current_weights_input_to_hidden)
        N3 = 1 if lower_threshold < N3_input < upper_threshold else 0

        # Forward pass, output neuron (N4): weighted sum + band threshold.
        N4_input = N3 * current_weights_hidden_to_output[0] + bias * current_weights_hidden_to_output[1]
        N4 = 1 if lower_threshold < N4_input < upper_threshold else 0

        # Perceptron-style error signal: +1, 0, or -1.
        error = target - N4
        if error != 0:
            all_correct = False
            # Update the weights for the hidden-to-output connection.
            current_weights_hidden_to_output[0] += learning_rate * error * N3
            current_weights_hidden_to_output[1] += learning_rate * error * bias
            # Update the weights for the input-to-hidden connection (scaled by
            # the hidden activation N3, so they only move when N3 fired).
            current_weights_input_to_hidden += learning_rate * error * N3 * np.array([N1, N2, bias])

    # BUG FIX: persist this epoch's updates. The original threw the updated
    # copies away and re-copied the unchanged start weights every epoch, so
    # all epochs between two resets repeated exactly the same computation and
    # learning could never accumulate.
    weights_input_to_hidden = current_weights_input_to_hidden
    weights_hidden_to_output = current_weights_hidden_to_output

    # Check if all outputs are correct for this epoch
    if all_correct:
        network_trained = True
        break  # Stop if the network has learned the XOR function

    # Every 100 epochs without success, restart from fresh random weights.
    if epoch % 100 == 0:
        print(f"Nicht funktionierende Startgewichte nach {epoch} Iterationen.")
        weights_input_to_hidden = np.random.rand(3) * 0.5   # Reset input-to-hidden weights
        weights_hidden_to_output = np.random.rand(2) * 0.5  # Reset hidden-to-output weights
# Report whether training converged within the iteration budget.
if network_trained:
    print(f"Das Netzwerk hat XOR korrekt nach {epoch} Iterationen gelernt.")
else:
    print(f"Das Netzwerk hat XOR nach {epoch} Iterationen nicht korrekt gelernt.")

# Replay the forward pass once per XOR pattern with the weights left over
# from the training loop and show target vs. actual output.
print("\nFinal Test Output:")
bias = 1  # constant bias input, hoisted out of the loop
for pattern, expected in zip(inputs, targets):
    x1, x2 = pattern
    # Hidden neuron: band-threshold activation on the weighted input sum.
    hidden_sum = np.dot(np.array([x1, x2, bias]), current_weights_input_to_hidden)
    hidden_out = int(lower_threshold < hidden_sum < upper_threshold)
    # Output neuron: same band-threshold activation on hidden + bias sum.
    output_sum = (hidden_out * current_weights_hidden_to_output[0]
                  + bias * current_weights_hidden_to_output[1])
    output = int(lower_threshold < output_sum < upper_threshold)
    print(f"Input: {pattern}, Target: {expected}, Output: {output}")