import numpy as np
# Parameters
learning_rate = 0.1
max_iterations = 1000
lower_threshold = 0.5  # Relaxed lower bound of the activation band
upper_threshold = 1.5  # Relaxed upper bound of the activation band
# Threshold activation function with two thresholds: the neuron fires only
# when the weighted sum falls strictly inside the (lower, upper) band
def activation_function(weighted_sum):
    if lower_threshold < weighted_sum < upper_threshold:
        return 1
    else:
        return 0
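
# For example, with the thresholds above:
#   activation_function(1.0) -> 1   (inside the band: 0.5 < 1.0 < 1.5)
#   activation_function(2.0) -> 0   (outside the band)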
# Inputs for XOR truth table
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = [0, 1, 1, 0] # XOR Truth Table
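
# Optional: fix the RNG seed so training runs are reproducible
# (the value 42 is an arbitrary illustrative choice; any seed works)
# np.random.seed(42)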
# Initialize weights for one hidden neuron and one output neuron
weights_input_to_hidden = np.random.rand(3) # 2 inputs, 1 bias → 1 hidden neuron (3 weights)
weights_hidden_to_output = np.random.rand(2) # 1 hidden neuron + 1 bias → 1 output neuron
# Training loop
epoch = 0
network_trained = False
while epoch < max_iterations:
    epoch += 1
    all_correct = True  # Tracks whether every pattern was classified correctly this epoch

    for input_vector, target in zip(inputs, targets):
        bias = 1  # Bias input

        # Forward pass (hidden layer)
        N3_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden)  # Hidden layer weighted sum
        N3 = activation_function(N3_input)  # Output of the hidden neuron

        # Output layer
        N4_input = N3 * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1]  # Weighted sum into the output neuron
        N4 = activation_function(N4_input)  # Output neuron decision

        # Error calculation
        error = target - N4

        # Perceptron-style weight updates when the prediction is wrong
        if error != 0:
            all_correct = False
            # Update hidden-to-output weights
            weights_hidden_to_output[0] += learning_rate * error * N3
            weights_hidden_to_output[1] += learning_rate * error * bias
            # Update input-to-hidden weights. Note this is a heuristic, not
            # backpropagation: the threshold unit has no gradient, so the
            # output error is simply scaled by the hidden activation N3
            # (these weights therefore only move while N3 is 1)
            weights_input_to_hidden += learning_rate * error * N3 * np.append(input_vector, bias)

    if all_correct:
        network_trained = True
        break
# Results
if network_trained:
    print(f"The network learned the XOR truth table correctly after {epoch} epochs.")
else:
    print(f"The network failed to learn the XOR truth table after {epoch} epochs.")
# Testing the trained network
print("\nTesting the trained network:")
for input_vector, target in zip(inputs, targets):
    bias = 1

    # Forward pass (hidden layer)
    N3_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden)
    N3 = activation_function(N3_input)

    # Output layer
    N4_input = N3 * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1]
    N4 = activation_function(N4_input)

    print(f"Input: {input_vector}, Target: {target}, Output: {N4}")