import numpy as np
# Hyperparameters for the perceptron training loop below.
learning_rate = 0.1  # step size applied to every weight update
max_iterations = 1000  # epoch budget before giving up on convergence
# Threshold (step) activation
def activation_function(weighted_sum, threshold=0.5):
    """Binary step activation: 1 when the weighted sum exceeds *threshold*, else 0."""
    if weighted_sum > threshold:
        return 1
    return 0
# The four input patterns of the XOR truth table.
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
targets = [0, 1, 1, 0]  # expected XOR output for each pattern, in order
# Random initial weights for one hidden neuron and one output neuron.
# NOTE(review): a single hidden threshold unit feeding a single threshold
# output can only realize a half-space (or its complement), so XOR is not
# representable with this topology; the training loop is expected to fail.
# At least two hidden units would be required -- confirm intent.
weights_input_to_hidden = np.random.rand(3) # 2 inputs + 1 bias -> 1 hidden neuron (3 weights)
weights_hidden_to_output = np.random.rand(2) # 1 hidden neuron + 1 bias -> 1 output neuron
# Training loop: perceptron-style weight updates, repeated until every XOR
# pattern is classified correctly or the epoch budget is exhausted.
# NOTE(review): with only one hidden threshold unit this network cannot
# represent XOR, so the loop is expected to run out of iterations.
epoch = 0
network_trained = False
bias = 1  # constant bias input, shared by both layers (loop-invariant)

while epoch < max_iterations:
    epoch += 1
    all_correct = True  # cleared as soon as any pattern is misclassified

    for input_vector, target in zip(inputs, targets):
        # Forward pass (hidden layer): weighted sum of inputs + bias.
        N3_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden)
        N3 = activation_function(N3_input)  # hidden neuron decision (0 or 1)

        # Forward pass (output layer).
        N4_input = N3 * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1]
        N4 = activation_function(N4_input)  # output neuron decision (0 or 1)

        # Perceptron error signal: -1, 0, or +1.
        error = target - N4

        if error != 0:
            all_correct = False
            # Hidden -> output weights (standard perceptron rule).
            weights_hidden_to_output[0] += learning_rate * error * N3
            weights_hidden_to_output[1] += learning_rate * error * bias
            # Input -> hidden weights.
            # NOTE(review): the N3 factor zeroes this update whenever the
            # hidden unit outputs 0, freezing the hidden weights on those
            # patterns -- confirm this gating is intended.
            weights_input_to_hidden += learning_rate * error * N3 * np.append(input_vector, bias)

    if all_correct:
        network_trained = True
        break
# Report whether training converged within the epoch budget.
if network_trained:
    print(f"The network learned the XOR truth table correctly after {epoch} iterations.")
else:
    print(f"The network failed to learn the XOR truth table after {epoch} iterations.")

# Evaluate the trained network: one forward pass per XOR pattern, printing
# the network's decision next to the expected target.
print("\nTesting the trained network:")
bias = 1  # constant bias input (loop-invariant)
for input_vector, target in zip(inputs, targets):
    # Hidden layer forward pass.
    N3_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden)
    N3 = activation_function(N3_input)

    # Output layer forward pass.
    N4_input = N3 * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1]
    N4 = activation_function(N4_input)

    print(f"Input: {input_vector}, Target: {target}, Output: {N4}")
aW1wb3J0IG51bXB5IGFzIG5wCgojIFBhcmFtZXRlcnMKbGVhcm5pbmdfcmF0ZSA9IDAuMQptYXhfaXRlcmF0aW9ucyA9IDEwMDAKCiMgVGhyZXNob2xkIEFjdGl2YXRpb24gRnVuY3Rpb24KZGVmIGFjdGl2YXRpb25fZnVuY3Rpb24od2VpZ2h0ZWRfc3VtLCB0aHJlc2hvbGQ9MC41KToKICAgIHJldHVybiAxIGlmIHdlaWdodGVkX3N1bSA+IHRocmVzaG9sZCBlbHNlIDAKCiMgSW5wdXRzIGZvciBYT1IgdHJ1dGggdGFibGUKaW5wdXRzID0gbnAuYXJyYXkoW1swLCAwXSwgWzAsIDFdLCBbMSwgMF0sIFsxLCAxXV0pCnRhcmdldHMgPSBbMCwgMSwgMSwgMF0gICMgWE9SIFRydXRoIFRhYmxlCgojIEluaXRpYWxpemUgd2VpZ2h0cyBmb3Igb25lIGhpZGRlbiBuZXVyb24gYW5kIG9uZSBvdXRwdXQgbmV1cm9uCndlaWdodHNfaW5wdXRfdG9faGlkZGVuID0gbnAucmFuZG9tLnJhbmQoMykgICMgMiBpbnB1dHMsIDEgYmlhcyDihpIgMSBoaWRkZW4gbmV1cm9uICgzIHdlaWdodHMpCndlaWdodHNfaGlkZGVuX3RvX291dHB1dCA9IG5wLnJhbmRvbS5yYW5kKDIpICAjIDEgaGlkZGVuIG5ldXJvbiArIDEgYmlhcyDihpIgMSBvdXRwdXQgbmV1cm9uCgojIFRyYWluaW5nIGxvb3AKZXBvY2ggPSAwCm5ldHdvcmtfdHJhaW5lZCA9IEZhbHNlCgp3aGlsZSBlcG9jaCA8IG1heF9pdGVyYXRpb25zOgogICAgZXBvY2ggKz0gMQogICAgYWxsX2NvcnJlY3QgPSBUcnVlICAjIEZsYWcgdG8gdHJhY2sgaWYgYWxsIG91dHB1dHMgYXJlIGNvcnJlY3QKCiAgICBmb3IgaW5wdXRfdmVjdG9yLCB0YXJnZXQgaW4gemlwKGlucHV0cywgdGFyZ2V0cyk6CiAgICAgICAgTjEsIE4yID0gaW5wdXRfdmVjdG9yCiAgICAgICAgYmlhcyA9IDEgICMgQmlhcyBpbnB1dAoKICAgICAgICAjIEZvcndhcmQgcGFzcyAoaGlkZGVuIGxheWVyKQogICAgICAgIE4zX2lucHV0ID0gbnAuZG90KG5wLmFwcGVuZChpbnB1dF92ZWN0b3IsIGJpYXMpLCB3ZWlnaHRzX2lucHV0X3RvX2hpZGRlbikgICMgSGlkZGVuIGxheWVyIHN1bQogICAgICAgIE4zID0gYWN0aXZhdGlvbl9mdW5jdGlvbihOM19pbnB1dCkgICMgT3V0cHV0IG9mIHRoZSBoaWRkZW4gbmV1cm9uCgogICAgICAgICMgT3V0cHV0IGxheWVyCiAgICAgICAgTjRfaW5wdXQgPSAoTjMgKiB3ZWlnaHRzX2hpZGRlbl90b19vdXRwdXRbMF0gKyBiaWFzICogd2VpZ2h0c19oaWRkZW5fdG9fb3V0cHV0WzFdKSAgIyBXZWlnaHRlZCBzdW0gdG8gb3V0cHV0CiAgICAgICAgTjQgPSBhY3RpdmF0aW9uX2Z1bmN0aW9uKE40X2lucHV0KSAgIyBPdXRwdXQgbmV1cm9uIGRlY2lzaW9uCgogICAgICAgICMgRXJyb3IgY2FsY3VsYXRpb24KICAgICAgICBlcnJvciA9IHRhcmdldCAtIE40CgogICAgICAgICMgV2VpZ2h0IHVwZGF0ZXMgaWYgZXJyb3IgZXhpc3RzCiAgICAgICAgaWYgZXJyb3IgIT0gMDoKICAgICAgICAgICAgYWxsX2NvcnJlY3QgPSBGYWxzZQoKICAgICAgICAgICAgIyBVcGRhdGUgd2VpZ2h0cyBmb3IgaGlkZGVuIHRvIG91dHB1dCBu
ZXVyb24KICAgICAgICAgICAgd2VpZ2h0c19oaWRkZW5fdG9fb3V0cHV0WzBdICs9IGxlYXJuaW5nX3JhdGUgKiBlcnJvciAqIE4zCiAgICAgICAgICAgIHdlaWdodHNfaGlkZGVuX3RvX291dHB1dFsxXSArPSBsZWFybmluZ19yYXRlICogZXJyb3IgKiBiaWFzCgogICAgICAgICAgICAjIFVwZGF0ZSB3ZWlnaHRzIGZvciBpbnB1dCB0byBoaWRkZW4gbmV1cm9uCiAgICAgICAgICAgIHdlaWdodHNfaW5wdXRfdG9faGlkZGVuICs9IGxlYXJuaW5nX3JhdGUgKiBlcnJvciAqIE4zICogbnAuYXBwZW5kKGlucHV0X3ZlY3RvciwgYmlhcykKCiAgICBpZiBhbGxfY29ycmVjdDoKICAgICAgICBuZXR3b3JrX3RyYWluZWQgPSBUcnVlCiAgICAgICAgYnJlYWsKCiMgUmVzdWx0cwppZiBuZXR3b3JrX3RyYWluZWQ6CiAgICBwcmludChmIlRoZSBuZXR3b3JrIGxlYXJuZWQgdGhlIFhPUiB0cnV0aCB0YWJsZSBjb3JyZWN0bHkgYWZ0ZXIge2Vwb2NofSBpdGVyYXRpb25zLiIpCmVsc2U6CiAgICBwcmludChmIlRoZSBuZXR3b3JrIGZhaWxlZCB0byBsZWFybiB0aGUgWE9SIHRydXRoIHRhYmxlIGFmdGVyIHtlcG9jaH0gaXRlcmF0aW9ucy4iKQoKIyBUZXN0aW5nIHRoZSB0cmFpbmVkIG5ldHdvcmsKcHJpbnQoIlxuVGVzdGluZyB0aGUgdHJhaW5lZCBuZXR3b3JrOiIpCmZvciBpbnB1dF92ZWN0b3IsIHRhcmdldCBpbiB6aXAoaW5wdXRzLCB0YXJnZXRzKToKICAgIE4xLCBOMiA9IGlucHV0X3ZlY3RvcgogICAgYmlhcyA9IDEKCiAgICAjIEZvcndhcmQgcGFzcyAoaGlkZGVuIGxheWVyKQogICAgTjNfaW5wdXQgPSBucC5kb3QobnAuYXBwZW5kKGlucHV0X3ZlY3RvciwgYmlhcyksIHdlaWdodHNfaW5wdXRfdG9faGlkZGVuKQogICAgTjMgPSBhY3RpdmF0aW9uX2Z1bmN0aW9uKE4zX2lucHV0KQoKICAgICMgT3V0cHV0IGxheWVyCiAgICBONF9pbnB1dCA9IChOMyAqIHdlaWdodHNfaGlkZGVuX3RvX291dHB1dFswXSArIGJpYXMgKiB3ZWlnaHRzX2hpZGRlbl90b19vdXRwdXRbMV0pCiAgICBONCA9IGFjdGl2YXRpb25fZnVuY3Rpb24oTjRfaW5wdXQpCgogICAgcHJpbnQoZiJJbnB1dDoge2lucHV0X3ZlY3Rvcn0sIFRhcmdldDoge3RhcmdldH0sIE91dHB1dDoge040fSIpCg==
The network failed to learn the XOR truth table after 1000 iterations.
Testing the trained network:
Input: [0 0], Target: 0, Output: 0
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 1, Output: 0
Input: [1 1], Target: 0, Output: 0