import numpy as np
# Hyperparameters
learning_rate = 0.1
max_iterations = 1000
lower_threshold = 0.8  # Activation fires only strictly above this bound...
upper_threshold = 1.2  # ...and strictly below this one

# Band (window) activation with two thresholds
def activation_function(weighted_sum):
    """Return 1 when *weighted_sum* lies strictly inside the open interval
    (lower_threshold, upper_threshold), otherwise 0."""
    inside_band = lower_threshold < weighted_sum < upper_threshold
    return 1 if inside_band else 0
# Enumerate every 2-input boolean function. `inputs` holds the four possible
# input pairs; table k's four outputs are exactly the 4 bits of k written
# most-significant-bit first, which yields all 16 truth tables (0..15).
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
all_targets = [[(k >> bit) & 1 for bit in (3, 2, 1, 0)] for k in range(16)]
# Initialize weights for the neurons (uniform random in [0, 1)).
# NOTE(review): these weights are created once and then shared across the
# training of all 16 truth tables below — each table continues from the
# previous table's weights rather than starting fresh; confirm intent.
weights_input_to_hidden = np.random.rand(3) # 2 inputs + 1 bias → 1 hidden neuron
weights_hidden_to_output = np.random.rand(2) # 1 hidden neuron + 1 bias → 1 output neuron
# Training loop: train the 2-1-1 threshold network on each of the 16
# two-input truth tables in turn, and record whether every table converged.
network_trained = True  # becomes False if any table fails to converge
for target in all_targets:  # Train for all possible tables
    # Fresh random weights per table: otherwise each table would start from
    # (and destroy) the previous table's solution, and the final weights
    # would only ever reflect table 15 — which is why the original run
    # printed Output: 1 for every test pattern.
    weights_input_to_hidden = np.random.rand(3)
    weights_hidden_to_output = np.random.rand(2)

    table_learned = False
    epoch = 0
    while epoch < max_iterations:
        epoch += 1
        all_correct = True  # Flag to track if all outputs are correct

        for input_vector, expected_target in zip(inputs, target):
            bias = 1  # Constant bias input

            # Forward pass: hidden neuron (weighted sum, then band activation).
            N3_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden)
            N3 = activation_function(N3_input)

            # Forward pass: output neuron.
            N4_input = N3 * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1]
            N4 = activation_function(N4_input)

            # Perceptron-style error-driven update (heuristic — the band
            # activation has no useful gradient, so this is not backprop).
            error = expected_target - N4
            if error != 0:
                all_correct = False
                # Hidden → output weights.
                weights_hidden_to_output[0] += learning_rate * error * N3
                weights_hidden_to_output[1] += learning_rate * error * bias
                # Input → hidden weights. The original multiplied this update
                # by N3 as well, which froze the hidden weights whenever the
                # hidden neuron output 0; update from the raw inputs instead.
                weights_input_to_hidden += learning_rate * error * np.append(input_vector, bias)

        if all_correct:
            table_learned = True
            break

    if not table_learned:
        network_trained = False  # at least one table never converged
# Results
# NOTE(review): under the loop above, `epoch` is the iteration counter of the
# *last* table trained, and `network_trained` was last assigned during that
# same table's training — yet the message speaks of "the truth tables" as a
# whole. Confirm whether a per-table report was intended.
if network_trained:
    print(f"The network learned the truth tables correctly after {epoch} iterations.")
else:
    print(f"The network failed to learn the truth tables after {epoch} iterations.")
# Testing the trained network
print("\nTesting the trained network:")

def _predict(pattern):
    """One forward pass of the 2-1-1 threshold network for *pattern*."""
    bias = 1
    hidden_sum = np.dot(np.append(pattern, bias), weights_input_to_hidden)
    hidden_out = activation_function(hidden_sum)
    output_sum = hidden_out * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1]
    return activation_function(output_sum)

# Report the network's output on every pattern of every truth table.
for target_index, target in enumerate(all_targets):
    print(f"Testing Table {target_index}:")
    for input_vector, expected_target in zip(inputs, target):
        N4 = _predict(input_vector)
        print(f"Input: {input_vector}, Target: {expected_target}, Output: {N4}")
aW1wb3J0IG51bXB5IGFzIG5wCgojIFBhcmFtZXRlcnMKbGVhcm5pbmdfcmF0ZSA9IDAuMQptYXhfaXRlcmF0aW9ucyA9IDEwMDAKbG93ZXJfdGhyZXNob2xkID0gMC44ICAjIExvd2VyIHRocmVzaG9sZCBmb3IgYWN0aXZhdGlvbgp1cHBlcl90aHJlc2hvbGQgPSAxLjIgICMgVXBwZXIgdGhyZXNob2xkIGZvciBhY3RpdmF0aW9uCgojIFRocmVzaG9sZCBhY3RpdmF0aW9uIGZ1bmN0aW9uIHdpdGggdHdvIHRocmVzaG9sZHMKZGVmIGFjdGl2YXRpb25fZnVuY3Rpb24od2VpZ2h0ZWRfc3VtKToKICAgIGlmIGxvd2VyX3RocmVzaG9sZCA8IHdlaWdodGVkX3N1bSA8IHVwcGVyX3RocmVzaG9sZDoKICAgICAgICByZXR1cm4gMQogICAgZWxzZToKICAgICAgICByZXR1cm4gMAoKIyBHZW5lcmF0ZSBhbGwgMTYgcG9zc2libGUgdHJ1dGggdGFibGVzIGZvciAyIGlucHV0cwppbnB1dHMgPSBucC5hcnJheShbWzAsIDBdLCBbMCwgMV0sIFsxLCAwXSwgWzEsIDFdXSkKYWxsX3RhcmdldHMgPSBbCiAgICBbMCwgMCwgMCwgMF0sICAjIDAKICAgIFswLCAwLCAwLCAxXSwgICMgMQogICAgWzAsIDAsIDEsIDBdLCAgIyAyCiAgICBbMCwgMCwgMSwgMV0sICAjIDMKICAgIFswLCAxLCAwLCAwXSwgICMgNAogICAgWzAsIDEsIDAsIDFdLCAgIyA1CiAgICBbMCwgMSwgMSwgMF0sICAjIDYKICAgIFswLCAxLCAxLCAxXSwgICMgNwogICAgWzEsIDAsIDAsIDBdLCAgIyA4CiAgICBbMSwgMCwgMCwgMV0sICAjIDkKICAgIFsxLCAwLCAxLCAwXSwgICMgMTAKICAgIFsxLCAwLCAxLCAxXSwgICMgMTEKICAgIFsxLCAxLCAwLCAwXSwgICMgMTIKICAgIFsxLCAxLCAwLCAxXSwgICMgMTMKICAgIFsxLCAxLCAxLCAwXSwgICMgMTQKICAgIFsxLCAxLCAxLCAxXSwgICMgMTUKXQoKIyBJbml0aWFsaXplIHdlaWdodHMgZm9yIHRoZSBuZXVyb25zCndlaWdodHNfaW5wdXRfdG9faGlkZGVuID0gbnAucmFuZG9tLnJhbmQoMykgICMgMiBpbnB1dHMgKyAxIGJpYXMg4oaSIDEgaGlkZGVuIG5ldXJvbgp3ZWlnaHRzX2hpZGRlbl90b19vdXRwdXQgPSBucC5yYW5kb20ucmFuZCgyKSAgIyAxIGhpZGRlbiBuZXVyb24gKyAxIGJpYXMg4oaSIDEgb3V0cHV0IG5ldXJvbgoKIyBUcmFpbmluZyBsb29wCm5ldHdvcmtfdHJhaW5lZCA9IEZhbHNlCmZvciB0YXJnZXQgaW4gYWxsX3RhcmdldHM6ICAjIFRyYWluIGZvciBhbGwgcG9zc2libGUgdGFibGVzCiAgICBlcG9jaCA9IDAKICAgIHdoaWxlIGVwb2NoIDwgbWF4X2l0ZXJhdGlvbnM6CiAgICAgICAgZXBvY2ggKz0gMQogICAgICAgIGFsbF9jb3JyZWN0ID0gVHJ1ZSAgIyBGbGFnIHRvIHRyYWNrIGlmIGFsbCBvdXRwdXRzIGFyZSBjb3JyZWN0CgogICAgICAgIGZvciBpbnB1dF92ZWN0b3IsIGV4cGVjdGVkX3RhcmdldCBpbiB6aXAoaW5wdXRzLCB0YXJnZXQpOgogICAgICAgICAgICBOMSwgTjIgPSBpbnB1dF92ZWN0b3IKICAgICAgICAgICAgYmlhcyA9IDEgICMgQmlhcyBpbnB1dAoKICAgICAgICAgICAgIyBGb3J3YXJkIHBhc3MgKGhp
ZGRlbiBsYXllcikKICAgICAgICAgICAgTjNfaW5wdXQgPSBucC5kb3QobnAuYXBwZW5kKGlucHV0X3ZlY3RvciwgYmlhcyksIHdlaWdodHNfaW5wdXRfdG9faGlkZGVuKSAgIyBIaWRkZW4gbGF5ZXIgc3VtCiAgICAgICAgICAgIE4zID0gYWN0aXZhdGlvbl9mdW5jdGlvbihOM19pbnB1dCkgICMgT3V0cHV0IG9mIHRoZSBoaWRkZW4gbmV1cm9uCgogICAgICAgICAgICAjIE91dHB1dCBsYXllcgogICAgICAgICAgICBONF9pbnB1dCA9IChOMyAqIHdlaWdodHNfaGlkZGVuX3RvX291dHB1dFswXSArIGJpYXMgKiB3ZWlnaHRzX2hpZGRlbl90b19vdXRwdXRbMV0pICAjIFdlaWdodGVkIHN1bSB0byBvdXRwdXQKICAgICAgICAgICAgTjQgPSBhY3RpdmF0aW9uX2Z1bmN0aW9uKE40X2lucHV0KSAgIyBPdXRwdXQgbmV1cm9uIGRlY2lzaW9uCgogICAgICAgICAgICAjIEVycm9yIGNhbGN1bGF0aW9uCiAgICAgICAgICAgIGVycm9yID0gZXhwZWN0ZWRfdGFyZ2V0IC0gTjQKCiAgICAgICAgICAgICMgSWYgdGhlcmUgaXMgYW4gZXJyb3IsIHVwZGF0ZSB0aGUgd2VpZ2h0cwogICAgICAgICAgICBpZiBlcnJvciAhPSAwOgogICAgICAgICAgICAgICAgYWxsX2NvcnJlY3QgPSBGYWxzZQoKICAgICAgICAgICAgICAgICMgVXBkYXRlIHdlaWdodHMgZm9yIGhpZGRlbiB0byBvdXRwdXQgbmV1cm9uCiAgICAgICAgICAgICAgICB3ZWlnaHRzX2hpZGRlbl90b19vdXRwdXRbMF0gKz0gbGVhcm5pbmdfcmF0ZSAqIGVycm9yICogTjMKICAgICAgICAgICAgICAgIHdlaWdodHNfaGlkZGVuX3RvX291dHB1dFsxXSArPSBsZWFybmluZ19yYXRlICogZXJyb3IgKiBiaWFzCgogICAgICAgICAgICAgICAgIyBVcGRhdGUgd2VpZ2h0cyBmb3IgaW5wdXQgdG8gaGlkZGVuIG5ldXJvbgogICAgICAgICAgICAgICAgd2VpZ2h0c19pbnB1dF90b19oaWRkZW4gKz0gbGVhcm5pbmdfcmF0ZSAqIGVycm9yICogTjMgKiBucC5hcHBlbmQoaW5wdXRfdmVjdG9yLCBiaWFzKQoKICAgICAgICBpZiBhbGxfY29ycmVjdDoKICAgICAgICAgICAgbmV0d29ya190cmFpbmVkID0gVHJ1ZQogICAgICAgICAgICBicmVhawoKIyBSZXN1bHRzCmlmIG5ldHdvcmtfdHJhaW5lZDoKICAgIHByaW50KGYiVGhlIG5ldHdvcmsgbGVhcm5lZCB0aGUgdHJ1dGggdGFibGVzIGNvcnJlY3RseSBhZnRlciB7ZXBvY2h9IGl0ZXJhdGlvbnMuIikKZWxzZToKICAgIHByaW50KGYiVGhlIG5ldHdvcmsgZmFpbGVkIHRvIGxlYXJuIHRoZSB0cnV0aCB0YWJsZXMgYWZ0ZXIge2Vwb2NofSBpdGVyYXRpb25zLiIpCgojIFRlc3RpbmcgdGhlIHRyYWluZWQgbmV0d29yawpwcmludCgiXG5UZXN0aW5nIHRoZSB0cmFpbmVkIG5ldHdvcms6IikKZm9yIHRhcmdldF9pbmRleCwgdGFyZ2V0IGluIGVudW1lcmF0ZShhbGxfdGFyZ2V0cyk6CiAgICBwcmludChmIlRlc3RpbmcgVGFibGUge3RhcmdldF9pbmRleH06IikKICAgIGZvciBpbnB1dF92ZWN0b3IsIGV4cGVjdGVkX3RhcmdldCBpbiB6aXAoaW5wdXRzLCB0YXJnZXQp
OgogICAgICAgIE4xLCBOMiA9IGlucHV0X3ZlY3RvcgogICAgICAgIGJpYXMgPSAxCgogICAgICAgICMgRm9yd2FyZCBwYXNzIChoaWRkZW4gbGF5ZXIpCiAgICAgICAgTjNfaW5wdXQgPSBucC5kb3QobnAuYXBwZW5kKGlucHV0X3ZlY3RvciwgYmlhcyksIHdlaWdodHNfaW5wdXRfdG9faGlkZGVuKQogICAgICAgIE4zID0gYWN0aXZhdGlvbl9mdW5jdGlvbihOM19pbnB1dCkKCiAgICAgICAgIyBPdXRwdXQgbGF5ZXIKICAgICAgICBONF9pbnB1dCA9IChOMyAqIHdlaWdodHNfaGlkZGVuX3RvX291dHB1dFswXSArIGJpYXMgKiB3ZWlnaHRzX2hpZGRlbl90b19vdXRwdXRbMV0pCiAgICAgICAgTjQgPSBhY3RpdmF0aW9uX2Z1bmN0aW9uKE40X2lucHV0KQoKICAgICAgICBwcmludChmIklucHV0OiB7aW5wdXRfdmVjdG9yfSwgVGFyZ2V0OiB7ZXhwZWN0ZWRfdGFyZ2V0fSwgT3V0cHV0OiB7TjR9IikK
The network learned the truth tables correctly after 2 iterations.
Testing the trained network:
Testing Table 0:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 1:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 1, Output: 1
Testing Table 2:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 3:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 1, Output: 1
Testing Table 4:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 5:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 1, Output: 1
Testing Table 6:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 7:
Input: [0 0], Target: 0, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 1, Output: 1
Testing Table 8:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 9:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 1, Output: 1
Testing Table 10:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 11:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 0, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 1, Output: 1
Testing Table 12:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 13:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 0, Output: 1
Input: [1 1], Target: 1, Output: 1
Testing Table 14:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 0, Output: 1
Testing Table 15:
Input: [0 0], Target: 1, Output: 1
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 1, Output: 1
Input: [1 1], Target: 1, Output: 1