XOR with a single hidden threshold neuron — source and run transcript
  1. import numpy as np
  2.  
  3. # Parameters
  4. learning_rate = 0.1
  5. max_iterations = 1000
  6. lower_threshold = 0.5 # Relaxed lower threshold
  7. upper_threshold = 1.5 # Relaxed upper threshold
  8.  
  9. # Threshold activation function with two thresholds
  10. def activation_function(weighted_sum):
  11. if lower_threshold < weighted_sum < upper_threshold:
  12. return 1
  13. else:
  14. return 0
  15.  
  16. # Inputs for XOR truth table
  17. inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
  18. targets = [0, 1, 1, 0] # XOR Truth Table
  19.  
  20. # Initialize weights for one hidden neuron and one output neuron
  21. weights_input_to_hidden = np.random.rand(3) # 2 inputs, 1 bias → 1 hidden neuron (3 weights)
  22. weights_hidden_to_output = np.random.rand(2) # 1 hidden neuron + 1 bias → 1 output neuron
  23.  
  24. # Training loop
  25. epoch = 0
  26. network_trained = False
  27.  
  28. while epoch < max_iterations:
  29. epoch += 1
  30. all_correct = True # Flag to track if all outputs are correct
  31.  
  32. for input_vector, target in zip(inputs, targets):
  33. N1, N2 = input_vector
  34. bias = 1 # Bias input
  35.  
  36. # Forward pass (hidden layer)
  37. N3_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden) # Hidden layer sum
  38. N3 = activation_function(N3_input) # Output of the hidden neuron
  39.  
  40. # Output layer
  41. N4_input = (N3 * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1]) # Weighted sum to output
  42. N4 = activation_function(N4_input) # Output neuron decision
  43.  
  44. # Error calculation
  45. error = target - N4
  46.  
  47. # Weight updates if error exists
  48. if error != 0:
  49. all_correct = False
  50.  
  51. # Update weights for hidden to output neuron
  52. weights_hidden_to_output[0] += learning_rate * error * N3
  53. weights_hidden_to_output[1] += learning_rate * error * bias
  54.  
  55. # Update weights for input to hidden neuron
  56. weights_input_to_hidden += learning_rate * error * N3 * np.append(input_vector, bias)
  57.  
  58. if all_correct:
  59. network_trained = True
  60. break
  61.  
  62. # Results
  63. if network_trained:
  64. print(f"The network learned the XOR truth table correctly after {epoch} iterations.")
  65. else:
  66. print(f"The network failed to learn the XOR truth table after {epoch} iterations.")
  67.  
  68. # Testing the trained network
  69. print("\nTesting the trained network:")
  70. for input_vector, target in zip(inputs, targets):
  71. N1, N2 = input_vector
  72. bias = 1
  73.  
  74. # Forward pass (hidden layer)
  75. N3_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden)
  76. N3 = activation_function(N3_input)
  77.  
  78. # Output layer
  79. N4_input = (N3 * weights_hidden_to_output[0] + bias * weights_hidden_to_output[1])
  80. N4 = activation_function(N4_input)
  81.  
  82. print(f"Input: {input_vector}, Target: {target}, Output: {N4}")
  83.  
Success #stdin #stdout 0.26s 28856KB
stdin
Standard input is empty
stdout
The network failed to learn the XOR truth table after 1000 iterations.

Testing the trained network:
Input: [0 0], Target: 0, Output: 0
Input: [0 1], Target: 1, Output: 0
Input: [1 0], Target: 1, Output: 0
Input: [1 1], Target: 0, Output: 0