fork download
  1. import numpy as np
  2.  
  3. # Parameters
  4. learning_rate = 0.1
  5. max_iterations = 1000
  6.  
  7. # Activation function with thresholding
  8. def activation_function(weighted_sum, threshold=0.5):
  9. return 1 if weighted_sum > threshold else 0
  10.  
  11. # Inputs for XOR truth table
  12. inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
  13. targets = [0, 1, 1, 0] # XOR Truth Table
  14.  
  15. # Initialize weights for 2 hidden neurons and 1 output neuron
  16. weights_input_to_hidden = np.random.rand(6) # 2 inputs, 1 bias → 2 hidden neurons (3 weights for each hidden)
  17. weights_hidden_to_output = np.random.rand(3) # 2 hidden neurons + bias → 1 output neuron
  18.  
# Training loop: perceptron-style updates applied to a 2-2-1 network with
# hard-threshold units. Stops early once a full epoch produces no errors.
#
# NOTE(review): with a step activation there is no usable gradient, and the
# input->hidden updates below are scaled by the hidden unit's own binary
# output (N1_hidden / N2_hidden in {0, 1}), so weights feeding a hidden unit
# that currently outputs 0 are never adjusted. This is not true
# backpropagation, and the recorded run output shows the network failing to
# learn XOR within max_iterations — a sigmoid + real backprop (or a different
# update rule) would be needed for reliable convergence.
epoch = 0
network_trained = False

while epoch < max_iterations:
    epoch += 1
    all_correct = True  # Flag to track if all outputs are correct

    for input_vector, target in zip(inputs, targets):
        N1, N2 = input_vector  # unpacked but unused below; kept for readability
        bias = 1  # Bias input

        # Forward pass (for both hidden neurons): each hidden unit sees
        # [x1, x2, bias] dotted with its own 3-weight slice.
        N1_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden[:3])
        N1_hidden = activation_function(N1_input)  # Output of first hidden neuron

        N2_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden[3:])
        N2_hidden = activation_function(N2_input)  # Output of second hidden neuron

        # Combine hidden neuron outputs and pass through to output
        N4_input = (N1_hidden * weights_hidden_to_output[0] +
                    N2_hidden * weights_hidden_to_output[1] +
                    bias * weights_hidden_to_output[2])  # Hidden to output
        N4 = activation_function(N4_input)  # Final output

        # Error calculation: in {-1, 0, 1} since target and N4 are binary.
        error = target - N4

        # Weight updates if error exists
        if error != 0:
            all_correct = False

            # Update weights for hidden -> output (standard perceptron rule).
            weights_hidden_to_output[0] += learning_rate * error * N1_hidden
            weights_hidden_to_output[1] += learning_rate * error * N2_hidden
            weights_hidden_to_output[2] += learning_rate * error * bias

            # Update weights for input -> hidden neurons. The N*_hidden factor
            # zeroes the update whenever that hidden unit is silent (see NOTE).
            weights_input_to_hidden[:3] += learning_rate * error * N1_hidden * np.append(input_vector, bias)
            weights_input_to_hidden[3:] += learning_rate * error * N2_hidden * np.append(input_vector, bias)

    # A clean epoch means every pattern was classified correctly.
    if all_correct:
        network_trained = True
        break
  63.  
  64. # Results
  65. if network_trained:
  66. print(f"The network learned the XOR truth table correctly after {epoch} iterations.")
  67. else:
  68. print(f"The network failed to learn the XOR truth table after {epoch} iterations.")
  69.  
  70. # Testing the trained network
  71. print("\nTesting the trained network:")
  72. for input_vector, target in zip(inputs, targets):
  73. N1, N2 = input_vector
  74. bias = 1
  75.  
  76. # Forward pass (for both hidden neurons)
  77. N1_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden[:3])
  78. N1_hidden = activation_function(N1_input)
  79.  
  80. N2_input = np.dot(np.append(input_vector, bias), weights_input_to_hidden[3:])
  81. N2_hidden = activation_function(N2_input)
  82.  
  83. # Combine hidden neuron outputs and pass through to output
  84. N4_input = (N1_hidden * weights_hidden_to_output[0] +
  85. N2_hidden * weights_hidden_to_output[1] +
  86. bias * weights_hidden_to_output[2])
  87. N4 = activation_function(N4_input)
  88.  
  89. print(f"Input: {input_vector}, Target: {target}, Output: {N4}")
  90.  
Success #stdin #stdout 0.39s 28888KB
stdin
Standard input is empty
stdout
The network failed to learn the XOR truth table after 1000 iterations.

Testing the trained network:
Input: [0 0], Target: 0, Output: 0
Input: [0 1], Target: 1, Output: 1
Input: [1 0], Target: 1, Output: 0
Input: [1 1], Target: 0, Output: 0