This article presents the implementation of a simple neural network algorithm in Python for your reference. The specific contents are as follows.

A two-layer network, including the input layer and the output layer:

import numpy as np

# sigmoid function; with deriv=True, x is expected to already be a
# sigmoid output, so x * (1 - x) is its derivative
def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

# input dataset
x = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])

# output dataset
y = np.array([[0, 0, 1, 1]]).T

np.random.seed(1)

# initialize the weights with mean 0
syn0 = 2 * np.random.random((3, 1)) - 1

for _ in range(100000):
    l0 = x                         # the first layer: the input layer
    l1 = nonlin(np.dot(l0, syn0))  # the second layer: the output layer
    l1_error = y - l1
    l1_delta = l1_error * nonlin(l1, deriv=True)
    syn0 += np.dot(l0.T, l1_delta)

print("output after training:")
print(l1)
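Once the block above has been run, the learned weights can score a new sample with a single forward pass. This is a minimal usage sketch, not part of the original article; x_new is an illustrative name, and nonlin and syn0 are assumed to come from the code above.

# assumes nonlin and syn0 are defined by the training code above;
# x_new is a hypothetical unseen input row
x_new = np.array([[1, 0, 0]])
prediction = nonlin(np.dot(x_new, syn0))
print(prediction)  # a value in (0, 1); values near 1 mean class 1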

Here, for the network with an input layer and an output layer:

l0: the input layer

l1: the output layer

syn0: the initial weights

l1_error: the prediction error

l1_delta: the error correction coefficient used to update the weights

func nonlin: the sigmoid function (see the check below)
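Note that when deriv=True, nonlin expects a value that has already been passed through the sigmoid: since sigmoid'(t) = sigmoid(t) * (1 - sigmoid(t)), it can return x * (1 - x) without recomputing the exponential. Below is a minimal self-contained sketch, not from the original article, that checks this identity against a finite-difference derivative:

import numpy as np

def nonlin(x, deriv=False):
    # with deriv=True, x is assumed to already be sigmoid(t)
    if deriv:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

t = np.linspace(-5, 5, 11)
s = nonlin(t)                                        # sigmoid(t)
h = 1e-6
numeric = (nonlin(t + h) - nonlin(t - h)) / (2 * h)  # central difference
print(np.allclose(nonlin(s, deriv=True), numeric, atol=1e-6))  # True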

"

2018210123647" visible iterations the prediction results more close to the ideal value, then the longer time.

A three-layer Python neural network, including an input layer, a hidden layer, and an output layer. Here the target y = [0, 1, 1, 0] is the XOR of the first two input columns, which a single layer of weights cannot represent, so a hidden layer is needed:

import numpy as np

def nonlin(x, deriv=False):
    if deriv:
        return x * (1 - x)
    else:
        return 1 / (1 + np.exp(-x))

# input dataset
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])

# output dataset
y = np.array([[0, 1, 1, 0]]).T

# the input-hidden layer weights
syn0 = 2 * np.random.random((3, 4)) - 1
# the hidden-output layer weights
syn1 = 2 * np.random.random((4, 1)) - 1

for j in range(60000):
    l0 = X                         # the first layer: the input layer
    l1 = nonlin(np.dot(l0, syn0))  # the second layer: the hidden layer
    l2 = nonlin(np.dot(l1, syn1))  # the third layer: the output layer
    l2_error = y - l2              # the hidden-output layer error
    if (j % 10000) == 0:
        print("Error: " + str(np.mean(np.abs(l2_error))))
    l2_delta = l2_error * nonlin(l2, deriv=True)
    l1_error = l2_delta.dot(syn1.T)  # the input-hidden layer error
    l1_delta = l1_error * nonlin(l1, deriv=True)
    syn1 += l1.T.dot(l2_delta)
    syn0 += l0.T.dot(l1_delta)

print("output after training:")
print(l2)
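As with the two-layer version, a trained three-layer network scores a new sample by chaining both weight layers. This is a hedged usage sketch, not from the original article; it assumes nonlin, syn0, and syn1 are defined by the code above, and x_new is an illustrative name.

# assumes nonlin, syn0 and syn1 come from the training code above
x_new = np.array([[0, 1, 0]])          # a hypothetical unseen input row
hidden = nonlin(np.dot(x_new, syn0))   # input -> hidden layer
output = nonlin(np.dot(hidden, syn1))  # hidden -> output layer
print(output)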

The above is the entire content of this article. I hope it is helpful for everyone's learning, and I also hope everyone will support Script Home.
