Learning notes from the free online book *Neural Networks and Deep Learning*.

The code mainly consists of the following parts:

3) the code test file

1) Data loading:

 # data call! /usr/bin/env Python coding: UTF-8 # -*- -*- # @Time: 2017-03-12 @Author: CC @File # 15:11 #: net_load_data.py @Software: PyCharm Community Edition from numpy # import * import numpy as NP import cPickle def (load_data): "load the decompressed data, and read the" with open "('data/mnist_pkl/mnist.pkl','rb') as f: try: train_data, valid Ation_data, test_data = cPickle.load (f) print the file open sucessfully print train_data[0].shape "# # (50000784) # print train_data[1].shape # (50000, return) (train_data, validation_data, test_data) except EOFError: print'the file open error'return None def (data_transform):" the data into the calculation format "t_d, va_d, te_d = load_data (print t_d[0].shape) # # (50000784) te_d[0].shape (10000784) print # # # print va_d[0].shape # (10000784) # N1 = [np.reshape (x, 784,1) for X in t_d[0]] # 50 thousand data respectively into one by one out (784,1), n (x = [np.reshape arranged one by one. (784, 1)) for X in t_d[0]] # 50 thousand data respectively into one by one out (784,1), one by one row Print'n1'n1[0].shape # # column, print'n', n[0].shape M = [vectors (y) for y in t_d[1]] # 50 thousand label (50000,1) to train_data (1050000) = zip (n, m) # package and label data into yuan Group (x, [np.reshape = n (784, 1)) for X in va_d[0]] # 50 thousand data respectively into one by one out (784,1), validation_data = zip (n, order va_d[1]) will not # tag data vector (x, n = [np.reshape (784, 1)) for X in te_d[0]] # 50 thousand data respectively into one by one out (784,1), test_data = array zip (n, te_d[1]) # no label data vector # print train_data[0][0].shape # (784) # print "len (train_data[0]), len (train_data[0]) #2 # print" len (train_data[100]), len (train_data[100]) #2 print len (train_d # Ata[0][0] "), len (train_data[0][0]) #784 # print" train_data[0][0].shape "train_data[0][0].shape, # (784,1) # print" len "(train_data), len (train_data) #50000 print train_data[0][1].shape (10,1) # # # # print test_data[0][1] 7 return 
(train_data, validation_data, test_data) def vectors (y):" give the label "label = np.zeros ((10,1)) label[y] = 1 return label 

2) Network construction (floating-point calculation):

 /usr/bin/ env Python #! # -*- coding: UTF-8 -*- # @Time: 2017-03-12 @Author: CC @File # 16:07 #: net_network.py import numpy as NP import random class Network (object): # default for the base class? For inheritance: Print (network, object) isinstance def __init__ (self, sizes): self.n Um_layers = len (sizes) self.sizes = sizes # print'num_layers', self.num_layers self.weight = [np.random.randn (A1, A2) for (A1, A2) in zip (sizes[1:], sizes[, -1])] # produce a array of self.bias = [np.random.randn (a3,1) for A3 in sizes[1:]] print self.weight[0].shape # # (20,10 def SGD (self) train_data, min_batch_size, epoches, ETA, test_data=False), "1) disrupted samples, the training data is divided into 2 batches) to calculate the backpropagation gradient 3) gain weight update" if test_data: n_test = len (test_data) n = len (train_data) #50000 random.shuffle (train_data) # upset min_batches for = [train_data[k:k+min_batch_size] K in xrange (0, N, min_batch_size)] # extraction batch data for K In xrange (0, epoches): # with updated weights continue to update the random.shuffle (train_data) for min_batch in min_batches: # # disrupted by introduction of low efficiency of self.updata_parameter (min_batch, ETA) if test_data: num = self.evaluate (test_data) print the {0}th epoches: {1}/{2}.Format (k, num, len (test_data) else: print'epoches {0}) completed'.format (k) def forward (self, x): "the layer value" for W, B in zip (self.weight, self.bias): x = sigmoid (np.dot (W, x) +b) return x def updata_parameter (self, min_batch, ETA). "1) backpropagation gradient values calculated for each sample 2) cumulative gradient each batch sample values 


This concludes the main body of the post.