The CS231n assignments are excellent; here are some notes I took while working through assignment 1.

I. The first part is the KNN code. The trick here is the three different ways of computing the distance matrix; the core idea is vectorization, which is extremely useful in Python and machine learning and can speed the computation up dramatically. A small sanity check is sketched after the listing.

import numpy as np

class KNearestNeighbor(object):
    """ a kNN classifier with L2 distance """

    def __init__(self):
        pass

    def train(self, X, y):
        """
        Train the classifier. For k-nearest neighbors this is just
        memorizing the training data.

        Inputs:
        - X: A numpy array of shape (num_train, D) containing the training data
          consisting of num_train samples each of dimension D.
        - y: A numpy array of shape (N,) containing the training labels, where
          y[i] is the label for X[i].
        """
        self.X_train = X
        self.y_train = y

    def predict(self, X, k=1, num_loops=0):
        """
        Predict labels for test data using this classifier.

        Inputs:
        - X: A numpy array of shape (num_test, D) containing test data consisting
          of num_test samples each of dimension D.
        - k: The number of nearest neighbors that vote for the predicted labels.
        - num_loops: Determines which implementation to use to compute distances
          between training points and testing points.

        Returns:
        - y: A numpy array of shape (num_test,) containing predicted labels for the
          test data, where y[i] is the predicted label for the test point X[i].
        """
        if num_loops == 0:
            dists = self.compute_distances_no_loops(X)
        elif num_loops == 1:
            dists = self.compute_distances_one_loop(X)
        elif num_loops == 2:
            dists = self.compute_distances_two_loops(X)
        else:
            raise ValueError('Invalid value %d for num_loops' % num_loops)

        return self.predict_labels(dists, k=k)

    def compute_distances_two_loops(self, X):
        """
        Compute the distance between each test point in X and each training point
        in self.X_train using a nested loop over both the training data and the
        test data.

        Inputs:
        - X: A numpy array of shape (num_test, D) containing test data.

        Returns:
        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
          is the Euclidean distance between the ith test point and the jth training
          point.
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        for i in range(num_test):
            for j in range(num_train):
                # L2 distance between the ith test point and the jth training point,
                # without looping over the dimension.
                dists[i, j] = np.sqrt(np.sum(np.square(self.X_train[j, :] - X[i, :])))
        return dists

    def compute_distances_one_loop(self, X):
        """
        Compute the distance between each test point in X and each training point
        in self.X_train using a single loop over the test data.

        Input / Output: Same as compute_distances_two_loops
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        for i in range(num_test):
            # Broadcast the ith test point against all training points at once.
            dists[i, :] = np.sqrt(np.sum(np.square(self.X_train - X[i, :]), axis=1))
        return dists

    def compute_distances_no_loops(self, X):
        """
        Compute the distance between each test point in X and each training point
        in self.X_train using no explicit loops.

        Input / Output: Same as compute_distances_two_loops
        """
        # Expand ||x - t||^2 = ||x||^2 + ||t||^2 - 2 x.t: one matrix multiplication
        # plus two broadcast sums, no explicit loops.
        dists = np.multiply(np.dot(X, self.X_train.T), -2)
        sq1 = np.sum(np.square(X), axis=1, keepdims=True)   # (num_test, 1)
        sq2 = np.sum(np.square(self.X_train), axis=1)        # (num_train,)
        dists = np.add(dists, sq1)
        dists = np.add(dists, sq2)
        dists = np.sqrt(dists)
        return dists

    def predict_labels(self, dists, k=1):
        """
        Given a matrix of distances between test points and training points,
        predict a label for each test point.

        Inputs:
        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
          gives the distance between the ith test point and the jth training point.

        Returns:
        - y: A numpy array of shape (num_test,) containing predicted labels for the
          test data, where y[i] is the predicted label for the test point X[i].
        """
        num_test = dists.shape[0]
        y_pred = np.zeros(num_test)
        for i in range(num_test):
            # Labels of the k nearest neighbors of the ith test point
            # (np.argsort gives the indices of the training points by distance).
            closest_y = self.y_train[np.argsort(dists[i, :])[:k]]
            # Majority vote; np.argmax over the bincount breaks ties by
            # choosing the smaller label.
            y_pred[i] = np.argmax(np.bincount(closest_y))
        return y_pred
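
The no-loop version relies on expanding ||x - t||^2 = ||x||^2 + ||t||^2 - 2 x.t and letting broadcasting do the rest. As a quick sanity check (a minimal sketch of my own, using made-up random data rather than CIFAR-10), all three implementations should produce the same distance matrix:

import numpy as np

# Hypothetical sanity check: the three distance implementations should agree
# on small random data (assumes the KNearestNeighbor class defined above).
np.random.seed(0)
X_train = np.random.randn(50, 10)
y_train = np.random.randint(0, 3, size=50)
X_test = np.random.randn(5, 10)

knn = KNearestNeighbor()
knn.train(X_train, y_train)

d_two = knn.compute_distances_two_loops(X_test)
d_one = knn.compute_distances_one_loop(X_test)
d_none = knn.compute_distances_no_loops(X_test)

print(np.allclose(d_two, d_one))   # expected: True
print(np.allclose(d_two, d_none))  # expected: True
print(knn.predict(X_test, k=3))    # labels voted by the 3 nearest neighbors

If floating-point round-off ever produces tiny negative values before the square root in the no-loop version, clipping with np.maximum(dists, 0) can fix it.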

II. Softmax

Again we need to implement both a naive (loop-based) and a vectorized version so the two can be compared for speed; a small consistency and gradient check is sketched after the listing.

import numpy as np

def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops).
    Inputs and outputs are the same as softmax_loss_vectorized.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)       # gradient, same shape as W
    dW_each = np.zeros_like(W)
    num_train, dim = X.shape
    num_class = W.shape[1]
    f = X.dot(W)  # N by C
    # For numeric stability, subtract the row-wise max before exponentiating.
    f_max = np.reshape(np.max(f, axis=1), (num_train, 1))
    prob = np.exp(f - f_max) / np.sum(np.exp(f - f_max), axis=1, keepdims=True)  # N by C
    y_trueClass = np.zeros_like(prob)
    y_trueClass[np.arange(num_train), y] = 1.0
    for i in range(num_train):
        for j in range(num_class):
            # Loss: L = -(1/N) * sum_i sum_j 1(j == y_i) * log(exp(f_j) / sum_k exp(f_k))
            #          + 0.5 * reg * sum(W^2)
            loss += -(y_trueClass[i, j] * np.log(prob[i, j]))
            # Gradient: dL/dW_j = -(1/N) * sum_i x_i^T * (1(j == y_i) - p_{i,j}) + reg * W_j,
            # where p_{i,j} = exp(f_j) / sum_k exp(f_k)
            dW_each[:, j] = -(y_trueClass[i, j] - prob[i, j]) * X[i, :]
        dW += dW_each  # accumulate the gradient contribution of example i
    loss /= num_train
    loss += 0.5 * reg * np.sum(W * W)  # add the regularization term
    dW /= num_train
    dW += reg * W

    return loss, dW

def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.

    Inputs and outputs are the same as softmax_loss_naive.
    """
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)  # D by C
    num_train, dim = X.shape

    f = X.dot(W)  # N by C
    # For numeric stability, subtract the row-wise max before exponentiating.
    f_max = np.reshape(np.max(f, axis=1), (num_train, 1))  # N by 1
    prob = np.exp(f - f_max) / np.sum(np.exp(f - f_max), axis=1, keepdims=True)  # N by C
    y_trueClass = np.zeros_like(prob)
    y_trueClass[np.arange(num_train), y] = 1.0  # N by C
    # The whole batch is handled with array operations, no Python loops.
    loss += -np.sum(y_trueClass * np.log(prob)) / num_train + 0.5 * reg * np.sum(W * W)
    dW += -np.dot(X.T, y_trueClass - prob) / num_train + reg * W

    return loss, dW
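
Before timing the two versions it is worth checking that they agree, and that the analytic gradient matches a numeric one. The sketch below is my own illustration on random data (the shapes N, D, C and the step h are arbitrary choices), using a centered finite difference on a few entries of W:

import numpy as np

# Hypothetical consistency and numeric gradient check for the softmax loss
# (assumes softmax_loss_naive / softmax_loss_vectorized defined above).
np.random.seed(1)
N, D, C = 20, 5, 4
X = np.random.randn(N, D)
y = np.random.randint(0, C, size=N)
W = 0.01 * np.random.randn(D, C)
reg = 0.1

loss, dW = softmax_loss_naive(W, X, y, reg)
loss_v, dW_v = softmax_loss_vectorized(W, X, y, reg)
print('naive vs vectorized:', abs(loss - loss_v), np.max(np.abs(dW - dW_v)))

h = 1e-5
for _ in range(5):
    i, j = np.random.randint(D), np.random.randint(C)
    W_plus, W_minus = W.copy(), W.copy()
    W_plus[i, j] += h
    W_minus[i, j] -= h
    # Centered difference approximation of dL/dW[i, j].
    num_grad = (softmax_loss_naive(W_plus, X, y, reg)[0] -
                softmax_loss_naive(W_minus, X, y, reg)[0]) / (2 * h)
    print('analytic %f, numeric %f' % (dW[i, j], num_grad))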

III. SVM

import numpy as np

def svm_loss_naive(W, X, y, reg):
    """
    Structured SVM loss function, naive implementation (with loops).

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    dW = np.zeros(W.shape)  # initialize the gradient as zero
    # compute the loss and the gradient
    num_classes = W.shape[1]
    num_train = X.shape[0]
    loss = 0.0
    for i in range(num_train):
        scores = X[i].dot(W)
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:  # the correct class does not contribute to the loss
                continue
            margin = scores[j] - correct_class_score + 1  # note delta = 1
            if margin > 0:
                loss += margin
                # dL_i/dW_{y_i} = -x_i^T * sum_{j != y_i} 1(x_i W_j - x_i W_{y_i} + 1 > 0)
                dW[:, y[i]] += -X[i, :]
                # dL_i/dW_j = x_i^T * 1(x_i W_j - x_i W_{y_i} + 1 > 0), for j != y_i
                dW[:, j] += X[i, :]
    # Right now the loss is a sum over all training examples, but we want it
    # to be an average instead so we divide by num_train.
    loss /= num_train
    dW /= num_train
    # Add regularization to the loss and gradient.
    loss += 0.5 * reg * np.sum(W * W)
    dW += reg * W
    return loss, dW

def svm_loss_vectorized(W, X, y, reg):
    """
    Structured SVM loss function, vectorized implementation. Inputs and outputs
    are the same as svm_loss_naive.
    """
    loss = 0.0
    dW = np.zeros(W.shape)  # initialize the gradient as zero
    scores = X.dot(W)  # N by C
    num_train = X.shape[0]
    num_classes = W.shape[1]
    scores_correct = scores[np.arange(num_train), y]  # shape (N,)
    scores_correct = np.reshape(scores_correct, (num_train, 1))  # N by 1
    margins = scores - scores_correct + 1.0  # N by C
    margins[np.arange(num_train), y] = 0.0
    margins[margins <= 0] = 0.0
    loss += np.sum(margins) / num_train
    loss += 0.5 * reg * np.sum(W * W)
    # compute the gradient: each violated margin contributes +x_i to its column
    # and -x_i to the correct-class column.
    margins[margins > 0] = 1.0
    row_sum = np.sum(margins, axis=1)  # shape (N,)
    margins[np.arange(num_train), y] = -row_sum
    dW += np.dot(X.T, margins) / num_train + reg * W  # D by C

    return loss, dW
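
The same consistency check as for the softmax applies here: on small random data the naive and vectorized implementations should return the same loss and gradient. A minimal sketch of my own (the shapes and reg value are arbitrary):

import numpy as np

# Hypothetical check that svm_loss_naive and svm_loss_vectorized agree
# (assumes both functions are defined above).
np.random.seed(2)
N, D, C = 30, 6, 5
X = np.random.randn(N, D)
y = np.random.randint(0, C, size=N)
W = 0.01 * np.random.randn(D, C)

loss_naive, dW_naive = svm_loss_naive(W, X, y, reg=0.5)
loss_vec, dW_vec = svm_loss_vectorized(W, X, y, reg=0.5)

print('loss difference:', abs(loss_naive - loss_vec))              # expected ~0
print('gradient difference:', np.max(np.abs(dW_naive - dW_vec)))   # expected ~0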

IV. linear_classifier

From a programming point of view, the three pieces above are different strategies, or more precisely several loss functions for one linear classifier, so we wrap them in a single LinearClassifier class that dispatches to them. A short usage sketch follows the listing.

import numpy as np
from linear_svm import *
from softmax import *

class LinearClassifier(object):

    def __init__(self):
        self.W = None

    def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
              batch_size=200, verbose=True):  # note the default hyper-parameters here
        """
        Train this linear classifier using stochastic gradient descent.

        Inputs:
        - X: A numpy array of shape (N, D) containing training data; there are N
          training samples each of dimension D.
        - y: A numpy array of shape (N,) containing training labels; y[i] = c
          means that X[i] has label 0 <= c < C for C classes.
        - learning_rate: (float) learning rate for optimization.
        - reg: (float) regularization strength.
        - num_iters: (integer) number of steps to take when optimizing
        - batch_size: (integer) number of training examples to use at each step.
        - verbose: (boolean) If true, print progress during optimization.

        Outputs:
        A list containing the value of the loss function at each training iteration.
        """
        num_train, dim = X.shape
        # assume y takes values 0...K-1 where K is number of classes
        num_classes = np.max(y) + 1
        if self.W is None:
            # lazily initialize W with small random values
            self.W = 0.001 * np.random.randn(dim, num_classes)

        # Run stochastic gradient descent (mini-batch) to optimize W:
        # at each step take a random minibatch and do one gradient step.
        loss_history = []
        for it in range(num_iters):
            # Sample a random minibatch (sampling with replacement would be
            # slightly faster than sampling without replacement).
            sample_index = np.random.choice(num_train, batch_size, replace=False)
            X_batch = X[sample_index, :]  # batch_size by D
            y_batch = y[sample_index]     # (batch_size,)

            # evaluate loss and gradient
            loss, grad = self.loss(X_batch, y_batch, reg)
            loss_history.append(loss)

            # perform parameter update
            self.W += -learning_rate * grad
            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))

        return loss_history

    def predict(self, X):
        """
        Use the trained weights of this linear classifier to predict labels for
        data points.

        Inputs:
        - X: A numpy array of shape (N, D) containing data points; each row is a
          D-dimensional point.

        Returns:
        - y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
          array of length N, and each element is an integer giving the
          predicted class.
        """
        # The prediction is simply the class with the highest score.
        y_pred = np.argmax(X.dot(self.W), axis=1)
        return y_pred

    def loss(self, X_batch, y_batch, reg):
        """
        Compute the loss function and its derivative.
        Subclasses will override this.

        Inputs:
        - X_batch: A numpy array of shape (N, D) containing a minibatch of N
          data points; each point has dimension D.
        - y_batch: A numpy array of shape (N,) containing labels for the minibatch.
        - reg: (float) regularization strength.

        Returns: A tuple containing:
        - loss as a single float
        - gradient with respect to self.W; an array of the same shape as W
        """
        pass

class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """
    def loss(self, X_batch, y_batch, reg):
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)

class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + cross-entropy loss function """
    def loss(self, X_batch, y_batch, reg):
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
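
A minimal usage sketch of my own (random data stands in for the CIFAR-10 features used in the notebook), showing how both subclasses are trained and evaluated through the shared LinearClassifier interface:

import numpy as np

# Hypothetical end-to-end usage of the LinearClassifier subclasses defined above.
np.random.seed(3)
num_train, dim, num_classes = 500, 32, 10
X_train = np.random.randn(num_train, dim)
y_train = np.random.randint(0, num_classes, size=num_train)

for cls in (LinearSVM, Softmax):
    clf = cls()
    loss_history = clf.train(X_train, y_train, learning_rate=1e-3, reg=1e-5,
                             num_iters=300, batch_size=100, verbose=False)
    train_acc = np.mean(clf.predict(X_train) == y_train)
    print('%s: final loss %.3f, train accuracy %.3f'
          % (cls.__name__, loss_history[-1], train_acc))

On random labels the accuracy stays near chance; the point is only that the train/loss/predict plumbing fits together.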

V. A simple two-layer neural network

This is only a simple neural network implementation; in the next assignment we will build a much better and more powerful network.

Backpropagation is easiest to follow with a computational-graph diagram (the original figure is omitted here); a toy training run is sketched after the listing.

import numpy as np
import matplotlib.pyplot as plt

class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network. The net has an input dimension of
    N, a hidden layer dimension of H, and performs classification over C classes.
    We train the network with a softmax loss function and L2 regularization on the
    weight matrices. The network uses a ReLU nonlinearity after the first fully
    connected layer.

    In other words, the network has the following architecture:

    input - fully connected layer - ReLU - fully connected layer - softmax

    The outputs of the second fully-connected layer are the scores for each class.
    """
    def __init__(self, input_size, hidden_size, output_size, std=1e-4):
        """
        Initialize the model. Weights are initialized to small random values and
        biases are initialized to zero. Weights and biases are stored in the
        variable self.params, which is a dictionary with the following keys:

        W1: First layer weights; has shape (D, H)
        b1: First layer biases; has shape (H,)
        W2: Second layer weights; has shape (H, C)
        b2: Second layer biases; has shape (C,)

        Inputs:
        - input_size: The dimension D of the input data.
        - hidden_size: The number of neurons H in the hidden layer.
        - output_size: The number of classes C.
        """
        # Initialize the network parameters.
        self.params = {}
        self.params['W1'] = std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def loss(self, X, y=None, reg=0.0):
        """
        Compute the loss and gradients for a two layer fully connected neural
        network.

        Inputs:
        - X: Input data of shape (N, D). Each X[i] is a training sample.
        - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
          an integer in the range 0 <= y[i] < C. This parameter is optional; if it
          is not passed then we only return scores, and if it is passed then we
          instead return the loss and gradients.
        - reg: Regularization strength.

        Returns:
        If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
        the score for class c on input X[i].

        If y is not None, instead return a tuple of:
        - loss: Loss (data loss and regularization loss) for this batch of training
          samples.
        - grads: Dictionary mapping parameter names to gradients of those parameters
          with respect to the loss function; has the same keys as self.params.
        """
        # Unpack variables from the params dictionary
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N, D = X.shape

        # Compute the forward pass
        h1 = np.maximum(0, np.dot(X, W1) + b1)  # ReLU activation of the first layer
        scores = np.dot(h1, W2) + b2            # class scores, shape (N, C)

        # If the targets are not given then jump out, we're done
        if y is None:
            return scores

        # Compute the loss: softmax cross-entropy plus L2 regularization.
        scores_max = np.max(scores, axis=1, keepdims=True)  # (N,1), for numeric stability
        exp_scores = np.exp(scores - scores_max)             # (N,C)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # (N,C)
        correct_logprobs = -np.log(probs[range(N), y])        # (N,)
        data_loss = np.sum(correct_logprobs) / N
        reg_loss = 0.5 * reg * np.sum(W1 * W1) + 0.5 * reg * np.sum(W2 * W2)
        loss = data_loss + reg_loss

        # Backward pass: compute gradients
        grads = {}
        # Gradient of the loss with respect to the scores; this is the softmax
        # gradient from section II: probs minus 1 at the correct class.
        dscores = probs  # (N,C)
        dscores[range(N), y] -= 1
        dscores /= N
        # Backprop into W2 and b2
        dW2 = np.dot(h1.T, dscores)    # (H,C)
        db2 = np.sum(dscores, axis=0)  # (C,)
        # Backprop into the hidden layer
        dh1 = np.dot(dscores, W2.T)    # (N,H)
        # Backprop through the ReLU non-linearity
        dh1[h1 <= 0] = 0
        # Backprop into W1 and b1
        dW1 = np.dot(X.T, dh1)         # (D,H)
        db1 = np.sum(dh1, axis=0)      # (H,)
        # Add the regularization gradient contribution
        dW2 += reg * W2
        dW1 += reg * W1
        grads['W1'] = dW1
        grads['b1'] = db1
        grads['W2'] = dW2
        grads['b2'] = db2

        return loss, grads

    def train(self, X, y, X_val, y_val,
              learning_rate=1e-3, learning_rate_decay=0.95,
              reg=1e-5, num_iters=100,
              batch_size=200, verbose=False):
        """
        Train this neural network using stochastic gradient descent.

        Inputs:
        - X: A numpy array of shape (N, D) giving training data.
        - y: A numpy array of shape (N,) giving training labels; y[i] = c means that
          X[i] has label c, where 0 <= c < C.
        - X_val: A numpy array of shape (N_val, D) giving validation data.
        - y_val: A numpy array of shape (N_val,) giving validation labels.
        - learning_rate: Scalar giving learning rate for optimization.
        - learning_rate_decay: Scalar giving factor used to decay the learning rate
          after each epoch.
        - reg: Scalar giving regularization strength.
        - num_iters: Number of steps to take when optimizing.
        - batch_size: Number of training examples to use per step.
        - verbose: boolean; if true print progress during optimization.
        """
        num_train = X.shape[0]
        iterations_per_epoch = max(num_train // batch_size, 1)

        # Use SGD to optimize the parameters in self.params
        loss_history = []
        train_acc_history = []
        val_acc_history = []

        for it in range(num_iters):
            # Create a random minibatch of training data and labels.
            sample_index = np.random.choice(num_train, batch_size, replace=True)
            X_batch = X[sample_index, :]  # (batch_size, D)
            y_batch = y[sample_index]     # (batch_size,)

            # Compute loss and gradients using the current minibatch
            loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
            loss_history.append(loss)

            # Vanilla SGD parameter update
            self.params['W1'] += -learning_rate * grads['W1']
            self.params['b1'] += -learning_rate * grads['b1']
            self.params['W2'] += -learning_rate * grads['W2']
            self.params['b2'] += -learning_rate * grads['b2']

            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))

            # Every epoch, check train and val accuracy and decay learning rate.
            if it % iterations_per_epoch == 0:
                # Check accuracy
                train_acc = (self.predict(X_batch) == y_batch).mean()
                val_acc = (self.predict(X_val) == y_val).mean()
                train_acc_history.append(train_acc)
                val_acc_history.append(val_acc)

                # Decay learning rate
                learning_rate *= learning_rate_decay

        return {
            'loss_history': loss_history,
            'train_acc_history': train_acc_history,
            'val_acc_history': val_acc_history,
        }

    def predict(self, X):
        """
        Use the trained weights of this two-layer network to predict labels for
        data points. For each data point we predict scores for each of the C
        classes, and assign each data point to the class with the highest score.

        Inputs:
        - X: A numpy array of shape (N, D) giving N D-dimensional data points to
          classify.

        Returns:
        - y_pred: A numpy array of shape (N,) giving predicted labels for each of
          the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
          to have class c, where 0 <= c < C.
        """
        # Forward pass, then take the class with the highest score.
        h1 = np.maximum(0, np.dot(X, self.params['W1']) + self.params['b1'])
        scores = np.dot(h1, self.params['W2']) + self.params['b2']
        y_pred = np.argmax(scores, axis=1)
        return y_pred
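
To see the whole pipeline work end to end, the toy run below (my own sketch; the synthetic data, sizes, and hyper-parameters are arbitrary, while the notebook uses CIFAR-10) trains TwoLayerNet and checks that the loss goes down:

import numpy as np

# Hypothetical toy run of the TwoLayerNet defined above on synthetic data.
np.random.seed(4)
N, D, H, C = 200, 10, 20, 3
X = np.random.randn(N, D)
y = np.random.randint(0, C, size=N)
X_val = np.random.randn(40, D)
y_val = np.random.randint(0, C, size=40)

net = TwoLayerNet(input_size=D, hidden_size=H, output_size=C, std=1e-1)
stats = net.train(X, y, X_val, y_val,
                  learning_rate=1e-1, learning_rate_decay=0.95,
                  reg=1e-4, num_iters=500, batch_size=50, verbose=False)

print('initial loss: %.3f' % stats['loss_history'][0])
print('final loss:   %.3f' % stats['loss_history'][-1])
print('final train accuracy: %.3f' % stats['train_acc_history'][-1])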

Summary

I did this assignment quite a while ago, and the code is largely adapted from solutions found online; my goal was mainly to understand it and get it running. At the time I was not very familiar with the Python ecosystem, and numpy does take some time to learn.

Overall, assignment 1 is not particularly difficult; the key is turning the formulas learned in the lectures into code, and I found finishing it very rewarding.
