
initial import week 4, not solved

commit 5811f9536034acc6e12c3659a9dba8801287ffae 1 parent a913643
Richard Schneeman authored
BIN  mlclass-ex3/mlclass-ex3/octave-core
Binary file not shown
BIN  mlclass-ex4/ex4.pdf
Binary file not shown
52 mlclass-ex4/mlclass-ex4/checkNNGradients.m
@@ -0,0 +1,52 @@
+function checkNNGradients(lambda)
+%CHECKNNGRADIENTS Creates a small neural network to check the
+%backpropagation gradients
+% CHECKNNGRADIENTS(lambda) Creates a small neural network to check the
+% backpropagation gradients; it will output the analytical gradients
+% produced by your backprop code and the numerical gradients (computed
+% using computeNumericalGradient). These two gradient computations should
+% result in very similar values.
+%
+
+if ~exist('lambda', 'var') || isempty(lambda)
+ lambda = 0;
+end
+
+input_layer_size = 3;
+hidden_layer_size = 5;
+num_labels = 3;
+m = 5;
+
+% We generate some 'random' test data
+Theta1 = debugInitializeWeights(hidden_layer_size, input_layer_size);
+Theta2 = debugInitializeWeights(num_labels, hidden_layer_size);
+% Reusing debugInitializeWeights to generate X
+X = debugInitializeWeights(m, input_layer_size - 1);
+y = 1 + mod(1:m, num_labels)';
+
+% Unroll parameters
+nn_params = [Theta1(:) ; Theta2(:)];
+
+% Short hand for cost function
+costFunc = @(p) nnCostFunction(p, input_layer_size, hidden_layer_size, ...
+ num_labels, X, y, lambda);
+
+[cost, grad] = costFunc(nn_params);
+numgrad = computeNumericalGradient(costFunc, nn_params);
+
+% Visually examine the two gradient computations. The two columns
+% you get should be very similar.
+disp([numgrad grad]);
+fprintf(['The two columns above should be very similar.\n' ...
+ '(Left-Your Numerical Gradient, Right-Analytical Gradient)\n\n']);
+
+% Evaluate the norm of the difference between two solutions.
+% If you have a correct implementation, and assuming you used EPSILON = 0.0001
+% in computeNumericalGradient.m, then diff below should be less than 1e-9
+diff = norm(numgrad-grad)/norm(numgrad+grad);
+
+fprintf(['If your backpropagation implementation is correct, then \n' ...
+ 'the relative difference will be small (less than 1e-9). \n' ...
+ '\nRelative Difference: %g\n'], diff);
+
+end
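The relative-difference metric used above is scale-invariant, which is why a single 1e-9 threshold works regardless of the magnitude of the gradients. As a minimal sketch, the same check can be run against a function whose gradient is known in closed form (assuming computeNNGradients' helper computeNumericalGradient.m is on the path):

% Gradient check for J(theta) = sum(theta.^2), whose exact gradient
% is 2*theta; assumes computeNumericalGradient.m is on the path.
theta   = [1; -2; 0.5];
J       = @(t) sum(t .^ 2);
grad    = 2 * theta;                           % analytical gradient
numgrad = computeNumericalGradient(J, theta);  % central differences
fprintf('Relative difference: %g\n', ...
        norm(numgrad - grad) / norm(numgrad + grad));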
29 mlclass-ex4/mlclass-ex4/computeNumericalGradient.m
@@ -0,0 +1,29 @@
+function numgrad = computeNumericalGradient(J, theta)
+%COMPUTENUMERICALGRADIENT Computes the gradient using "finite differences"
+%and gives us a numerical estimate of the gradient.
+% numgrad = COMPUTENUMERICALGRADIENT(J, theta) computes the numerical
+% gradient of the function J around theta. Calling y = J(theta) should
+% return the function value at theta.
+
+% Notes: The following code implements numerical gradient checking, and
+% returns the numerical gradient. It sets numgrad(i) to (a numerical
+% approximation of) the partial derivative of J with respect to the
+% i-th input argument, evaluated at theta. (i.e., numgrad(i) should
+% be (approximately) the partial derivative of J with respect
+% to theta(i).)
+%
+
+numgrad = zeros(size(theta));
+perturb = zeros(size(theta));
+e = 1e-4;
+for p = 1:numel(theta)
+ % Set perturbation vector
+ perturb(p) = e;
+ loss1 = J(theta - perturb);
+ loss2 = J(theta + perturb);
+ % Compute Numerical Gradient
+ numgrad(p) = (loss2 - loss1) / (2*e);
+ perturb(p) = 0;
+end
+
+end
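The symmetric (central) difference used here has truncation error of order e^2, versus order e for a one-sided difference, which is why e = 1e-4 already gives roughly eight significant digits. A small sketch of that gap in accuracy:

% Central vs. forward differences for J(t) = t^3 at t = 1 (exact
% derivative is 3); the central error is ~e^2, the forward error ~3e.
J = @(t) t .^ 3;
e = 1e-4;
central = (J(1 + e) - J(1 - e)) / (2 * e);
forward = (J(1 + e) - J(1)) / e;
fprintf('central error: %g\n', abs(central - 3));   % ~1e-8
fprintf('forward error: %g\n', abs(forward - 3));   % ~3e-4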
22 mlclass-ex4/mlclass-ex4/debugInitializeWeights.m
@@ -0,0 +1,22 @@
+function W = debugInitializeWeights(fan_out, fan_in)
+%DEBUGINITIALIZEWEIGHTS Initialize the weights of a layer with fan_in
+%incoming connections and fan_out outgoing connections using a fixed
+%strategy; this will help you later in debugging
+% W = DEBUGINITIALIZEWEIGHTS(fan_out, fan_in) initializes the weights
+% of a layer with fan_in incoming connections and fan_out outgoing
+% connections using a fixed set of values
+%
+% Note that W should be set to a matrix of size(fan_out, 1 + fan_in) as
+% the first column of W handles the "bias" terms
+%
+
+% Set W to zeros
+W = zeros(fan_out, 1 + fan_in);
+
+% Initialize W using "sin", this ensures that W is always of the same
+% values and will be useful for debugging
+W = reshape(sin(1:numel(W)), size(W)) / 10;
+
+% =========================================================================
+
+end
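Because the initialization is a fixed function of the matrix size, repeated runs of the gradient check produce identical numbers, which makes diffing debug output meaningful. A quick sketch of that property:

% The sin-based initialization is deterministic: two calls with the
% same dimensions return identical matrices.
W1 = debugInitializeWeights(5, 3);
W2 = debugInitializeWeights(5, 3);
assert(isequal(W1, W2));
disp(size(W1));   % 5 x 4: fan_out rows, 1 + fan_in columns (bias first)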
59 mlclass-ex4/mlclass-ex4/displayData.m
@@ -0,0 +1,59 @@
+function [h, display_array] = displayData(X, example_width)
+%DISPLAYDATA Display 2D data in a nice grid
+% [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
+% stored in X in a nice grid. It returns the figure handle h and the
+% displayed array if requested.
+
+% Set example_width automatically if not passed in
+if ~exist('example_width', 'var') || isempty(example_width)
+ example_width = round(sqrt(size(X, 2)));
+end
+
+% Gray Image
+colormap(gray);
+
+% Compute rows, cols
+[m n] = size(X);
+example_height = (n / example_width);
+
+% Compute number of items to display
+display_rows = floor(sqrt(m));
+display_cols = ceil(m / display_rows);
+
+% Between images padding
+pad = 1;
+
+% Setup blank display
+display_array = - ones(pad + display_rows * (example_height + pad), ...
+ pad + display_cols * (example_width + pad));
+
+% Copy each example into a patch on the display array
+curr_ex = 1;
+for j = 1:display_rows
+ for i = 1:display_cols
+ if curr_ex > m,
+ break;
+ end
+ % Copy the patch
+
+ % Get the max value of the patch
+ max_val = max(abs(X(curr_ex, :)));
+ display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
+ pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
+ reshape(X(curr_ex, :), example_height, example_width) / max_val;
+ curr_ex = curr_ex + 1;
+ end
+ if curr_ex > m,
+ break;
+ end
+end
+
+% Display Image
+h = imagesc(display_array, [-1 1]);
+
+% Do not show axis
+axis image off
+
+drawnow;
+
+end
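For the ex4 data (100 selected 20x20 digits) the tiling arithmetic above yields a 10x10 grid, and with one pixel of padding the display array comes out 211 pixels square. A sketch of that computation in isolation:

% Display-grid arithmetic for 100 examples of 20x20 pixels with pad = 1.
m = 100; example_width = 20; example_height = 20; pad = 1;
display_rows = floor(sqrt(m));                       % 10
display_cols = ceil(m / display_rows);               % 10
fprintf('display array: %d x %d\n', ...
        pad + display_rows * (example_height + pad), ...
        pad + display_cols * (example_width + pad)); % 211 x 211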
234 mlclass-ex4/mlclass-ex4/ex4.m
@@ -0,0 +1,234 @@
+%% Machine Learning Online Class - Exercise 4 Neural Network Learning
+
+% Instructions
+% ------------
+%
+% This file contains code that helps you get started on the
+% exercise. You will need to complete the following functions
+% in this exercise:
+%
+% sigmoidGradient.m
+% randInitializeWeights.m
+% nnCostFunction.m
+%
+% For this exercise, you will not need to change any code in this file,
+% or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% Setup the parameters you will use for this exercise
+input_layer_size = 400; % 20x20 Input Images of Digits
+hidden_layer_size = 25; % 25 hidden units
+num_labels = 10; % 10 labels, from 1 to 10
+ % (note that we have mapped "0" to label 10)
+
+%% =========== Part 1: Loading and Visualizing Data =============
+% We start the exercise by first loading and visualizing the dataset.
+% You will be working with a dataset that contains handwritten digits.
+%
+
+% Load Training Data
+fprintf('Loading and Visualizing Data ...\n')
+
+load('ex4data1.mat');
+m = size(X, 1);
+
+% Randomly select 100 data points to display
+sel = randperm(size(X, 1));
+sel = sel(1:100);
+
+displayData(X(sel, :));
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 2: Loading Parameters ================
+% In this part of the exercise, we load some pre-initialized
+% neural network parameters.
+
+fprintf('\nLoading Saved Neural Network Parameters ...\n')
+
+% Load the weights into variables Theta1 and Theta2
+load('ex4weights.mat');
+
+% Unroll parameters
+nn_params = [Theta1(:) ; Theta2(:)];
+
+%% ================ Part 3: Compute Cost (Feedforward) ================
+% To use the neural network, you should first start by implementing the
+% feedforward part of the neural network that returns the cost only. You
+% should complete the code in nnCostFunction.m to return cost. After
+% implementing the feedforward to compute the cost, you can verify that
+% your implementation is correct by verifying that you get the same cost
+% as us for the fixed debugging parameters.
+%
+% We suggest implementing the feedforward cost *without* regularization
+% first so that it will be easier for you to debug. Later, in part 4, you
+% will get to implement the regularized cost.
+%
+fprintf('\nFeedforward Using Neural Network ...\n')
+
+% Weight regularization parameter (we set this to 0 here).
+lambda = 0;
+
+J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
+ num_labels, X, y, lambda);
+
+fprintf(['Cost at parameters (loaded from ex4weights): %f '...
+ '\n(this value should be about 0.287629)\n'], J);
+
+fprintf('\nProgram paused. Press enter to continue.\n');
+pause;
+
+%% =============== Part 4: Implement Regularization ===============
+% Once your cost function implementation is correct, you should now
+% continue to implement the regularization with the cost.
+%
+
+fprintf('\nChecking Cost Function (w/ Regularization) ... \n')
+
+% Weight regularization parameter (we set this to 1 here).
+lambda = 1;
+
+J = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, ...
+ num_labels, X, y, lambda);
+
+fprintf(['Cost at parameters (loaded from ex4weights): %f '...
+ '\n(this value should be about 0.383770)\n'], J);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 5: Sigmoid Gradient ================
+% Before you start implementing the neural network, you will first
+% implement the gradient for the sigmoid function. You should complete the
+% code in the sigmoidGradient.m file.
+%
+
+fprintf('\nEvaluating sigmoid gradient...\n')
+
+g = sigmoidGradient([1 -0.5 0 0.5 1]);
+fprintf('Sigmoid gradient evaluated at [1 -0.5 0 0.5 1]:\n ');
+fprintf('%f ', g);
+fprintf('\n\n');
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================ Part 6: Initializing Parameters ================
+% In this part of the exercise, you will be starting to implement a two-
+% layer neural network that classifies digits. You will start by
+% implementing a function to initialize the weights of the neural network
+% (randInitializeWeights.m)
+
+fprintf('\nInitializing Neural Network Parameters ...\n')
+
+initial_Theta1 = randInitializeWeights(input_layer_size, hidden_layer_size);
+initial_Theta2 = randInitializeWeights(hidden_layer_size, num_labels);
+
+% Unroll parameters
+initial_nn_params = [initial_Theta1(:) ; initial_Theta2(:)];
+
+
+%% =============== Part 7: Implement Backpropagation ===============
+% Once your cost matches up with ours, you should proceed to implement the
+% backpropagation algorithm for the neural network. You should add to the
+% code you've written in nnCostFunction.m to return the partial
+% derivatives of the parameters.
+%
+fprintf('\nChecking Backpropagation... \n');
+
+% Check gradients by running checkNNGradients
+checkNNGradients;
+
+fprintf('\nProgram paused. Press enter to continue.\n');
+pause;
+
+
+%% =============== Part 8: Implement Regularization ===============
+% Once your backpropagation implementation is correct, you should now
+% continue to implement the regularization with the cost and gradient.
+%
+
+fprintf('\nChecking Backpropagation (w/ Regularization) ... \n')
+
+% Check gradients by running checkNNGradients
+lambda = 3;
+checkNNGradients(lambda);
+
+% Also output the costFunction debugging values
+debug_J = nnCostFunction(nn_params, input_layer_size, ...
+ hidden_layer_size, num_labels, X, y, lambda);
+
+fprintf(['\n\nCost at (fixed) debugging parameters (w/ lambda = 3): %f ' ...
+ '\n(this value should be about 0.576051)\n\n'], debug_J);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% =================== Part 9: Training NN ===================
+% You have now implemented all the code necessary to train a neural
+% network. To train your neural network, we will now use "fmincg", which
+% is a function which works similarly to "fminunc". Recall that these
+% advanced optimizers are able to minimize our cost functions efficiently as
+% long as we provide them with the gradient computations.
+%
+fprintf('\nTraining Neural Network... \n')
+
+% After you have completed the assignment, change the MaxIter to a larger
+% value to see how more training helps.
+options = optimset('MaxIter', 50);
+
+% You should also try different values of lambda
+lambda = 1;
+
+% Create "short hand" for the cost function to be minimized
+costFunction = @(p) nnCostFunction(p, ...
+ input_layer_size, ...
+ hidden_layer_size, ...
+ num_labels, X, y, lambda);
+
+% Now, costFunction is a function that takes in only one argument (the
+% neural network parameters)
+[nn_params, cost] = fmincg(costFunction, initial_nn_params, options);
+
+% Obtain Theta1 and Theta2 back from nn_params
+Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
+ hidden_layer_size, (input_layer_size + 1));
+
+Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
+ num_labels, (hidden_layer_size + 1));
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================= Part 10: Visualize Weights =================
+% You can now "visualize" what the neural network is learning by
+% displaying the hidden units to see what features they are capturing in
+% the data.
+
+fprintf('\nVisualizing Neural Network... \n')
+
+displayData(Theta1(:, 2:end));
+
+fprintf('\nProgram paused. Press enter to continue.\n');
+pause;
+
+%% ================= Part 11: Implement Predict =================
+% After training the neural network, we would like to use it to predict
+% the labels. You will now implement the "predict" function to use the
+% neural network to predict the labels of the training set. This lets
+% you compute the training set accuracy.
+
+pred = predict(Theta1, Theta2, X);
+
+fprintf('\nTraining Set Accuracy: %f\n', mean(double(pred == y)) * 100);
+
+
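The unroll-then-reshape pattern used throughout ex4.m is lossless as long as the element counts line up. A minimal sketch of the round trip with the ex4 layer sizes:

% Unrolling two weight matrices into one vector and reshaping them back
% recovers the originals exactly (ex4 layer sizes shown).
Theta1 = rand(25, 401);                 % hidden_layer_size x (input_layer_size + 1)
Theta2 = rand(10, 26);                  % num_labels x (hidden_layer_size + 1)
nn_params = [Theta1(:) ; Theta2(:)];    % column-major unroll
T1 = reshape(nn_params(1:25*401), 25, 401);
T2 = reshape(nn_params(25*401 + 1:end), 10, 26);
assert(isequal(T1, Theta1) && isequal(T2, Theta2));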
BIN  mlclass-ex4/mlclass-ex4/ex4data1.mat
Binary file not shown
BIN  mlclass-ex4/mlclass-ex4/ex4weights.mat
Binary file not shown
175 mlclass-ex4/mlclass-ex4/fmincg.m
@@ -0,0 +1,175 @@
+function [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
+% Minimize a continuous differentiable multivariate function. Starting point
+% is given by "X" (D by 1), and the function named in the string "f" must
+% return a function value and a vector of partial derivatives. The Polack-
+% Ribiere flavour of conjugate gradients is used to compute search directions,
+% and a line search using quadratic and cubic polynomial approximations and the
+% Wolfe-Powell stopping criteria is used together with the slope ratio method
+% for guessing initial step sizes. Additionally a bunch of checks are made to
+% make sure that exploration is taking place and that extrapolation will not
+% be unboundedly large. The "length" gives the length of the run: if it is
+% positive, it gives the maximum number of line searches, if negative its
+% absolute gives the maximum allowed number of function evaluations. You can
+% (optionally) give "length" a second component, which will indicate the
+% reduction in function value to be expected in the first line-search (defaults
+% to 1.0). The function returns when either its length is up, or if no further
+% progress can be made (ie, we are at a minimum, or so close that due to
+% numerical problems, we cannot get any closer). If the function terminates
+% within a few iterations, it could be an indication that the function value
+% and derivatives are not consistent (ie, there may be a bug in the
+% implementation of your "f" function). The function returns the found
+% solution "X", a vector of function values "fX" indicating the progress made
+% and "i" the number of iterations (line searches or function evaluations,
+% depending on the sign of "length") used.
+%
+% Usage: [X, fX, i] = fmincg(f, X, options, P1, P2, P3, P4, P5)
+%
+% See also: checkgrad
+%
+% Copyright (C) 2001 and 2002 by Carl Edward Rasmussen. Date 2002-02-13
+%
+%
+% (C) Copyright 1999, 2000 & 2001, Carl Edward Rasmussen
+%
+% Permission is granted for anyone to copy, use, or modify these
+% programs and accompanying documents for purposes of research or
+% education, provided this copyright notice is retained, and note is
+% made of any changes that have been made.
+%
+% These programs and documents are distributed without any warranty,
+% express or implied. As the programs were written for research
+% purposes only, they have not been tested to the degree that would be
+% advisable in any important application. All use of these programs is
+% entirely at the user's own risk.
+%
+% [ml-class] Changes Made:
+% 1) Function name and argument specifications
+% 2) Output display
+%
+
+% Read options
+if exist('options', 'var') && ~isempty(options) && isfield(options, 'MaxIter')
+ length = options.MaxIter;
+else
+ length = 100;
+end
+
+
+RHO = 0.01; % a bunch of constants for line searches
+SIG = 0.5; % RHO and SIG are the constants in the Wolfe-Powell conditions
+INT = 0.1; % don't reevaluate within 0.1 of the limit of the current bracket
+EXT = 3.0; % extrapolate maximum 3 times the current bracket
+MAX = 20; % max 20 function evaluations per line search
+RATIO = 100; % maximum allowed slope ratio
+
+argstr = ['feval(f, X']; % compose string used to call function
+for i = 1:(nargin - 3)
+ argstr = [argstr, ',P', int2str(i)];
+end
+argstr = [argstr, ')'];
+
+if max(size(length)) == 2, red=length(2); length=length(1); else red=1; end
+S=['Iteration '];
+
+i = 0; % zero the run length counter
+ls_failed = 0; % no previous line search has failed
+fX = [];
+[f1 df1] = eval(argstr); % get function value and gradient
+i = i + (length<0); % count epochs?!
+s = -df1; % search direction is steepest
+d1 = -s'*s; % this is the slope
+z1 = red/(1-d1); % initial step is red/(|s|+1)
+
+while i < abs(length) % while not finished
+ i = i + (length>0); % count iterations?!
+
+ X0 = X; f0 = f1; df0 = df1; % make a copy of current values
+ X = X + z1*s; % begin line search
+ [f2 df2] = eval(argstr);
+ i = i + (length<0); % count epochs?!
+ d2 = df2'*s;
+ f3 = f1; d3 = d1; z3 = -z1; % initialize point 3 equal to point 1
+ if length>0, M = MAX; else M = min(MAX, -length-i); end
+ success = 0; limit = -1; % initialize quantities
+ while 1
+ while ((f2 > f1+z1*RHO*d1) | (d2 > -SIG*d1)) & (M > 0)
+ limit = z1; % tighten the bracket
+ if f2 > f1
+ z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3); % quadratic fit
+ else
+ A = 6*(f2-f3)/z3+3*(d2+d3); % cubic fit
+ B = 3*(f3-f2)-z3*(d3+2*d2);
+ z2 = (sqrt(B*B-A*d2*z3*z3)-B)/A; % numerical error possible - ok!
+ end
+ if isnan(z2) | isinf(z2)
+ z2 = z3/2; % if we had a numerical problem then bisect
+ end
+ z2 = max(min(z2, INT*z3),(1-INT)*z3); % don't accept too close to limits
+ z1 = z1 + z2; % update the step
+ X = X + z2*s;
+ [f2 df2] = eval(argstr);
+ M = M - 1; i = i + (length<0); % count epochs?!
+ d2 = df2'*s;
+ z3 = z3-z2; % z3 is now relative to the location of z2
+ end
+ if f2 > f1+z1*RHO*d1 | d2 > -SIG*d1
+ break; % this is a failure
+ elseif d2 > SIG*d1
+ success = 1; break; % success
+ elseif M == 0
+ break; % failure
+ end
+ A = 6*(f2-f3)/z3+3*(d2+d3); % make cubic extrapolation
+ B = 3*(f3-f2)-z3*(d3+2*d2);
+ z2 = -d2*z3*z3/(B+sqrt(B*B-A*d2*z3*z3)); % num. error possible - ok!
+ if ~isreal(z2) | isnan(z2) | isinf(z2) | z2 < 0 % num prob or wrong sign?
+ if limit < -0.5 % if we have no upper limit
+ z2 = z1 * (EXT-1); % then extrapolate the maximum amount
+ else
+ z2 = (limit-z1)/2; % otherwise bisect
+ end
+ elseif (limit > -0.5) & (z2+z1 > limit) % extrapolation beyond max?
+ z2 = (limit-z1)/2; % bisect
+ elseif (limit < -0.5) & (z2+z1 > z1*EXT) % extrapolation beyond limit
+ z2 = z1*(EXT-1.0); % set to extrapolation limit
+ elseif z2 < -z3*INT
+ z2 = -z3*INT;
+ elseif (limit > -0.5) & (z2 < (limit-z1)*(1.0-INT)) % too close to limit?
+ z2 = (limit-z1)*(1.0-INT);
+ end
+ f3 = f2; d3 = d2; z3 = -z2; % set point 3 equal to point 2
+ z1 = z1 + z2; X = X + z2*s; % update current estimates
+ [f2 df2] = eval(argstr);
+ M = M - 1; i = i + (length<0); % count epochs?!
+ d2 = df2'*s;
+ end % end of line search
+
+ if success % if line search succeeded
+ f1 = f2; fX = [fX' f1]';
+ fprintf('%s %4i | Cost: %4.6e\r', S, i, f1);
+ s = (df2'*df2-df1'*df2)/(df1'*df1)*s - df2; % Polack-Ribiere direction
+ tmp = df1; df1 = df2; df2 = tmp; % swap derivatives
+ d2 = df1'*s;
+ if d2 > 0 % new slope must be negative
+ s = -df1; % otherwise use steepest direction
+ d2 = -s'*s;
+ end
+ z1 = z1 * min(RATIO, d1/(d2-realmin)); % slope ratio but max RATIO
+ d1 = d2;
+ ls_failed = 0; % this line search did not fail
+ else
+ X = X0; f1 = f0; df1 = df0; % restore point from before failed line search
+ if ls_failed | i > abs(length) % line search failed twice in a row
+ break; % or we ran out of time, so we give up
+ end
+ tmp = df1; df1 = df2; df2 = tmp; % swap derivatives
+ s = -df1; % try steepest
+ d1 = -s'*s;
+ z1 = 1/(1-d1);
+ ls_failed = 1; % this line search failed
+ end
+ if exist('OCTAVE_VERSION')
+ fflush(stdout);
+ end
+end
+fprintf('\n');
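fmincg only needs a handle that returns both the cost and its gradient, the same contract nnCostFunction must satisfy once backpropagation is in place. A minimal sketch on a quadratic with a known minimum:

% Minimizing a simple quadratic with fmincg; the handle returns
% [cost, gradient], matching the interface used in ex4.m.
quadCost = @(x) deal(0.5 * (x - 3)' * (x - 3), x - 3);   % minimum at x = [3; 3]
options  = optimset('MaxIter', 25);
[x_opt, fX] = fmincg(quadCost, [0; 0], options);
disp(x_opt');   % expect approximately [3 3]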
91 mlclass-ex4/mlclass-ex4/nnCostFunction.m
@@ -0,0 +1,91 @@
+function [J grad] = nnCostFunction(nn_params, ...
+ input_layer_size, ...
+ hidden_layer_size, ...
+ num_labels, ...
+ X, y, lambda)
+%NNCOSTFUNCTION Implements the neural network cost function for a two layer
+%neural network which performs classification
+% [J grad] = NNCOSTFUNCTION(nn_params, input_layer_size, hidden_layer_size, ...
+% num_labels, X, y, lambda) computes the cost and gradient of the neural
+% network. The parameters for the neural network are "unrolled" into the
+% vector nn_params and need to be converted back into the weight matrices.
+%
+% The returned parameter grad should be an "unrolled" vector of the
+% partial derivatives of the neural network.
+%
+
+% Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices
+% for our 2 layer neural network
+Theta1 = reshape(nn_params(1:hidden_layer_size * (input_layer_size + 1)), ...
+ hidden_layer_size, (input_layer_size + 1));
+
+Theta2 = reshape(nn_params((1 + (hidden_layer_size * (input_layer_size + 1))):end), ...
+ num_labels, (hidden_layer_size + 1));
+
+% Setup some useful variables
+m = size(X, 1);
+
+% You need to return the following variables correctly
+J = 0;
+Theta1_grad = zeros(size(Theta1));
+Theta2_grad = zeros(size(Theta2));
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: You should complete the code by working through the
+% following parts.
+%
+% Part 1: Feedforward the neural network and return the cost in the
+% variable J. After implementing Part 1, you can verify that your
+% cost function computation is correct by verifying the cost
+% computed in ex4.m
+%
+% Part 2: Implement the backpropagation algorithm to compute the gradients
+% Theta1_grad and Theta2_grad. You should return the partial derivatives of
+% the cost function with respect to Theta1 and Theta2 in Theta1_grad and
+% Theta2_grad, respectively. After implementing Part 2, you can check
+% that your implementation is correct by running checkNNGradients
+%
+% Note: The vector y passed into the function is a vector of labels
+% containing values from 1..K. You need to map this vector into a
+% binary vector of 1's and 0's to be used with the neural network
+% cost function.
+%
+% Hint: We recommend implementing backpropagation using a for-loop
+% over the training examples if you are implementing it for the
+% first time.
+%
+% Part 3: Implement regularization with the cost function and gradients.
+%
+% Hint: You can implement this around the code for
+% backpropagation. That is, you can compute the gradients for
+% the regularization separately and then add them to Theta1_grad
+% and Theta2_grad from Part 2.
+%
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+% -------------------------------------------------------------
+
+% =========================================================================
+
+% Unroll gradients
+grad = [Theta1_grad(:) ; Theta2_grad(:)];
+
+
+end
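As the commit message notes, the exercise is not solved, so the YOUR CODE HERE block above is intentionally empty. Purely as an illustration of Part 1, here is one possible vectorized feedforward cost (lambda = 0) on tiny synthetic data, with an inline sigmoid so the snippet stands alone; this is a sketch, not the graded solution:

% One possible unregularized feedforward cost (Part 1) on toy data.
m = 5; num_labels = 3;
X = rand(m, 4); y = [1 2 3 1 2]';
Theta1 = rand(6, 5) - 0.5;               % 6 hidden units, 4 inputs + bias
Theta2 = rand(num_labels, 7) - 0.5;      % 3 outputs, 6 hidden + bias
sig = @(z) 1 ./ (1 + exp(-z));           % inline stand-in for sigmoid.m
a1 = [ones(m, 1) X];                     % add bias column
a2 = [ones(m, 1) sig(a1 * Theta1')];     % hidden-layer activations
h  = sig(a2 * Theta2');                  % m x num_labels hypotheses
I  = eye(num_labels); Y = I(y, :);       % one-hot rows from labels
J  = (1/m) * sum(sum(-Y .* log(h) - (1 - Y) .* log(1 - h)))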
20 mlclass-ex4/mlclass-ex4/predict.m
@@ -0,0 +1,20 @@
+function p = predict(Theta1, Theta2, X)
+%PREDICT Predict the label of an input given a trained neural network
+% p = PREDICT(Theta1, Theta2, X) outputs the predicted label of X given the
+% trained weights of a neural network (Theta1, Theta2)
+
+% Useful values
+m = size(X, 1);
+num_labels = size(Theta2, 1);
+
+% You need to return the following variables correctly
+p = zeros(size(X, 1), 1);
+
+h1 = sigmoid([ones(m, 1) X] * Theta1');
+h2 = sigmoid([ones(m, 1) h1] * Theta2');
+[dummy, p] = max(h2, [], 2);
+
+% =========================================================================
+
+
+end
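The row-wise max over h2 is what turns the output-layer activations into class labels. A two-example sketch:

% max along dimension 2 returns the index of the largest activation
% per row, i.e. the predicted class for each example.
h2 = [0.1 0.7 0.2 ;
      0.6 0.3 0.1];               % two examples, three classes
[dummy, p] = max(h2, [], 2);
disp(p');                         % expect [2 1]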
32 mlclass-ex4/mlclass-ex4/randInitializeWeights.m
@@ -0,0 +1,32 @@
+function W = randInitializeWeights(L_in, L_out)
+%RANDINITIALIZEWEIGHTS Randomly initialize the weights of a layer with L_in
+%incoming connections and L_out outgoing connections
+% W = RANDINITIALIZEWEIGHTS(L_in, L_out) randomly initializes the weights
+% of a layer with L_in incoming connections and L_out outgoing
+% connections.
+%
+% Note that W should be set to a matrix of size(L_out, 1 + L_in) as
+% the first column of W handles the "bias" terms
+%
+
+% You need to return the following variables correctly
+W = zeros(L_out, 1 + L_in);
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Initialize W randomly so that we break the symmetry while
+% training the neural network.
+%
+% Note: The first column of W corresponds to the parameters for the bias units
+%
+
+
+
+
+
+
+
+
+
+% =========================================================================
+
+end
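One common symmetry-breaking strategy is to draw each weight uniformly from a small interval around zero; epsilon_init = 0.12 is a typical choice for this layer geometry. A sketch (not the commit's solution):

% Uniform initialization in [-epsilon_init, epsilon_init]; 0.12 is a
% typical choice for this architecture.
L_in = 3; L_out = 5;
epsilon_init = 0.12;
W = rand(L_out, 1 + L_in) * 2 * epsilon_init - epsilon_init;
disp(size(W));   % 5 x 4; the first column multiplies the bias units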
6 mlclass-ex4/mlclass-ex4/sigmoid.m
@@ -0,0 +1,6 @@
+function g = sigmoid(z)
+%SIGMOID Compute sigmoid function
+% g = SIGMOID(z) computes the sigmoid of z.
+
+g = 1.0 ./ (1.0 + exp(-z));
+end
33 mlclass-ex4/mlclass-ex4/sigmoidGradient.m
@@ -0,0 +1,33 @@
+function g = sigmoidGradient(z)
+%SIGMOIDGRADIENT returns the gradient of the sigmoid function
+%evaluated at z
+% g = SIGMOIDGRADIENT(z) computes the gradient of the sigmoid function
+% evaluated at z. This should work regardless of whether z is a matrix or a
+% vector. In particular, if z is a vector or matrix, you should return
+% the gradient for each element.
+
+g = zeros(size(z));
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Compute the gradient of the sigmoid function evaluated at
+% each value of z (z can be a matrix, vector or scalar).
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+% =============================================================
+
+
+
+
+end
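The sigmoid derivative has the closed form g(z)(1 - g(z)), applied element-wise so it works for scalars, vectors, and matrices alike. A self-contained sketch with an inline sigmoid:

% Element-wise sigmoid gradient: sigmoid(z) .* (1 - sigmoid(z)).
sig = @(z) 1 ./ (1 + exp(-z));
sigGrad = @(z) sig(z) .* (1 - sig(z));
disp(sigGrad([1 -0.5 0 0.5 1]));   % maximum value 0.25 occurs at z = 0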
337 mlclass-ex4/mlclass-ex4/submit.m
@@ -0,0 +1,337 @@
+function submit(partId)
+%SUBMIT Submit your code and output to the ml-class servers
+% SUBMIT() will connect to the ml-class server and submit your solution
+
+ fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ...
+ homework_id());
+ if ~exist('partId', 'var') || isempty(partId)
+ partId = promptPart();
+ end
+
+ % Check valid partId
+ partNames = validParts();
+ if ~isValidPartId(partId)
+ fprintf('!! Invalid homework part selected.\n');
+ fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames) + 1);
+ fprintf('!! Submission Cancelled\n');
+ return
+ end
+
+ [login password] = loginPrompt();
+ if isempty(login)
+ fprintf('!! Submission Cancelled\n');
+ return
+ end
+
+ fprintf('\n== Connecting to ml-class ... ');
+ if exist('OCTAVE_VERSION')
+ fflush(stdout);
+ end
+
+ % Setup submit list
+ if partId == numel(partNames) + 1
+ submitParts = 1:numel(partNames);
+ else
+ submitParts = [partId];
+ end
+
+ for s = 1:numel(submitParts)
+ % Submit this part
+ partId = submitParts(s);
+
+ % Get Challenge
+ [login, ch, signature] = getChallenge(login);
+ if isempty(login) || isempty(ch) || isempty(signature)
+ % Some error occurred, error string in first return element.
+ fprintf('\n!! Error: %s\n\n', login);
+ return
+ end
+
+ % Attempt Submission with Challenge
+ ch_resp = challengeResponse(login, password, ch);
+ [result, str] = submitSolution(login, ch_resp, partId, output(partId), ...
+ source(partId), signature);
+
+ fprintf('\n== [ml-class] Submitted Homework %s - Part %d - %s\n', ...
+ homework_id(), partId, partNames{partId});
+ fprintf('== %s\n', strtrim(str));
+ if exist('OCTAVE_VERSION')
+ fflush(stdout);
+ end
+ end
+
+end
+
+% ================== CONFIGURABLES FOR EACH HOMEWORK ==================
+
+function id = homework_id()
+ id = '4';
+end
+
+function [partNames] = validParts()
+ partNames = { 'Feedforward and Cost Function', ...
+ 'Regularized Cost Function', ...
+ 'Sigmoid Gradient', ...
+ 'Neural Network Gradient (Backpropagation)' ...
+ 'Regularized Gradient' ...
+ };
+end
+
+function srcs = sources()
+ % Separated by part
+ srcs = { { 'nnCostFunction.m' }, ...
+ { 'nnCostFunction.m' }, ...
+ { 'sigmoidGradient.m' }, ...
+ { 'nnCostFunction.m' }, ...
+ { 'nnCostFunction.m' } };
+end
+
+function out = output(partId)
+ % Random Test Cases
+ X = reshape(3 * sin(1:1:30), 3, 10);
+ Xm = reshape(sin(1:32), 16, 2) / 5;
+ ym = 1 + mod(1:16,4)';
+ t1 = sin(reshape(1:2:24, 4, 3));
+ t2 = cos(reshape(1:2:40, 4, 5));
+ t = [t1(:) ; t2(:)];
+ if partId == 1
+ [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
+ out = sprintf('%0.5f ', J);
+ elseif partId == 2
+ [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
+ out = sprintf('%0.5f ', J);
+ elseif partId == 3
+ out = sprintf('%0.5f ', sigmoidGradient(X));
+ elseif partId == 4
+ [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
+ out = sprintf('%0.5f ', J);
+ out = [out sprintf('%0.5f ', grad)];
+ elseif partId == 5
+ [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
+ out = sprintf('%0.5f ', J);
+ out = [out sprintf('%0.5f ', grad)];
+ end
+end
+
+function url = challenge_url()
+ url = 'http://www.ml-class.org/course/homework/challenge';
+end
+
+function url = submit_url()
+ url = 'http://www.ml-class.org/course/homework/submit';
+end
+
+% ========================= CHALLENGE HELPERS =========================
+
+function src = source(partId)
+ src = '';
+ src_files = sources();
+ if partId <= numel(src_files)
+ flist = src_files{partId};
+ for i = 1:numel(flist)
+ fid = fopen(flist{i});
+ while ~feof(fid)
+ line = fgets(fid);
+ src = [src line];
+ end
+ fclose(fid);
+ src = [src '||||||||'];
+ end
+ end
+end
+
+function ret = isValidPartId(partId)
+ partNames = validParts();
+ ret = (~isempty(partId)) && (partId >= 1) && (partId <= numel(partNames) + 1);
+end
+
+function partId = promptPart()
+ fprintf('== Select which part(s) to submit:\n', ...
+ homework_id());
+ partNames = validParts();
+ srcFiles = sources();
+ for i = 1:numel(partNames)
+ fprintf('== %d) %s [', i, partNames{i});
+ fprintf(' %s ', srcFiles{i}{:});
+ fprintf(']\n');
+ end
+ fprintf('== %d) All of the above \n==\nEnter your choice [1-%d]: ', ...
+ numel(partNames) + 1, numel(partNames) + 1);
+ selPart = input('', 's');
+ partId = str2num(selPart);
+ if ~isValidPartId(partId)
+ partId = -1;
+ end
+end
+
+function [email,ch,signature] = getChallenge(email)
+ str = urlread(challenge_url(), 'post', {'email_address', email});
+
+ str = strtrim(str);
+ [email, str] = strtok (str, '|');
+ [ch, str] = strtok (str, '|');
+ [signature, str] = strtok (str, '|');
+end
+
+
+function [result, str] = submitSolution(email, ch_resp, part, output, ...
+ source, signature)
+
+ params = {'homework', homework_id(), ...
+ 'part', num2str(part), ...
+ 'email', email, ...
+ 'output', output, ...
+ 'source', source, ...
+ 'challenge_response', ch_resp, ...
+ 'signature', signature};
+
+ str = urlread(submit_url(), 'post', params);
+
+ % Parse str to read for success / failure
+ result = 0;
+
+end
+
+% =========================== LOGIN HELPERS ===========================
+
+function [login password] = loginPrompt()
+ % Prompt for password
+ [login password] = basicPrompt();
+
+ if isempty(login) || isempty(password)
+ login = []; password = [];
+ end
+end
+
+
+function [login password] = basicPrompt()
+ login = input('Login (Email address): ', 's');
+ password = input('Password: ', 's');
+end
+
+
+function [str] = challengeResponse(email, passwd, challenge)
+ salt = ')~/|]QMB3[!W`?OVt7qC"@+}';
+ str = sha1([challenge sha1([salt email passwd])]);
+ sel = randperm(numel(str));
+ sel = sort(sel(1:16));
+ str = str(sel);
+end
+
+
+% =============================== SHA-1 ================================
+
+function hash = sha1(str)
+
+ % Initialize variables
+ h0 = uint32(1732584193);
+ h1 = uint32(4023233417);
+ h2 = uint32(2562383102);
+ h3 = uint32(271733878);
+ h4 = uint32(3285377520);
+
+ % Convert to word array
+ strlen = numel(str);
+
+ % Break string into chars and append the bit 1 to the message
+ mC = [double(str) 128];
+ mC = [mC zeros(1, 4-mod(numel(mC), 4), 'uint8')];
+
+ numB = strlen * 8;
+ if exist('idivide')
+ numC = idivide(uint32(numB + 65), 512, 'ceil');
+ else
+ numC = ceil(double(numB + 65)/512);
+ end
+ numW = numC * 16;
+ mW = zeros(numW, 1, 'uint32');
+
+ idx = 1;
+ for i = 1:4:strlen + 1
+ mW(idx) = bitor(bitor(bitor( ...
+ bitshift(uint32(mC(i)), 24), ...
+ bitshift(uint32(mC(i+1)), 16)), ...
+ bitshift(uint32(mC(i+2)), 8)), ...
+ uint32(mC(i+3)));
+ idx = idx + 1;
+ end
+
+ % Append length of message
+ mW(numW - 1) = uint32(bitshift(uint64(numB), -32));
+ mW(numW) = uint32(bitshift(bitshift(uint64(numB), 32), -32));
+
+ % Process the message in successive 512-bit chs
+ for cId = 1 : double(numC)
+ cSt = (cId - 1) * 16 + 1;
+ cEnd = cId * 16;
+ ch = mW(cSt : cEnd);
+
+ % Extend the sixteen 32-bit words into eighty 32-bit words
+ for j = 17 : 80
+ ch(j) = ch(j - 3);
+ ch(j) = bitxor(ch(j), ch(j - 8));
+ ch(j) = bitxor(ch(j), ch(j - 14));
+ ch(j) = bitxor(ch(j), ch(j - 16));
+ ch(j) = bitrotate(ch(j), 1);
+ end
+
+ % Initialize hash value for this ch
+ a = h0;
+ b = h1;
+ c = h2;
+ d = h3;
+ e = h4;
+
+ % Main loop
+ for i = 1 : 80
+ if(i >= 1 && i <= 20)
+ f = bitor(bitand(b, c), bitand(bitcmp(b), d));
+ k = uint32(1518500249);
+ elseif(i >= 21 && i <= 40)
+ f = bitxor(bitxor(b, c), d);
+ k = uint32(1859775393);
+ elseif(i >= 41 && i <= 60)
+ f = bitor(bitor(bitand(b, c), bitand(b, d)), bitand(c, d));
+ k = uint32(2400959708);
+ elseif(i >= 61 && i <= 80)
+ f = bitxor(bitxor(b, c), d);
+ k = uint32(3395469782);
+ end
+
+ t = bitrotate(a, 5);
+ t = bitadd(t, f);
+ t = bitadd(t, e);
+ t = bitadd(t, k);
+ t = bitadd(t, ch(i));
+ e = d;
+ d = c;
+ c = bitrotate(b, 30);
+ b = a;
+ a = t;
+
+ end
+ h0 = bitadd(h0, a);
+ h1 = bitadd(h1, b);
+ h2 = bitadd(h2, c);
+ h3 = bitadd(h3, d);
+ h4 = bitadd(h4, e);
+
+ end
+
+ hash = reshape(dec2hex(double([h0 h1 h2 h3 h4]), 8)', [1 40]);
+
+ hash = lower(hash);
+
+end
+
+function ret = bitadd(iA, iB)
+ ret = double(iA) + double(iB);
+ ret = bitset(ret, 33, 0);
+ ret = uint32(ret);
+end
+
+function ret = bitrotate(iA, places)
+ t = bitshift(iA, places - 32);
+ ret = bitshift(iA, places);
+ ret = bitor(ret, t);
+end
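If the sha1 subfunction were ever pulled out into its own sha1.m for testing (a hypothetical refactor, not part of this commit), the standard FIPS test vector makes a quick sanity check:

% Known SHA-1 test vector; assumes sha1() has been extracted from
% submit.m into its own file so it can be called directly.
assert(strcmp(sha1('abc'), 'a9993e364706816aba3e25717850c26c9cd0d89d'));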
354 mlclass-ex4/mlclass-ex4/submitWeb.m
@@ -0,0 +1,354 @@
+function submitWeb(partId)
+%SUBMITWEB Generates a base64 encoded string for web-based submissions
+% SUBMITWEB() will generate a base64 encoded string so that you can submit your
+% solutions via a web form
+
+ fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ...
+ homework_id());
+ if ~exist('partId', 'var') || isempty(partId)
+ partId = promptPart();
+ end
+
+ % Check valid partId
+ partNames = validParts();
+ if ~isValidPartId(partId)
+ fprintf('!! Invalid homework part selected.\n');
+ fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames));
+ fprintf('!! Submission Cancelled\n');
+ return
+ end
+
+ [login] = loginPrompt();
+ if isempty(login)
+ fprintf('!! Submission Cancelled\n');
+ return
+ end
+
+ [result] = submitSolution(login, partId, output(partId), ...
+ source(partId));
+ result = base64encode(result);
+
+ fprintf('\nSave as submission file [submit_ex%s_part%d.txt]: ', ...
+ homework_id(), partId);
+ saveAsFile = input('', 's');
+ if (isempty(saveAsFile))
+ saveAsFile = sprintf('submit_ex%s_part%d.txt', homework_id(), partId);
+ end
+
+ fid = fopen(saveAsFile, 'w');
+ if (fid)
+ fwrite(fid, result);
+ fclose(fid);
+ fprintf('\nSaved your solutions to %s.\n\n', saveAsFile);
+ fprintf(['You can now submit your solutions through the web \n' ...
+ 'form in the programming exercises. Select the corresponding \n' ...
+ 'programming exercise to access the form.\n']);
+
+ else
+ fprintf('Unable to save to %s\n\n', saveAsFile);
+ fprintf(['You can create a submission file by saving the \n' ...
+ 'following text in a file: (press enter to continue)\n\n']);
+ pause;
+ fprintf(result);
+ end
+
+end
+
+% ================== CONFIGURABLES FOR EACH HOMEWORK ==================
+
+
+function id = homework_id()
+ id = '4';
+end
+
+function [partNames] = validParts()
+ partNames = { 'Feedforward and Cost Function', ...
+ 'Regularized Cost Function', ...
+ 'Sigmoid Gradient', ...
+ 'Neural Network Gradient (Backpropagation)' ...
+ 'Regularized Gradient' ...
+ };
+end
+
+function srcs = sources()
+ % Separated by part
+ srcs = { { 'nnCostFunction.m' }, ...
+ { 'nnCostFunction.m' }, ...
+ { 'sigmoidGradient.m' }, ...
+ { 'nnCostFunction.m' }, ...
+ { 'nnCostFunction.m' } };
+end
+
+function out = output(partId)
+ % Random Test Cases
+ X = reshape(3 * sin(1:1:30), 3, 10);
+ Xm = reshape(sin(1:32), 16, 2) / 5;
+ ym = 1 + mod(1:16,4)';
+ t1 = sin(reshape(1:2:24, 4, 3));
+ t2 = cos(reshape(1:2:40, 4, 5));
+ t = [t1(:) ; t2(:)];
+ if partId == 1
+ [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
+ out = sprintf('%0.5f ', J);
+ elseif partId == 2
+ [J] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
+ out = sprintf('%0.5f ', J);
+ elseif partId == 3
+ out = sprintf('%0.5f ', sigmoidGradient(X));
+ elseif partId == 4
+ [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 0);
+ out = sprintf('%0.5f ', J);
+ out = [out sprintf('%0.5f ', grad)];
+ elseif partId == 5
+ [J, grad] = nnCostFunction(t, 2, 4, 4, Xm, ym, 1.5);
+ out = sprintf('%0.5f ', J);
+ out = [out sprintf('%0.5f ', grad)];
+ end
+end
+
+
+% ========================= SUBMIT HELPERS =========================
+
+function src = source(partId)
+ src = '';
+ src_files = sources();
+ if partId <= numel(src_files)
+ flist = src_files{partId};
+ for i = 1:numel(flist)
+ fid = fopen(flist{i});
+ while ~feof(fid)
+ line = fgets(fid);
+ src = [src line];
+ end
+ fclose(fid);
+ src = [src '||||||||'];
+ end
+ end
+end
+
+function ret = isValidPartId(partId)
+ partNames = validParts();
+ ret = (~isempty(partId)) && (partId >= 1) && (partId <= numel(partNames));
+end
+
+function partId = promptPart()
+ fprintf('== Select which part(s) to submit:\n', ...
+ homework_id());
+ partNames = validParts();
+ srcFiles = sources();
+ for i = 1:numel(partNames)
+ fprintf('== %d) %s [', i, partNames{i});
+ fprintf(' %s ', srcFiles{i}{:});
+ fprintf(']\n');
+ end
+ fprintf('\nEnter your choice [1-%d]: ', ...
+ numel(partNames));
+ selPart = input('', 's');
+ partId = str2num(selPart);
+ if ~isValidPartId(partId)
+ partId = -1;
+ end
+end
+
+
+function [result, str] = submitSolution(email, part, output, source)
+
+ result = ['a:5:{' ...
+ p_s('homework') p_s64(homework_id()) ...
+ p_s('part') p_s64(part) ...
+ p_s('email') p_s64(email) ...
+ p_s('output') p_s64(output) ...
+ p_s('source') p_s64(source) ...
+ '}'];
+
+end
+
+function s = p_s(str)
+ s = ['s:' num2str(numel(str)) ':"' str '";'];
+end
+
+function s = p_s64(str)
+ str = base64encode(str, '');
+ s = ['s:' num2str(numel(str)) ':"' str '";'];
+end
+
+% =========================== LOGIN HELPERS ===========================
+
+function [login] = loginPrompt()
+ % Prompt for password
+ [login] = basicPrompt();
+end
+
+
+function [login] = basicPrompt()
+ login = input('Login (Email address): ', 's');
+end
+
+
+% =========================== Base64 Encoder ============================
+% Thanks to Peter John Acklam
+%
+
+function y = base64encode(x, eol)
+%BASE64ENCODE Perform base64 encoding on a string.
+%
+% BASE64ENCODE(STR, EOL) encode the given string STR. EOL is the line ending
+% sequence to use; it is optional and defaults to '\n' (ASCII decimal 10).
+% The returned encoded string is broken into lines of no more than 76
+% characters each, and each line will end with EOL unless it is empty. Let
+% EOL be empty if you do not want the encoded string broken into lines.
+%
+% STR and EOL don't have to be strings (i.e., char arrays). The only
+% requirement is that they are vectors containing values in the range 0-255.
+%
+% This function may be used to encode strings into the Base64 encoding
+% specified in RFC 2045 - MIME (Multipurpose Internet Mail Extensions). The
+% Base64 encoding is designed to represent arbitrary sequences of octets in a
+% form that need not be humanly readable. A 65-character subset
+% ([A-Za-z0-9+/=]) of US-ASCII is used, enabling 6 bits to be represented per
+% printable character.
+%
+% Examples
+% --------
+%
+% If you want to encode a large file, you should encode it in chunks that are
+% a multiple of 57 bytes. This ensures that the base64 lines line up and
+% that you do not end up with padding in the middle. 57 bytes of data fills
+% one complete base64 line (76 == 57*4/3):
+%
+% If ifid and ofid are two file identifiers opened for reading and writing,
+% respectively, then you can base64 encode the data with
+%
+% while ~feof(ifid)
+% fwrite(ofid, base64encode(fread(ifid, 60*57)));
+% end
+%
+% or, if you have enough memory,
+%
+% fwrite(ofid, base64encode(fread(ifid)));
+%
+% See also BASE64DECODE.
+
+% Author: Peter John Acklam
+% Time-stamp: 2004-02-03 21:36:56 +0100
+% E-mail: pjacklam@online.no
+% URL: http://home.online.no/~pjacklam
+
+ if isnumeric(x)
+ x = num2str(x);
+ end
+
+ % make sure we have the EOL value
+ if nargin < 2
+ eol = sprintf('\n');
+ else
+ if sum(size(eol) > 1) > 1
+ error('EOL must be a vector.');
+ end
+ if any(eol(:) > 255)
+ error('EOL can not contain values larger than 255.');
+ end
+ end
+
+ if sum(size(x) > 1) > 1
+ error('STR must be a vector.');
+ end
+
+ x = uint8(x);
+ eol = uint8(eol);
+
+ ndbytes = length(x); % number of decoded bytes
+ nchunks = ceil(ndbytes / 3); % number of chunks/groups
+ nebytes = 4 * nchunks; % number of encoded bytes
+
+ % add padding if necessary, to make the length of x a multiple of 3
+ if rem(ndbytes, 3)
+ x(end+1 : 3*nchunks) = 0;
+ end
+
+ x = reshape(x, [3, nchunks]); % reshape the data
+ y = repmat(uint8(0), 4, nchunks); % for the encoded data
+
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Split up every 3 bytes into 4 pieces
+ %
+ % aaaaaabb bbbbcccc ccdddddd
+ %
+ % to form
+ %
+ % 00aaaaaa 00bbbbbb 00cccccc 00dddddd
+ %
+ y(1,:) = bitshift(x(1,:), -2); % 6 highest bits of x(1,:)
+
+ y(2,:) = bitshift(bitand(x(1,:), 3), 4); % 2 lowest bits of x(1,:)
+ y(2,:) = bitor(y(2,:), bitshift(x(2,:), -4)); % 4 highest bits of x(2,:)
+
+ y(3,:) = bitshift(bitand(x(2,:), 15), 2); % 4 lowest bits of x(2,:)
+ y(3,:) = bitor(y(3,:), bitshift(x(3,:), -6)); % 2 highest bits of x(3,:)
+
+ y(4,:) = bitand(x(3,:), 63); % 6 lowest bits of x(3,:)
+
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Now perform the following mapping
+ %
+ % 0 - 25 -> A-Z
+ % 26 - 51 -> a-z
+ % 52 - 61 -> 0-9
+ % 62 -> +
+ % 63 -> /
+ %
+ % We could use a mapping vector like
+ %
+ % ['A':'Z', 'a':'z', '0':'9', '+/']
+ %
+ % but that would require an index vector of class double.
+ %
+ z = repmat(uint8(0), size(y));
+ i = y <= 25; z(i) = 'A' + double(y(i));
+ i = 26 <= y & y <= 51; z(i) = 'a' - 26 + double(y(i));
+ i = 52 <= y & y <= 61; z(i) = '0' - 52 + double(y(i));
+ i = y == 62; z(i) = '+';
+ i = y == 63; z(i) = '/';
+ y = z;
+
+ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+ % Add padding if necessary.
+ %
+ npbytes = 3 * nchunks - ndbytes; % number of padding bytes
+ if npbytes
+ y(end-npbytes+1 : end) = '='; % '=' is used for padding
+ end
+
+ if isempty(eol)
+
+ % reshape to a row vector
+ y = reshape(y, [1, nebytes]);
+
+ else
+
+ nlines = ceil(nebytes / 76); % number of lines
+ neolbytes = length(eol); % number of bytes in eol string
+
+ % pad data so it becomes a multiple of 76 elements
+ y = [y(:) ; zeros(76 * nlines - numel(y), 1)];
+ y(nebytes + 1 : 76 * nlines) = 0;
+ y = reshape(y, 76, nlines);
+
+ % insert eol strings
+ eol = eol(:);
+ y(end + 1 : end + neolbytes, :) = eol(:, ones(1, nlines));
+
+ % remove padding, but keep the last eol string
+ m = nebytes + neolbytes * (nlines - 1);
+ n = (76+neolbytes)*nlines - neolbytes;
+ y(m+1 : n) = '';
+
+ % extract and reshape to row vector
+ y = reshape(y, 1, m+neolbytes);
+
+ end
+
+ % output is a character array
+ y = char(y);
+
+end
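Likewise, if base64encode were extracted for standalone testing (again hypothetical), the RFC 4648 test vectors exercise all three padding cases; passing '' as EOL suppresses line breaks:

% RFC 4648 test vectors covering 0, 1, and 2 padding characters.
assert(strcmp(base64encode('Man', ''), 'TWFu'));
assert(strcmp(base64encode('Ma',  ''), 'TWE='));
assert(strcmp(base64encode('M',   ''), 'TQ=='));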
BIN  octave-core
Binary file not shown