diff --git a/Programming Assignment/Week4/Sumanth_week4/computeCentroids.m b/Programming Assignment/Week4/Sumanth_week4/computeCentroids.m
new file mode 100644
index 0000000..b1a06d7
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/computeCentroids.m
@@ -0,0 +1,47 @@
+function centroids = computeCentroids(X, idx, K)
+%COMPUTECENTROIDS returns the new centroids by computing the means of the
+%data points assigned to each centroid.
+%   centroids = COMPUTECENTROIDS(X, idx, K) returns the new centroids by
+%   computing the means of the data points assigned to each centroid. It is
+%   given a dataset X where each row is a single data point, a vector
+%   idx of centroid assignments (i.e. each entry in range [1..K]) for each
+%   example, and K, the number of centroids. You should return a matrix
+%   centroids, where each row of centroids is the mean of the data points
+%   assigned to it.
+%
+
+% Useful variables
+[m n] = size(X);
+
+% You need to return the following variables correctly.
+centroids = zeros(K, n);
+
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Go over every centroid and compute mean of all points that
+%               belong to it. Concretely, the row vector centroids(i, :)
+%               should contain the mean of the data points assigned to
+%               centroid i.
+%
+% Note: You can use a for-loop over the centroids to compute this.
+%
+for k = 1:K
+    C = 0;            % number of points assigned to centroid k
+    S = zeros(n, 1);  % running sum of those points
+    for i = 1:m
+        if idx(i) == k
+            S = S + X(i, :)';
+            C = C + 1;
+        end
+    end
+    centroids(k, :) = (S / C)';  % mean of the points assigned to centroid k
+end
+
+
+
+% =============================================================
+
+
+end
+
diff --git a/Programming Assignment/Week4/Sumanth_week4/displayData.m b/Programming Assignment/Week4/Sumanth_week4/displayData.m
new file mode 100644
index 0000000..160697e
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/displayData.m
@@ -0,0 +1,59 @@
+function [h, display_array] = displayData(X, example_width)
+%DISPLAYDATA Display 2D data in a nice grid
+%   [h, display_array] = DISPLAYDATA(X, example_width) displays 2D data
+%   stored in X in a nice grid. It returns the figure handle h and the
+%   displayed array if requested.
+
+% Set example_width automatically if not passed in
+if ~exist('example_width', 'var') || isempty(example_width)
+    example_width = round(sqrt(size(X, 2)));
+end
+
+% Gray Image
+colormap(gray);
+
+% Compute rows, cols
+[m n] = size(X);
+example_height = (n / example_width);
+
+% Compute number of items to display
+display_rows = floor(sqrt(m));
+display_cols = ceil(m / display_rows);
+
+% Between images padding
+pad = 1;
+
+% Setup blank display
+display_array = - ones(pad + display_rows * (example_height + pad), ...
+                       pad + display_cols * (example_width + pad));
+
+% Copy each example into a patch on the display array
+curr_ex = 1;
+for j = 1:display_rows
+    for i = 1:display_cols
+        if curr_ex > m,
+            break;
+        end
+        % Copy the patch
+
+        % Get the max value of the patch
+        max_val = max(abs(X(curr_ex, :)));
+        display_array(pad + (j - 1) * (example_height + pad) + (1:example_height), ...
+                      pad + (i - 1) * (example_width + pad) + (1:example_width)) = ...
+                          reshape(X(curr_ex, :), example_height, example_width) / max_val;
+        curr_ex = curr_ex + 1;
+    end
+    if curr_ex > m,
+        break;
+    end
+end
+
+% Display Image
+h = imagesc(display_array, [-1 1]);
+
+% Do not show axis
+axis image off
+
+drawnow;
+
+end
diff --git a/Programming Assignment/Week4/Sumanth_week4/drawLine.m b/Programming Assignment/Week4/Sumanth_week4/drawLine.m
new file mode 100644
index 0000000..85e6c41
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/drawLine.m
@@ -0,0 +1,8 @@
+function drawLine(p1, p2, varargin)
+%DRAWLINE Draws a line from point p1 to point p2
+%   DRAWLINE(p1, p2) Draws a line from point p1 to point p2 and holds the
+%   current figure
+
+plot([p1(1) p2(1)], [p1(2) p2(2)], varargin{:});
+
+end
\ No newline at end of file
diff --git a/Programming Assignment/Week4/Sumanth_week4/ex7.m b/Programming Assignment/Week4/Sumanth_week4/ex7.m
new file mode 100644
index 0000000..7a3359a
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/ex7.m
@@ -0,0 +1,174 @@
+%% Machine Learning Online Class
+%  Exercise 7 | Principal Component Analysis and K-Means Clustering
+%
+%  Instructions
+%  ------------
+%
+%  This file contains code that helps you get started on the
+%  exercise. You will need to complete the following functions:
+%
+%     pca.m
+%     projectData.m
+%     recoverData.m
+%     computeCentroids.m
+%     findClosestCentroids.m
+%     kMeansInitCentroids.m
+%
+%  For this exercise, you will not need to change any code in this file,
+%  or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% ================= Part 1: Find Closest Centroids ====================
+%  To help you implement K-Means, we have divided the learning algorithm
+%  into two functions -- findClosestCentroids and computeCentroids. In this
+%  part, you should complete the code in the findClosestCentroids function.
+%
+fprintf('Finding closest centroids.\n\n');
+
+% Load an example dataset that we will be using
+load('ex7data2.mat');
+
+% Select an initial set of centroids
+K = 3; % 3 Centroids
+initial_centroids = [3 3; 6 2; 8 5];
+
+% Find the closest centroids for the examples using the
+% initial_centroids
+idx = findClosestCentroids(X, initial_centroids);
+
+fprintf('Closest centroids for the first 3 examples: \n')
+fprintf(' %d', idx(1:3));
+fprintf('\n(the closest centroids should be 1, 3, 2 respectively)\n');
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% ===================== Part 2: Compute Means =========================
+%  After implementing the closest centroids function, you should now
+%  complete the computeCentroids function.
+%
+fprintf('\nComputing centroid means.\n\n');
+
+%  Compute means based on the closest centroids found in the previous part.
+centroids = computeCentroids(X, idx, K);
+
+fprintf('Centroids computed after initial finding of closest centroids: \n')
+fprintf(' %f %f \n' , centroids');
+fprintf('\n(the centroids should be\n');
+fprintf('   [ 2.428301 3.157924 ]\n');
+fprintf('   [ 5.813503 2.633656 ]\n');
+fprintf('   [ 7.119387 3.616684 ]\n\n');
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% =================== Part 3: K-Means Clustering ======================
+%  After you have completed the two functions computeCentroids and
+%  findClosestCentroids, you have all the necessary pieces to run the
+%  kMeans algorithm. In this part, you will run the K-Means algorithm on
+%  the example dataset we have provided.
+%
+fprintf('\nRunning K-Means clustering on example dataset.\n\n');
+
+% Load an example dataset
+load('ex7data2.mat');
+
+% Settings for running K-Means
+K = 3;
+max_iters = 10;
+
+% For consistency, here we set centroids to specific values
+% but in practice you want to generate them automatically, such as by
+% setting them to be random examples (as can be seen in
+% kMeansInitCentroids).
+initial_centroids = [3 3; 6 2; 8 5];
+
+% Run K-Means algorithm. The 'true' at the end tells our function to plot
+% the progress of K-Means
+[centroids, idx] = runkMeans(X, initial_centroids, max_iters, true);
+fprintf('\nK-Means Done.\n\n');
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% ============= Part 4: K-Means Clustering on Pixels ===============
+%  In this exercise, you will use K-Means to compress an image. To do this,
+%  you will first run K-Means on the colors of the pixels in the image and
+%  then you will map each pixel onto its closest centroid.
+%
+%  You should now complete the code in kMeansInitCentroids.m
+%
+
+fprintf('\nRunning K-Means clustering on pixels from an image.\n\n');
+
+%  Load an image of a bird
+A = double(imread('bird_small.png'));
+
+% If imread does not work for you, you can try instead
+%   load ('bird_small.mat');
+
+A = A / 255; % Divide by 255 so that all values are in the range 0 - 1
+
+% Size of the image
+img_size = size(A);
+
+% Reshape the image into an Nx3 matrix where N = number of pixels.
+% Each row will contain the Red, Green and Blue pixel values
+% This gives us our dataset matrix X that we will use K-Means on.
+X = reshape(A, img_size(1) * img_size(2), 3);
+
+% Run your K-Means algorithm on this data
+% You should try different values of K and max_iters here
+K = 16;
+max_iters = 10;
+
+% When using K-Means, it is important to initialize the centroids
+% randomly.
+% You should complete the code in kMeansInitCentroids.m before proceeding
+initial_centroids = kMeansInitCentroids(X, K);
+
+% Run K-Means
+[centroids, idx] = runkMeans(X, initial_centroids, max_iters);
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ================= Part 5: Image Compression ======================
+%  In this part of the exercise, you will use the clusters of K-Means to
+%  compress an image. To do this, we first find the closest cluster for
+%  each example. After that, we replace each pixel with its closest centroid.
+
+fprintf('\nApplying K-Means to compress an image.\n\n');
+
+% Find closest cluster members
+idx = findClosestCentroids(X, centroids);
+
+% Essentially, now we have represented the image X in terms of the
+% indices in idx.
+
+% We can now recover the image from the indices (idx) by mapping each pixel
+% (specified by its index in idx) to the centroid value
+X_recovered = centroids(idx,:);
+
+% Reshape the recovered image into proper dimensions
+X_recovered = reshape(X_recovered, img_size(1), img_size(2), 3);
+
+% Display the original image
+subplot(1, 2, 1);
+imagesc(A);
+title('Original');
+
+% Display compressed image side by side
+subplot(1, 2, 2);
+imagesc(X_recovered)
+title(sprintf('Compressed, with %d colors.', K));
+
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
diff --git a/Programming Assignment/Week4/Sumanth_week4/ex7_pca.m b/Programming Assignment/Week4/Sumanth_week4/ex7_pca.m
new file mode 100644
index 0000000..701cd52
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/ex7_pca.m
@@ -0,0 +1,235 @@
+%% Machine Learning Online Class
+%  Exercise 7 | Principal Component Analysis and K-Means Clustering
+%
+%  Instructions
+%  ------------
+%
+%  This file contains code that helps you get started on the
+%  exercise. You will need to complete the following functions:
+%
+%     pca.m
+%     projectData.m
+%     recoverData.m
+%     computeCentroids.m
+%     findClosestCentroids.m
+%     kMeansInitCentroids.m
+%
+%  For this exercise, you will not need to change any code in this file,
+%  or any other files other than those mentioned above.
+%
+
+%% Initialization
+clear ; close all; clc
+
+%% ================== Part 1: Load Example Dataset ===================
+%  We start this exercise by using a small dataset that is easy to
+%  visualize
+%
+fprintf('Visualizing example dataset for PCA.\n\n');
+
+%  The following command loads the dataset. You should now have the
+%  variable X in your environment
+load ('ex7data1.mat');
+
+%  Visualize the example dataset
+plot(X(:, 1), X(:, 2), 'bo');
+axis([0.5 6.5 2 8]); axis square;
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% =============== Part 2: Principal Component Analysis ===============
+%  You should now implement PCA, a dimension reduction technique. You
+%  should complete the code in pca.m
+%
+fprintf('\nRunning PCA on example dataset.\n\n');
+
+%  Before running PCA, it is important to first normalize X
+[X_norm, mu, sigma] = featureNormalize(X);
+
+%  Run PCA
+[U, S] = pca(X_norm);
+
+%  mu, returned by featureNormalize above, is the mean of each feature
+
+%  Draw the eigenvectors centered at mean of data. These lines show the
+%  directions of maximum variations in the dataset.
+hold on;
+drawLine(mu, mu + 1.5 * S(1,1) * U(:,1)', '-k', 'LineWidth', 2);
+drawLine(mu, mu + 1.5 * S(2,2) * U(:,2)', '-k', 'LineWidth', 2);
+hold off;
+
+fprintf('Top eigenvector: \n');
+fprintf(' U(:,1) = %f %f \n', U(1,1), U(2,1));
+fprintf('\n(you should expect to see -0.707107 -0.707107)\n');
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% =================== Part 3: Dimension Reduction ===================
+%  You should now implement the projection step to map the data onto the
+%  first k eigenvectors. The code will then plot the data in this reduced
+%  dimensional space. This will show you what the data looks like when
+%  using only the corresponding eigenvectors to reconstruct it.
+%
+%  You should complete the code in projectData.m
+%
+fprintf('\nDimension reduction on example dataset.\n\n');
+
+%  Plot the normalized dataset (returned from featureNormalize)
+plot(X_norm(:, 1), X_norm(:, 2), 'bo');
+axis([-4 3 -4 3]); axis square
+
+%  Project the data onto K = 1 dimension
+K = 1;
+Z = projectData(X_norm, U, K);
+fprintf('Projection of the first example: %f\n', Z(1));
+fprintf('\n(this value should be about 1.481274)\n\n');
+
+X_rec = recoverData(Z, U, K);
+fprintf('Approximation of the first example: %f %f\n', X_rec(1, 1), X_rec(1, 2));
+fprintf('\n(these values should be about -1.047419 -1.047419)\n\n');
+
+%  Draw lines connecting the projected points to the original points
+hold on;
+plot(X_rec(:, 1), X_rec(:, 2), 'ro');
+for i = 1:size(X_norm, 1)
+    drawLine(X_norm(i,:), X_rec(i,:), '--k', 'LineWidth', 1);
+end
+hold off
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% =============== Part 4: Loading and Visualizing Face Data =============
+%  We start the exercise by first loading and visualizing the dataset.
+%  The following code will load the dataset into your environment
+%
+fprintf('\nLoading face dataset.\n\n');
+
+%  Load Face dataset
+load ('ex7faces.mat')
+
+%  Display the first 100 faces in the dataset
+displayData(X(1:100, :));
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% =========== Part 5: PCA on Face Data: Eigenfaces ===================
+%  Run PCA and visualize the eigenvectors, which in this case are
+%  eigenfaces. We display the first 36 eigenfaces.
+%
+fprintf(['\nRunning PCA on face dataset.\n' ...
+         '(this might take a minute or two ...)\n\n']);
+
+%  Before running PCA, it is important to first normalize X by subtracting
+%  the mean value from each feature
+[X_norm, mu, sigma] = featureNormalize(X);
+
+%  Run PCA
+[U, S] = pca(X_norm);
+
+%  Visualize the top 36 eigenvectors found
+displayData(U(:, 1:36)');
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% ============= Part 6: Dimension Reduction for Faces =================
+%  Project images to the eigen space using the top k eigenvectors.
+%  If you are applying a machine learning algorithm, you can use Z instead of X.
+fprintf('\nDimension reduction for face dataset.\n\n');
+
+K = 100;
+Z = projectData(X_norm, U, K);
+
+fprintf('The projected data Z has a size of: ')
+fprintf('%d ', size(Z));
+
+fprintf('\n\nProgram paused. Press enter to continue.\n');
+pause;
+
+%% ==== Part 7: Visualization of Faces after PCA Dimension Reduction ====
+%  Project images to the eigen space using the top K eigenvectors and
+%  visualize only using those K dimensions
+%  Compare to the original input, which is also displayed
+
+fprintf('\nVisualizing the projected (reduced dimension) faces.\n\n');
+
+K = 100;
+X_rec = recoverData(Z, U, K);
+
+% Display normalized data
+subplot(1, 2, 1);
+displayData(X_norm(1:100,:));
+title('Original faces');
+axis square;
+
+% Display reconstructed data from only k eigenfaces
+subplot(1, 2, 2);
+displayData(X_rec(1:100,:));
+title('Recovered faces');
+axis square;
+
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+
+%% === Part 8(a): Optional (ungraded) Exercise: PCA for Visualization ===
+%  One useful application of PCA is to use it to visualize high-dimensional
+%  data. In the last K-Means exercise you ran K-Means on 3-dimensional
+%  pixel colors of an image. We first visualize this output in 3D, and then
+%  apply PCA to obtain a visualization in 2D.
+
+close all; clc
+
+% Reload the image from the previous exercise and run K-Means on it
+% For this to work, you need to complete the K-Means assignment first
+A = double(imread('bird_small.png'));
+
+% If imread does not work for you, you can try instead
+%   load ('bird_small.mat');
+
+A = A / 255;
+img_size = size(A);
+X = reshape(A, img_size(1) * img_size(2), 3);
+K = 16;
+max_iters = 10;
+initial_centroids = kMeansInitCentroids(X, K);
+[centroids, idx] = runkMeans(X, initial_centroids, max_iters);
+
+%  Sample 1000 random indexes (since working with all the data is
+%  too expensive). If you have a fast computer, you may increase this.
+sel = floor(rand(1000, 1) * size(X, 1)) + 1;
+
+%  Setup Color Palette
+palette = hsv(K);
+colors = palette(idx(sel), :);
+
+%  Visualize the data and centroid memberships in 3D
+figure;
+scatter3(X(sel, 1), X(sel, 2), X(sel, 3), 10, colors);
+title('Pixel dataset plotted in 3D. Color shows centroid memberships');
+fprintf('Program paused. Press enter to continue.\n');
+pause;
+
+%% === Part 8(b): Optional (ungraded) Exercise: PCA for Visualization ===
+% Use PCA to project this cloud to 2D for visualization
+
+% Subtract the mean to use PCA
+[X_norm, mu, sigma] = featureNormalize(X);
+
+% PCA and project the data to 2D
+[U, S] = pca(X_norm);
+Z = projectData(X_norm, U, 2);
+
+% Plot in 2D
+figure;
+plotDataPoints(Z(sel, :), idx(sel), K);
+title('Pixel dataset plotted in 2D, using PCA for dimensionality reduction');
+fprintf('Program paused. Press enter to continue.\n');
+pause;
diff --git a/Programming Assignment/Week4/Sumanth_week4/featureNormalize.m b/Programming Assignment/Week4/Sumanth_week4/featureNormalize.m
new file mode 100644
index 0000000..da03bee
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/featureNormalize.m
@@ -0,0 +1,17 @@
+function [X_norm, mu, sigma] = featureNormalize(X)
+%FEATURENORMALIZE Normalizes the features in X
+%   FEATURENORMALIZE(X) returns a normalized version of X where
+%   the mean value of each feature is 0 and the standard deviation
+%   is 1. This is often a good preprocessing step to do when
+%   working with learning algorithms.
+
+mu = mean(X);
+X_norm = bsxfun(@minus, X, mu);
+
+sigma = std(X_norm);
+X_norm = bsxfun(@rdivide, X_norm, sigma);
+
+
+% ============================================================
+
+end
diff --git a/Programming Assignment/Week4/Sumanth_week4/findClosestCentroids.m b/Programming Assignment/Week4/Sumanth_week4/findClosestCentroids.m
new file mode 100644
index 0000000..3d0087a
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/findClosestCentroids.m
@@ -0,0 +1,43 @@
+function idx = findClosestCentroids(X, centroids)
+%FINDCLOSESTCENTROIDS computes the centroid memberships for every example
+%   idx = FINDCLOSESTCENTROIDS (X, centroids) returns the closest centroids
+%   in idx for a dataset X where each row is a single example. idx = m x 1
+%   vector of centroid assignments (i.e. each entry in range [1..K])
+%
+
+% Set K
+K = size(centroids, 1);
+
+% You need to return the following variables correctly.
+idx = zeros(size(X,1), 1);
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Go over every example, find its closest centroid, and store
+%               the index inside idx at the appropriate location.
+%               Concretely, idx(i) should contain the index of the centroid
+%               closest to example i. Hence, it should be a value in the
+%               range 1..K
+%
+% Note: You can use a for-loop over the examples to compute this.
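+% The loop below tracks, for each example, the squared Euclidean distance
+% to every centroid and keeps the index of the nearest centroid in idx(i).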
+
+for i = 1:size(X, 1)
+    min_dist = inf;
+    for k = 1:K
+        diff = X(i,:)' - centroids(k,:)';  % difference vector to centroid k
+        dist = diff' * diff;               % squared Euclidean distance
+        if (dist < min_dist)
+            idx(i) = k;
+            min_dist = dist;
+        end
+    end
+end
+
+
+
+% =============================================================
+
+end
+
diff --git a/Programming Assignment/Week4/Sumanth_week4/kMeansInitCentroids.m b/Programming Assignment/Week4/Sumanth_week4/kMeansInitCentroids.m
new file mode 100644
index 0000000..ca884b8
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/kMeansInitCentroids.m
@@ -0,0 +1,28 @@
+function centroids = kMeansInitCentroids(X, K)
+%KMEANSINITCENTROIDS This function initializes K centroids that are to be
+%used in K-Means on the dataset X
+%   centroids = KMEANSINITCENTROIDS(X, K) returns K initial centroids to be
+%   used with the K-Means on the dataset X
+%
+
+% You should return these values correctly
+centroids = zeros(K, size(X, 2));
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: You should set centroids to randomly chosen examples from
+%               the dataset X
+
+% Randomly reorder the indices of the examples, then take the first K
+randidx = randperm(size(X, 1));
+centroids = X(randidx(1:K), :);
+
+
+
+
+
+
+
+% =============================================================
+
+end
+
diff --git a/Programming Assignment/Week4/Sumanth_week4/pca.m b/Programming Assignment/Week4/Sumanth_week4/pca.m
new file mode 100644
index 0000000..c51c965
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/pca.m
@@ -0,0 +1,33 @@
+function [U, S] = pca(X)
+%PCA Run principal component analysis on the dataset X
+%   [U, S] = pca(X) computes eigenvectors of the covariance matrix of X
+%   Returns the eigenvectors U, the eigenvalues (on diagonal) in S
+%
+
+% Useful values
+[m, n] = size(X);
+
+% You need to return the following variables correctly.
+U = zeros(n);
+S = zeros(n);
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: You should first compute the covariance matrix. Then, you
+%               should use the "svd" function to compute the eigenvectors
+%               and eigenvalues of the covariance matrix.
+%
+% Note: When computing the covariance matrix, remember to divide by m (the
+%       number of examples).
+
+sigma = (1/m) * (X' * X);  % covariance matrix of the (normalized) data
+[U, S, V] = svd(sigma);    % columns of U are the principal components
+
+
+
+
+
+
+
+% =========================================================================
+
+end
diff --git a/Programming Assignment/Week4/Sumanth_week4/plotDataPoints.m b/Programming Assignment/Week4/Sumanth_week4/plotDataPoints.m
new file mode 100644
index 0000000..77c4623
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/plotDataPoints.m
@@ -0,0 +1,14 @@
+function plotDataPoints(X, idx, K)
+%PLOTDATAPOINTS plots data points in X, coloring them so that those with the same
+%index assignments in idx have the same color
+%   PLOTDATAPOINTS(X, idx, K) plots data points in X, coloring them so that those
+%   with the same index assignments in idx have the same color
+
+% Create palette
+palette = hsv(K + 1);
+colors = palette(idx, :);
+
+% Plot the data
+scatter(X(:,1), X(:,2), 15, colors);
+
+end
diff --git a/Programming Assignment/Week4/Sumanth_week4/plotProgresskMeans.m b/Programming Assignment/Week4/Sumanth_week4/plotProgresskMeans.m
new file mode 100644
index 0000000..f14d1c7
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/plotProgresskMeans.m
@@ -0,0 +1,27 @@
+function plotProgresskMeans(X, centroids, previous, idx, K, i)
+%PLOTPROGRESSKMEANS is a helper function that displays the progress of
+%k-Means as it is running. It is intended for use only with 2D data.
+%   PLOTPROGRESSKMEANS(X, centroids, previous, idx, K, i) plots the data
+%   points with colors assigned to each centroid. With the previous
+%   centroids, it also plots a line between the previous locations and
+%   current locations of the centroids.
+%
+
+% Plot the examples
+plotDataPoints(X, idx, K);
+
+% Plot the centroids as black x's
+plot(centroids(:,1), centroids(:,2), 'x', ...
+     'MarkerEdgeColor','k', ...
+     'MarkerSize', 10, 'LineWidth', 3);
+
+% Plot the history of the centroids with lines
+for j=1:size(centroids,1)
+    drawLine(centroids(j, :), previous(j, :));
+end
+
+% Title
+title(sprintf('Iteration number %d', i))
+
+end
+
diff --git a/Programming Assignment/Week4/Sumanth_week4/projectData.m b/Programming Assignment/Week4/Sumanth_week4/projectData.m
new file mode 100644
index 0000000..964e886
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/projectData.m
@@ -0,0 +1,29 @@
+function Z = projectData(X, U, K)
+%PROJECTDATA Computes the reduced data representation when projecting only
+%on to the top k eigenvectors
+%   Z = projectData(X, U, K) computes the projection of
+%   the normalized inputs X into the reduced dimensional space spanned by
+%   the first K columns of U. It returns the projected examples in Z.
+%
+
+% You need to return the following variables correctly.
+Z = zeros(size(X, 1), K);
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Compute the projection of the data using only the top K
+%               eigenvectors in U (first K columns).
+%               For the i-th example X(i,:), the projection on to the k-th
+%               eigenvector is given as follows:
+%                   x = X(i, :)';
+%                   projection_k = x' * U(:, k);
+
+U_reduce = U(:, 1:K);  % top K eigenvectors
+for i = 1:size(X, 1)
+    x = X(i, :)';
+    Z(i, :) = (U_reduce' * x);  % projection of example i onto K dimensions
+end
+
+
+% =============================================================
+
+end
diff --git a/Programming Assignment/Week4/Sumanth_week4/recoverData.m b/Programming Assignment/Week4/Sumanth_week4/recoverData.m
new file mode 100644
index 0000000..5a1d4b5
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/recoverData.m
@@ -0,0 +1,31 @@
+function X_rec = recoverData(Z, U, K)
+%RECOVERDATA Recovers an approximation of the original data when using the
+%projected data
+%   X_rec = RECOVERDATA(Z, U, K) recovers an approximation of the
+%   original data that has been reduced to K dimensions. It returns the
+%   approximate reconstruction in X_rec.
+%
+
+% You need to return the following variables correctly.
+X_rec = zeros(size(Z, 1), size(U, 1));
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Compute the approximation of the data by projecting back
+%               onto the original space using the top K eigenvectors in U.
+%
+%               For the i-th example Z(i,:), the (approximate)
+%               recovered data for dimension j is given as follows:
+%                   v = Z(i, :)';
+%                   recovered_j = v' * U(j, 1:K)';
+%
+%               Notice that U(j, 1:K) is a row vector.
+
+U_reduce = U(:, 1:K);  % top K eigenvectors
+for i = 1:size(Z, 1)
+    z = Z(i, :)';
+    X_rec(i, :) = (U_reduce * z);  % map the projection back to n dimensions
+end
+
+
+% =============================================================
+
+end
diff --git a/Programming Assignment/Week4/Sumanth_week4/runkMeans.m b/Programming Assignment/Week4/Sumanth_week4/runkMeans.m
new file mode 100644
index 0000000..fc22c1b
--- /dev/null
+++ b/Programming Assignment/Week4/Sumanth_week4/runkMeans.m
@@ -0,0 +1,64 @@
+function [centroids, idx] = runkMeans(X, initial_centroids, ...
+                                      max_iters, plot_progress)
+%RUNKMEANS runs the K-Means algorithm on data matrix X, where each row of X
+%is a single example
+%   [centroids, idx] = RUNKMEANS(X, initial_centroids, max_iters, ...
+%   plot_progress) runs the K-Means algorithm on data matrix X, where each
+%   row of X is a single example. It uses initial_centroids as the
+%   initial centroids. max_iters specifies the total number of iterations
+%   of K-Means to execute. plot_progress is a true/false flag that
+%   indicates if the function should also plot its progress as the
+%   learning happens. This is set to false by default. runkMeans returns
+%   centroids, a Kxn matrix of the computed centroids, and idx, an m x 1
+%   vector of centroid assignments (i.e. each entry in range [1..K])
+%
+
+% Set default value for plot progress
+if ~exist('plot_progress', 'var') || isempty(plot_progress)
+    plot_progress = false;
+end
+
+% Plot the data if we are plotting progress
+if plot_progress
+    figure;
+    hold on;
+end
+
+% Initialize values
+[m n] = size(X);
+K = size(initial_centroids, 1);
+centroids = initial_centroids;
+previous_centroids = centroids;
+idx = zeros(m, 1);
+
+% Run K-Means
+for i=1:max_iters
+
+    % Output progress
+    fprintf('K-Means iteration %d/%d...\n', i, max_iters);
+    if exist('OCTAVE_VERSION')
+        fflush(stdout);
+    end
+
+    % For each example in X, assign it to the closest centroid
+    idx = findClosestCentroids(X, centroids);
+
+    % Optionally, plot progress here
+    if plot_progress
+        plotProgresskMeans(X, centroids, previous_centroids, idx, K, i);
+        previous_centroids = centroids;
+        fprintf('Press enter to continue.\n');
+        pause;
+    end
+
+    % Given the memberships, compute new centroids
+    centroids = computeCentroids(X, idx, K);
+end
+
+% Hold off if we are plotting progress
+if plot_progress
+    hold off;
+end
+
+end
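
A side note on the two graded K-Means functions above: their inner loops can
also be vectorized. The following is a minimal sketch, assuming the same X,
idx, K, and centroids variables as computeCentroids.m and
findClosestCentroids.m; it reuses bsxfun in the same way featureNormalize.m
does and, like the loop versions, assumes every centroid keeps at least one
assigned point (an empty cluster would produce a NaN row).

% Vectorized centroid update: mean of the points assigned to each centroid
for k = 1:K
    centroids(k, :) = mean(X(idx == k, :), 1);
end

% Vectorized assignment step: build the m x K matrix of squared distances
% from every example to every centroid, then take each row's minimum
sq_dists = bsxfun(@plus, sum(X .^ 2, 2), sum(centroids .^ 2, 2)') ...
           - 2 * X * centroids';
[~, idx] = min(sq_dists, [], 2);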