From 57fc13da04669a22a07047669b9d3752b73d18b4 Mon Sep 17 00:00:00 2001
From: Julia Rouzou
Date: Sat, 15 Nov 2025 10:39:42 +0100
Subject: [PATCH 1/3] Completed numpy assignment-Julia Rouzou

---
 numpy_questions.py | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/numpy_questions.py b/numpy_questions.py
index 21fcec4b..85857401 100644
--- a/numpy_questions.py
+++ b/numpy_questions.py
@@ -40,10 +40,15 @@ def max_index(X):
     i = 0
     j = 0
 
-    # TODO
+    if not isinstance(X, np.ndarray):
+        raise ValueError("Input must be a numpy array.")
 
-    return i, j
+    if X.ndim != 2:
+        raise ValueError("Input array must be 2-dimensional.")
 
+    flat_index = np.argmax(X)
+    i, j = np.unravel_index(flat_index, X.shape)
+    return i, j
 
 def wallis_product(n_terms):
     """Implement the Wallis product to compute an approximation of pi.
@@ -62,6 +67,12 @@ def wallis_product(n_terms):
     pi : float
         The approximation of order `n_terms` of pi using the Wallis product.
     """
-    # XXX : The n_terms is an int that corresponds to the number of
-    # terms in the product. For example 10000.
-    return 0.
+    if n_terms < 0:
+        raise ValueError("n_terms must be non-negative.")
+
+    product = 1.0
+    for n in range(1, n_terms + 1):
+        product *= (4 * n ** 2) / (4 * n ** 2 - 1)
+
+    return 2 * product
+

From f5e1483b368233bae7961da8c08c462fb6f33e02 Mon Sep 17 00:00:00 2001
From: Julia Rouzou
Date: Sat, 15 Nov 2025 11:03:53 +0100
Subject: [PATCH 2/3] Update numpy and sklearn assignments with fixes

---
 numpy_questions.py   |  8 ++++----
 sklearn_questions.py | 21 +++++++++++----------
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/numpy_questions.py b/numpy_questions.py
index 85857401..1a4f9b01 100644
--- a/numpy_questions.py
+++ b/numpy_questions.py
@@ -50,12 +50,10 @@ def max_index(X):
     i, j = np.unravel_index(flat_index, X.shape)
     return i, j
 
+
 def wallis_product(n_terms):
     """Implement the Wallis product to compute an approximation of pi.
 
-    See:
-    https://en.wikipedia.org/wiki/Wallis_product
-
     Parameters
     ----------
     n_terms : int
@@ -74,5 +72,7 @@ def wallis_product(n_terms):
     for n in range(1, n_terms + 1):
         product *= (4 * n ** 2) / (4 * n ** 2 - 1)
 
-    return 2 * product
+    if n_terms == 0:
+        return 1.0
 
+    return 2 * product
diff --git a/sklearn_questions.py b/sklearn_questions.py
index f65038c6..089e94c4 100644
--- a/sklearn_questions.py
+++ b/sklearn_questions.py
@@ -23,13 +23,13 @@
 from sklearn.base import BaseEstimator
 from sklearn.base import ClassifierMixin
 from sklearn.utils.validation import check_X_y
-from sklearn.utils.validation import check_array
 from sklearn.utils.validation import check_is_fitted
 from sklearn.utils.multiclass import check_classification_targets
+from sklearn.utils.validation import validate_data
 
 
-class OneNearestNeighbor(BaseEstimator, ClassifierMixin):
-    "OneNearestNeighbor classifier."
+class OneNearestNeighbor(ClassifierMixin, BaseEstimator):
+    """OneNearestNeighbor classifier."""
 
     def __init__(self):  # noqa: D107
         pass
@@ -41,10 +41,10 @@ def fit(self, X, y):
         """
         X, y = check_X_y(X, y)
         check_classification_targets(y)
+        self.X_ = X
+        self.y_ = y
         self.classes_ = np.unique(y)
         self.n_features_in_ = X.shape[1]
-
-        # XXX fix
         return self
 
     def predict(self, X):
@@ -53,13 +53,16 @@ def predict(self, X):
         And describe parameters
         """
         check_is_fitted(self)
-        X = check_array(X)
+        X = validate_data(self, X, reset=False)
         y_pred = np.full(
             shape=len(X), fill_value=self.classes_[0],
             dtype=self.classes_.dtype
         )
 
-        # XXX fix
+        for i, x_test in enumerate(X):
+            distances = np.linalg.norm(self.X_ - x_test, axis=1)
+            nearest_index = np.argmin(distances)
+            y_pred[i] = self.y_[nearest_index]
         return y_pred
 
     def score(self, X, y):
@@ -69,6 +72,4 @@ def score(self, X, y):
         """
         X, y = check_X_y(X, y)
         y_pred = self.predict(X)
-
-        # XXX fix
-        return y_pred.sum()
+        return np.mean(y_pred == y)

From b82bea1b8cdf80bec52d20d31345f3f5b8edc2b1 Mon Sep 17 00:00:00 2001
From: Julia Rouzou
Date: Sat, 15 Nov 2025 19:06:08 +0100
Subject: [PATCH 3/3] Julia Rouzou did functions

---
 sklearn_questions.py | 99 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 77 insertions(+), 22 deletions(-)

diff --git a/sklearn_questions.py b/sklearn_questions.py
index 089e94c4..e709c988 100644
--- a/sklearn_questions.py
+++ b/sklearn_questions.py
@@ -6,69 +6,124 @@
 The nearest neighbor classifier predicts for a point X_i the target y_k of
 the training sample X_k which is the closest to X_i. We measure proximity with
 the Euclidean distance. The model will be evaluated with the accuracy (average
-number of samples corectly classified). You need to implement the `fit`,
-`predict` and `score` methods for this class. The code you write should pass
+number of samples corectly classified). You need to implement the fit,
+predict and score methods for this class. The code you write should pass
 the test we implemented. You can run the tests by calling at the root of the
-repo `pytest test_sklearn_questions.py`.
+repo pytest test_sklearn_questions.py.
 
 We also ask to respect the pep8 convention: https://pep8.org. This will be
-enforced with `flake8`. You can check that there is no flake8 errors by
-calling `flake8` at the root of the repo.
+enforced with flake8. You can check that there is no flake8 errors by
+calling flake8 at the root of the repo.
 
-Finally, you need to write docstring similar to the one in `numpy_questions`
+Finally, you need to write docstring similar to the one in numpy_questions
 for the methods you code and for the class. The docstring will be checked using
-`pydocstyle` that you can also call at the root of the repo.
+pydocstyle that you can also call at the root of the repo.
 """
 import numpy as np
 from sklearn.base import BaseEstimator
 from sklearn.base import ClassifierMixin
 from sklearn.utils.validation import check_X_y
+from sklearn.utils.validation import check_array
 from sklearn.utils.validation import check_is_fitted
 from sklearn.utils.multiclass import check_classification_targets
-from sklearn.utils.validation import validate_data
 
 
 class OneNearestNeighbor(ClassifierMixin, BaseEstimator):
     """OneNearestNeighbor classifier."""
 
-    def __init__(self):  # noqa: D107
+    def __init__(self):
+        """Initialize the OneNearestNeighbor classifier."""
         pass
 
     def fit(self, X, y):
-        """Write docstring.
+        """Fit the OneNearestNeighbor classifier.
 
-        And describe parameters
+        This method saves the training samples and their labels,
+        used to predict the label of a test sample.
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            Training data. Each row represents one sample, and each column
+            represents a feature.
+
+        y : ndarray of shape (n_samples,)
+            Target labels corresponding to the training samples. Must be valid
+            classification targets (e.g., integers or strings).
+
+        Returns
+        -------
+        self : OneNearestNeighbor
+            The fitted OneNearestNeighbor classifier.
         """
         X, y = check_X_y(X, y)
         check_classification_targets(y)
-        self.X_ = X
-        self.y_ = y
         self.classes_ = np.unique(y)
         self.n_features_in_ = X.shape[1]
+
+        self.X_ = X
+        self.y_ = y
+
         return self
 
     def predict(self, X):
-        """Write docstring.
+        """Predict class labels for the given samples.
 
-        And describe parameters
+        For each input sample, the label of the closest training sample
+        is returned based on Euclidean distance.
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            Test data, where each row corresponds to a sample and each column
+            corresponds to a feature.
+
+        Returns
+        -------
+        y_pred : ndarray of shape (n_samples,)
+            Predicted class labels for each input sample.
         """
         check_is_fitted(self)
-        X = validate_data(self, X, reset=False)
+        X = check_array(X)
+
+        if X.shape[1] != self.n_features_in_:
+            raise ValueError(
+                f"X has {X.shape[1]} features, but "
+                f"{self.__class__.__name__} is expecting "
+                f"{self.n_features_in_} features as input"
+            )
+
         y_pred = np.full(
             shape=len(X), fill_value=self.classes_[0],
             dtype=self.classes_.dtype
         )
 
-        for i, x_test in enumerate(X):
-            distances = np.linalg.norm(self.X_ - x_test, axis=1)
-            nearest_index = np.argmin(distances)
-            y_pred[i] = self.y_[nearest_index]
+        euc_dist = np.sqrt(((X[None, :, :] - self.X_[:, None, :])**2)
+                           .sum(axis=2))
+        nearest_point = np.argmin(euc_dist, axis=0)
+        y_pred = self.y_[nearest_point]
+
        return y_pred
 
     def score(self, X, y):
-        """Write docstring.
+        """Compute the accuracy of the classifier on test data.
+
+        This method predicts the labels and compares
+        them to the true labels
+        to calculate the fraction of correctly classified samples.
+
+        Parameters
+        ----------
+        X : ndarray of shape (n_samples, n_features)
+            Test samples, row represents a sample and column a feature.
+
+        y : ndarray of shape (n_samples,)
+            True labels corresponding to the test samples.
 
-        And describe parameters
+        Returns
+        -------
+        accuracy : float
+            Proportion of samples that were correctly classified.
         """
         X, y = check_X_y(X, y)
         y_pred = self.predict(X)
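
Not part of the patch series: the short script below is a minimal sanity-check sketch. It assumes all three patches are applied and that numpy_questions.py and sklearn_questions.py are importable from the repo root; the file name check_assignment.py and the comparison against scikit-learn's KNeighborsClassifier(n_neighbors=1) are illustrative choices, not something the patches themselves ship. The authoritative checks remain `pytest test_sklearn_questions.py`, `flake8`, and `pydocstyle` at the root of the repo.

    # check_assignment.py -- hypothetical helper, not part of the patches.
    import numpy as np
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.neighbors import KNeighborsClassifier

    from numpy_questions import max_index, wallis_product
    from sklearn_questions import OneNearestNeighbor

    # max_index returns the (row, column) index of the largest entry.
    A = np.array([[1.0, 7.0], [3.0, 2.0]])
    assert max_index(A) == (0, 1)

    # The Wallis product converges slowly; 10000 terms gives pi to ~1e-4.
    assert abs(wallis_product(10000) - np.pi) < 1e-3

    # The 1-NN estimator should agree with scikit-learn's reference classifier.
    X, y = load_iris(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    one_nn = OneNearestNeighbor().fit(X_train, y_train)
    reference = KNeighborsClassifier(n_neighbors=1).fit(X_train, y_train)
    assert np.array_equal(one_nn.predict(X_test), reference.predict(X_test))
    print("accuracy:", one_nn.score(X_test, y_test))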