diff --git a/numpy_questions.py b/numpy_questions.py
index 21fcec4b..72645158 100644
--- a/numpy_questions.py
+++ b/numpy_questions.py
@@ -37,11 +37,12 @@ def max_index(X):
         If the input is not a numpy array or if the shape is not 2D.
     """
-    i = 0
-    j = 0
-
-    # TODO
-
+    if not isinstance(X, np.ndarray):
+        raise ValueError("Input is not a numpy array.")
+    if X.ndim != 2:
+        raise ValueError("Input is not a 2D array.")
+    flat_index = np.argmax(X)
+    i, j = np.unravel_index(flat_index, X.shape)
     return i, j
@@ -62,6 +63,16 @@ def wallis_product(n_terms):
     pi : float
         The approximation of order `n_terms` of pi using the Wallis product.
     """
-    # XXX : The n_terms is an int that corresponds to the number of
-    # terms in the product. For example 10000.
-    return 0.
+    if not isinstance(n_terms, int):
+        raise ValueError("n_terms must be an integer.")
+    if n_terms < 0:
+        raise ValueError("n_terms must be non-negative.")
+    if n_terms == 0:
+        pi = 1.0
+    else:
+        product_terms = np.arange(1, n_terms + 1)
+        numerator = 4 * (product_terms ** 2)
+        denominator = numerator - 1
+        pi = 2 * np.prod(numerator / denominator)
+
+    return pi
diff --git a/sklearn_questions.py b/sklearn_questions.py
index f65038c6..362fc58a 100644
--- a/sklearn_questions.py
+++ b/sklearn_questions.py
@@ -19,6 +19,7 @@ for the methods you code and for the class.
 The docstring will be checked using `pydocstyle` that you can also call
 at the root of the repo.
 """
+
 import numpy as np
 from sklearn.base import BaseEstimator
 from sklearn.base import ClassifierMixin
@@ -28,47 +29,75 @@ from sklearn.utils.multiclass import check_classification_targets
 
 
-class OneNearestNeighbor(BaseEstimator, ClassifierMixin):
-    "OneNearestNeighbor classifier."
+class OneNearestNeighbor(ClassifierMixin, BaseEstimator):
+    """One Nearest Neighbor classifier using Euclidean distance.
+
+    This classifier assigns to each input sample the label of its closest
+    training sample, where closeness is measured using the Euclidean
+    distance.
+    """
 
     def __init__(self):  # noqa: D107
         pass
 
     def fit(self, X, y):
-        """Write docstring.
+        """Fit the OneNearestNeighbor classifier from the training data.
 
-        And describe parameters
+        Parameters
+        X: ndarray of shape (n_samples, n_features) containing training data.
+        y: ndarray of shape (n_samples,).
+        It contains target labels corresponding to the training samples.
+
+        Returns
+        self : OneNearestNeighbor
+        The fitted classifier.
         """
         X, y = check_X_y(X, y)
         check_classification_targets(y)
         self.classes_ = np.unique(y)
         self.n_features_in_ = X.shape[1]
-
-        # XXX fix
+        self.X_ = X
+        self.y_ = y
         return self
 
     def predict(self, X):
-        """Write docstring.
+        """Predict class labels for the input samples.
 
-        And describe parameters
+        Parameters
+        X: ndarray of shape (n_samples, n_features)
+        containing input samples for which predictions are requested.
+        It must contain the same number of features as the training data.
+
+        Returns
+        y_pred: ndarray of shape (n_samples,),
+        containing predicted class labels for each input sample.
         """
         check_is_fitted(self)
         X = check_array(X)
+        X = self._validate_data(X, reset=False)
         y_pred = np.full(
             shape=len(X), fill_value=self.classes_[0],
             dtype=self.classes_.dtype
         )
+        differences = (X[:, np.newaxis, :] - self.X_[np.newaxis, :, :])
+        distances = np.sqrt(np.sum(differences ** 2, axis=2))
+        nearest_indices = np.argmin(distances, axis=1)
+        y_pred = self.y_[nearest_indices]
 
-        # XXX fix
         return y_pred
 
     def score(self, X, y):
-        """Write docstring.
+        """Compute the accuracy of the classifier.
+
+        Parameters
+        X: ndarray of shape (n_samples, n_features) containing test samples.
+        y: array-like of shape (n_samples,)
+        containing true labels for the test samples.
 
-        And describe parameters
+        Returns
+        accuracy: The fraction of correctly classified samples.
         """
         X, y = check_X_y(X, y)
         y_pred = self.predict(X)
-
-        # XXX fix
-        return y_pred.sum()
+        score = np.sum(y_pred == y)
+        return score / len(y)
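
A quick local sanity check of the three changes above could look like the sketch below. It is only illustrative: the toy arrays, blob data and variable names are made up here, and the imports assume the patched numpy_questions.py and sklearn_questions.py are importable from the current directory. The repository's own pytest, flake8 and pydocstyle runs remain the authoritative checks.

import numpy as np
from sklearn.model_selection import train_test_split

from numpy_questions import max_index, wallis_product
from sklearn_questions import OneNearestNeighbor

# max_index returns the (row, column) position of the maximum entry.
A = np.array([[1.0, 7.0, 3.0],
              [4.0, 5.0, 6.0]])
assert max_index(A) == (0, 1)

# wallis_product approaches pi as the number of terms grows.
print(wallis_product(10))      # coarse approximation
print(wallis_product(100000))  # should be close to np.pi

# OneNearestNeighbor on two well-separated Gaussian blobs.
rng = np.random.RandomState(0)
X = np.concatenate([rng.randn(50, 2) + 3.0, rng.randn(50, 2) - 3.0])
y = np.array([0] * 50 + [1] * 50)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = OneNearestNeighbor().fit(X_train, y_train)
print(clf.predict(X_test[:5]))    # labels of the first five test points
print(clf.score(X_test, y_test))  # accuracy, expected to be 1.0 here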