38 changes: 30 additions & 8 deletions numpy_questions.py
@@ -29,20 +29,29 @@ def max_index(X):
Returns
-------
(i, j) : tuple(int)
The row and columnd index of the maximum.
The row and column index of the maximum.

Raises
------
ValueError
If the input is not a numpy array or
if the shape is not 2D.
"""
i = 0
j = 0
# Validate input type
if not isinstance(X, np.ndarray):
raise ValueError("X must be a numpy ndarray")

# TODO
# Validate shape
if X.ndim != 2:
raise ValueError("X must be a 2D array")

return i, j
# Find flat index of maximum value
flat_idx = np.argmax(X)

# Convert flat index to (row, col)
i, j = np.unravel_index(flat_idx, X.shape)

return int(i), int(j)

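# Illustrative sketch (not part of the submitted file; the small array below is
# made up for demonstration): np.argmax returns the position of the maximum in
# the flattened array, and np.unravel_index maps it back to (row, column).
import numpy as np

A = np.array([[1, 7],
              [3, 5]])
flat_idx = np.argmax(A)                     # 1 (flattened order is 1, 7, 3, 5)
i, j = np.unravel_index(flat_idx, A.shape)  # (0, 1), i.e. row 0, column 1
assert (int(i), int(j)) == (0, 1)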

def wallis_product(n_terms):
@@ -62,6 +71,19 @@ def wallis_product(n_terms):
pi : float
The approximation of order `n_terms` of pi using the Wallis product.
"""
# XXX : The n_terms is an int that corresponds to the number of
# terms in the product. For example 10000.
return 0.
if not isinstance(n_terms, int):
raise ValueError("n_terms must be an integer")
if n_terms < 0:
raise ValueError("n_terms must be non-negative")

# Special case as required by the tests
if n_terms == 0:
return 1.0

product = 1.0
for n in range(1, n_terms + 1):
# term = (4 n^2) / (4 n^2 - 1)
product *= (4.0 * n * n) / (4.0 * n * n - 1.0)

# Wallis product converges to π/2
return 2.0 * product
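
# Illustrative sketch (not part of the submitted file; a standalone
# re-implementation of the same loop): the partial product
# 2 * prod_{n=1..N} 4n^2 / (4n^2 - 1) approaches pi as N grows.
import math


def _wallis_sketch(n_terms):
    product = 1.0
    for n in range(1, n_terms + 1):
        product *= (4.0 * n * n) / (4.0 * n * n - 1.0)
    return 2.0 * product


print(_wallis_sketch(10))     # roughly 3.0677
print(_wallis_sketch(10000))  # roughly 3.1415
print(math.pi)                # 3.141592653589793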
70 changes: 55 additions & 15 deletions sklearn_questions.py
@@ -2,19 +2,16 @@

The goal of this assignment is to implement a scikit-learn estimator for the
OneNearestNeighbor by yourself and check that it is working properly.

The nearest neighbor classifier predicts for a point X_i the target y_k of
the training sample X_k which is the closest to X_i. We measure proximity with
the Euclidean distance. The model will be evaluated with the accuracy (average
number of samples correctly classified). You need to implement the `fit`,
`predict` and `score` methods for this class. The code you write should pass
the test we implemented. You can run the tests by calling at the root of the
repo `pytest test_sklearn_questions.py`.

We also ask you to respect the pep8 convention: https://pep8.org. This will be
enforced with `flake8`. You can check that there are no flake8 errors by
calling `flake8` at the root of the repo.

Finally, you need to write docstrings similar to the one in `numpy_questions`
for the methods you code and for the class. The docstrings will be checked using
`pydocstyle`, which you can also call at the root of the repo.
@@ -28,29 +25,50 @@
from sklearn.utils.multiclass import check_classification_targets


class OneNearestNeighbor(BaseEstimator, ClassifierMixin):
"OneNearestNeighbor classifier."
class OneNearestNeighbor(ClassifierMixin, BaseEstimator):
"""OneNearestNeighbor classifier."""

def __init__(self): # noqa: D107
def __init__(self): # noqa: D107
pass

def fit(self, X, y):
"""Write docstring.

And describe parameters
"""Fit the OneNearestNeighbor classifier.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training input samples.
y : ndarray of shape (n_samples,)
Target labels associated with each training sample.

Returns
-------
self : object
Fitted estimator.
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_ = np.unique(y)
self.n_features_in_ = X.shape[1]

# XXX fix
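# A 1-nearest-neighbour "fit" has nothing to optimise: it only memorises
# the training set, and all the work happens at prediction time.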
self.X_ = X
self.y_ = y

return self

def predict(self, X):
"""Write docstring.
"""Predict class labels for given samples.

And describe parameters
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Samples for which to predict labels.

Returns
-------
y_pred : ndarray of shape (n_samples,)
Predicted class label for each sample.
"""
check_is_fitted(self)
X = check_array(X)
@@ -60,15 +78,37 @@ def predict(self, X):
)

# XXX fix
if X.shape[1] != self.n_features_in_:
raise ValueError(
f"X has {X.shape[1]} features, but OneNearestNeighbor "
f"is expecting {self.n_features_in_} features as input"
)

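# Broadcasting note: X has shape (n_test, n_features) and self.X_ has shape
# (n_train, n_features). Adding the new axes makes diff of shape
# (n_test, n_train, n_features); summing squared differences over axis 2
# gives squared Euclidean distances of shape (n_test, n_train). The square
# root is omitted because it does not change which index attains the minimum.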
diff = X[:, np.newaxis, :] - self.X_[np.newaxis, :, :]
distances = np.sum(diff ** 2, axis=2)

nearest_idx = np.argmin(distances, axis=1)
y_pred[:] = self.y_[nearest_idx]

return y_pred

def score(self, X, y):
"""Write docstring.

And describe parameters
"""Compute accuracy of the classifier.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples.
y : ndarray of shape (n_samples,)
True target labels.

Returns
-------
score : float
Accuracy of predictions: fraction of correctly classified samples.
"""
X, y = check_X_y(X, y)
y_pred = self.predict(X)

# XXX fix
return y_pred.sum()
return np.mean(y_pred == y)
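
# Illustrative usage sketch (not part of the submitted file; the toy data below
# are made up): the expected end-to-end behaviour of the estimator defined above.
import numpy as np

X_train = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
y_train = np.array(["a", "b", "b"])

clf = OneNearestNeighbor().fit(X_train, y_train)
print(clf.predict(np.array([[0.1, 0.1], [1.9, 2.1]])))  # expected: ['a' 'b']
print(clf.score(X_train, y_train))  # expected: 1.0, each point is its own nearest neighbour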