23 changes: 17 additions & 6 deletions numpy_questions.py
@@ -37,10 +37,13 @@ def max_index(X):
If the input is not a numpy array or
if the shape is not 2D.
"""
    if not isinstance(X, np.ndarray):
        raise ValueError("X must be a numpy ndarray.")
    if X.ndim != 2:
        raise ValueError("X must be a 2D array.")

    # Index of the maximum over the flattened array, mapped back to (row, column).
    flat_idx = np.argmax(X)
    i, j = np.unravel_index(flat_idx, X.shape)

return i, j

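A standalone sketch (not part of the change set) of the argmax/unravel_index idiom used above; the toy array is invented for illustration.

import numpy as np

X = np.array([[1.0, 7.0, 3.0],
              [4.0, 9.0, 2.0]])               # toy input, for illustration only

flat_idx = np.argmax(X)                       # 4: position of the max in the flattened array
i, j = np.unravel_index(flat_idx, X.shape)    # (1, 1): corresponding row and column
assert X[i, j] == X.max()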
@@ -62,6 +65,14 @@ def wallis_product(n_terms):
pi : float
The approximation of order `n_terms` of pi using the Wallis product.
"""
    # n_terms is an int giving the number of terms in the product, e.g. 10000.
if not isinstance(n_terms, int) or n_terms < 0:
raise ValueError("n_terms must be a non-negative integer.")

if n_terms == 0:
return 1.0

k = np.arange(1, n_terms + 1, dtype=float)
terms = (4 * k ** 2) / (4 * k ** 2 - 1)
product = np.prod(terms)

return 2 * product
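As context for the implementation above: the Wallis product states pi/2 = prod_{k=1..inf} 4k^2 / (4k^2 - 1), so multiplying the first n_terms factors and doubling the result approximates pi. A minimal usage sketch, assuming numpy_questions.py is importable; the printed values are approximate.

from numpy_questions import wallis_product  # assumes the module is on the import path

print(wallis_product(10))      # ~3.068, still noticeably below pi
print(wallis_product(100000))  # ~3.141585, the error shrinks roughly like 1/n_terms

The convergence is slow, which is why computing all the factors at once with np.arange and np.prod is preferable to a Python loop for large n_terms.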
80 changes: 70 additions & 10 deletions sklearn_questions.py
@@ -28,47 +28,107 @@
from sklearn.utils.multiclass import check_classification_targets


class OneNearestNeighbor(ClassifierMixin, BaseEstimator):
    """OneNearestNeighbor classifier.

    A simple classifier that assigns to each sample the label of the
    closest training sample according to the Euclidean distance.
    """

    def __init__(self):
"""Initialize the estimator."""
pass

    def fit(self, X, y):
        """Fit the nearest neighbor classifier.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.

y : ndarray of shape (n_samples,)
Target labels.

Returns
-------
self : object
Fitted estimator.

"""
X, y = check_X_y(X, y)
check_classification_targets(y)
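        # Memorize the training set: 1-NN defers all the work to predict time.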
self.X_ = X
self.y_ = y
self.classes_ = np.unique(y)
self.n_features_in_ = X.shape[1]

return self

    def predict(self, X):
        """Predict class labels for samples in X.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples.

Returns
-------
y_pred : ndarray of shape (n_samples,)
Predicted labels.
"""
check_is_fitted(self)
X = check_array(X)

        # Check that the number of features matches what was seen during fit.
if X.shape[1] != self.n_features_in_:
raise ValueError(
f"X has {X.shape[1]} features, but "
f"{self.__class__.__name__} is expecting "
f"{self.n_features_in_} features as input"
)

        y_pred = np.full(
            shape=len(X),
            fill_value=self.classes_[0],
            dtype=self.classes_.dtype,
        )

        # Assign each test sample the label of its nearest training sample.
for i, x in enumerate(X):
distances = np.linalg.norm(self.X_ - x, axis=1)
nearest_idx = np.argmin(distances)
y_pred[i] = self.y_[nearest_idx]

return y_pred

    def score(self, X, y):
        """Compute the accuracy of the classifier.

Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples.

y : ndarray of shape (n_samples,)
True labels.

Returns
-------
accuracy : float
Proportion of correctly classified samples.

        """
X, y = check_X_y(X, y)
y_pred = self.predict(X)

        # Accuracy: fraction of samples whose predicted label matches the true one.
return np.mean(y_pred == y)
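For completeness, a usage sketch of the estimator defined above (not part of the change set), assuming sklearn_questions.py is importable; the toy data are invented for illustration.

import numpy as np
from sklearn_questions import OneNearestNeighbor  # assumes the module is on the import path

# Two well-separated clusters labelled 0 and 1.
X_train = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
y_train = np.array([0, 0, 1, 1])

clf = OneNearestNeighbor().fit(X_train, y_train)
print(clf.predict(np.array([[0.2, 0.1], [4.8, 5.1]])))  # expected: [0 1]
print(clf.score(X_train, y_train))                      # 1.0

Because every training sample is its own nearest neighbor, the training-set accuracy of 1-NN is always 1.0; a held-out split is needed for a meaningful score.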