setup.py (2 changes: 1 addition & 1 deletion)

@@ -55,7 +55,7 @@
     include_package_data=True,
     install_requires=install_requires,
     extras_require={
-        "tests": ["flake8", "pytest", "coverage", "pre-commit"],
+        "tests": ["flake8", "pytest", "coverage", "pre-commit", "hypothesis", "distutils"],
         "sparkml": ["pyspark>=2.4.4", "pyarrow>1.0"],
         "onnx": onnx_requires,
         "extra": extra_requires,
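Note on the new entries: hypothesis and distutils land only in environments that install the tests extra, e.g. pip install -e ".[tests]" from a checkout. The distutils entry is presumably there to keep the LooseVersion import added below working on Python 3.12+, where distutils was dropped from the standard library (PEP 632).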
tests/test_onnxml_decision_tree_converter.py (2 changes: 2 additions & 0 deletions)

@@ -8,6 +8,7 @@
 from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
 from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
 from onnxconverter_common.data_types import FloatTensorType
+from distutils.version import LooseVersion
 
 from hummingbird.ml import convert
 from hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, xgboost_installed

@@ -263,6 +264,7 @@ def test_random_forest_multi_random(self):
         not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
     )
     @unittest.skipIf(not xgboost_installed(), reason="ONNXML BRANCH_LT test requires XGBoost")
+    @unittest.skipIf(LooseVersion(xgb.__version__) > LooseVersion("2.0"), reason="Unsupported XGBoost version")
     def test_xgboost_branch_lt(self):
         warnings.filterwarnings("ignore")
         n_features = 28
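For context, LooseVersion does a simple segment-wise comparison, so this decorator skips the BRANCH_LT test on any XGBoost release strictly newer than 2.0 while still running it on 2.0 itself. A minimal sketch of the gate's behavior (not part of the PR):

from distutils.version import LooseVersion

print(LooseVersion("1.7.6") > LooseVersion("2.0"))  # False -> test runs
print(LooseVersion("2.0") > LooseVersion("2.0"))    # False -> test still runs on exactly 2.0
print(LooseVersion("2.0.3") > LooseVersion("2.0"))  # True  -> test skipped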
tests/test_xgboost_converter.py (15 changes: 7 additions & 8 deletions)

@@ -15,6 +15,7 @@
 if xgboost_installed():
     import xgboost as xgb
+    from xgboost import testing as tm
 
 if pandas_installed():
     import pandas as pd

@@ -98,13 +99,11 @@ def test_xgb_perf_tree_trav_multi_classifier_converter(self):
     def _run_xgb_ranker_converter(self, num_classes, extra_config={}):
         warnings.filterwarnings("ignore")
         for max_depth in [1, 3, 8, 10, 12]:
-            model = xgb.XGBRanker(n_estimators=10, max_depth=max_depth)
-            np.random.seed(0)
-            X = np.random.rand(100, 200)
-            X = np.array(X, dtype=np.float32)
-            y = np.random.randint(num_classes, size=100)
+            model = xgb.XGBRanker(n_estimators=10, max_depth=max_depth, ndcg_exp_gain=True)  # ndcg is default
+            X, y, q, w = tm.make_ltr(n_samples=1024, n_features=num_classes, n_query_groups=3, max_rel=3)
 
-            model.fit(X, y, group=[X.shape[0]])
+            model.fit(X, y, qid=q, sample_weight=w, eval_set=[(X, y)],
+                      eval_qid=(q,), sample_weight_eval_set=(w,), verbose=True)
 
             torch_model = hummingbird.ml.convert(model, "torch", X, extra_config=extra_config)
             self.assertIsNotNone(torch_model)
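For readers unfamiliar with the new helper: make_ltr is XGBoost's internal learning-to-rank test-data generator. It lives in xgboost.testing, so it is not a stable public API; the signature and the shapes below are inferred from how this test consumes it, not from documentation:

from xgboost import testing as tm

# Assumed shapes, inferred from the call in _run_xgb_ranker_converter:
X, y, q, w = tm.make_ltr(n_samples=1024, n_features=200, n_query_groups=3, max_rel=3)
# X: (1024, 200) float features
# y: (1024,) integer relevance labels in [0, max_rel]
# q: (1024,) query-group id per row; rows sharing a qid form one query
# w: one weight per query group, passed to fit() as sample_weight

The switch from group=[X.shape[0]] (a single group spanning all rows) to qid=q gives the ranker several real query groups, matching the qid-based fitting API that recent XGBoost releases recommend for ranking.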
@@ -256,7 +255,7 @@ def test_xgb_regressor_converter_torchscript(self):
         import torch
 
         for max_depth in [1, 3, 8, 10, 12]:
-            model = xgb.XGBRegressor(n_estimators=10, max_depth=max_depth)
+            model = xgb.XGBRegressor(n_estimators=10, max_depth=max_depth, objective="rank:pairwise")
             np.random.seed(0)
             X = np.random.rand(100, 200)
             X = np.array(X, dtype=np.float32)

@@ -275,7 +274,7 @@ def test_xgb_classifier_converter_torchscript(self):
         import torch
 
         for max_depth in [1, 3, 8, 10, 12]:
-            model = xgb.XGBClassifier(n_estimators=10, max_depth=max_depth)
+            model = xgb.XGBClassifier(n_estimators=10, max_depth=max_depth, objective="rank:pairwise")
             np.random.seed(0)
             X = np.random.rand(100, 200)
             X = np.array(X, dtype=np.float32)
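These two TorchScript tests now train with objective="rank:pairwise", so the regressor and classifier conversion paths also get exercised on boosters produced by a ranking objective. A standalone sketch in the same spirit; it mirrors the modified tests and assumes, as they imply, that fitting a ranking objective without qid treats all rows as one query group:

import numpy as np
import xgboost as xgb
import hummingbird.ml

np.random.seed(0)
X = np.random.rand(100, 200).astype(np.float32)
y = np.random.randint(2, size=100)

# Ranking objective on a plain regressor: the objective changes how the
# trees are fit, not the tree format Hummingbird has to translate.
model = xgb.XGBRegressor(n_estimators=10, max_depth=3, objective="rank:pairwise")
model.fit(X, y)

torch_model = hummingbird.ml.convert(model, "torch", X)
np.testing.assert_allclose(model.predict(X), torch_model.predict(X), rtol=1e-5, atol=1e-5)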