diff --git a/setup.py b/setup.py
index b64b9215..8eb0f9c1 100644
--- a/setup.py
+++ b/setup.py
@@ -55,7 +55,7 @@
     include_package_data=True,
     install_requires=install_requires,
     extras_require={
-        "tests": ["flake8", "pytest", "coverage", "pre-commit"],
+        "tests": ["flake8", "pytest", "coverage", "pre-commit", "hypothesis", "distutils"],
         "sparkml": ["pyspark>=2.4.4", "pyarrow>1.0"],
         "onnx": onnx_requires,
         "extra": extra_requires,
diff --git a/tests/test_onnxml_decision_tree_converter.py b/tests/test_onnxml_decision_tree_converter.py
index 567825aa..0c1b9503 100644
--- a/tests/test_onnxml_decision_tree_converter.py
+++ b/tests/test_onnxml_decision_tree_converter.py
@@ -8,6 +8,7 @@
 from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
 from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
 from onnxconverter_common.data_types import FloatTensorType
+from distutils.version import LooseVersion
 
 from hummingbird.ml import convert
 from hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, xgboost_installed
@@ -263,6 +264,7 @@ def test_random_forest_multi_random(self):
         not (onnx_ml_tools_installed() and onnx_runtime_installed()), reason="ONNXML test require ONNX, ORT and ONNXMLTOOLS"
     )
     @unittest.skipIf(not xgboost_installed(), reason="ONNXML BRANCH_LT test requires XGBoost")
+    @unittest.skipIf(LooseVersion(xgb.__version__) > LooseVersion("2.0"), reason="Unsupported XGBoost version")
     def test_xgboost_branch_lt(self):
         warnings.filterwarnings("ignore")
         n_features = 28
diff --git a/tests/test_xgboost_converter.py b/tests/test_xgboost_converter.py
index 42abaf31..b6f8eb3d 100644
--- a/tests/test_xgboost_converter.py
+++ b/tests/test_xgboost_converter.py
@@ -15,6 +15,7 @@
 
 if xgboost_installed():
     import xgboost as xgb
+    from xgboost import testing as tm
 
 if pandas_installed():
     import pandas as pd
@@ -98,13 +99,11 @@ def test_xgb_perf_tree_trav_multi_classifier_converter(self):
     def _run_xgb_ranker_converter(self, num_classes, extra_config={}):
         warnings.filterwarnings("ignore")
         for max_depth in [1, 3, 8, 10, 12]:
-            model = xgb.XGBRanker(n_estimators=10, max_depth=max_depth)
-            np.random.seed(0)
-            X = np.random.rand(100, 200)
-            X = np.array(X, dtype=np.float32)
-            y = np.random.randint(num_classes, size=100)
+            model = xgb.XGBRanker(n_estimators=10, max_depth=max_depth, ndcg_exp_gain=True)  # ndcg is default
+            X, y, q, w = tm.make_ltr(n_samples=1024, n_features=num_classes, n_query_groups=3, max_rel=3)
 
-            model.fit(X, y, group=[X.shape[0]])
+            model.fit(X, y, qid=q, sample_weight=w, eval_set=[(X, y)],
+                      eval_qid=(q,), sample_weight_eval_set=(w,), verbose=True)
 
             torch_model = hummingbird.ml.convert(model, "torch", X, extra_config=extra_config)
             self.assertIsNotNone(torch_model)
@@ -256,7 +255,7 @@ def test_xgb_regressor_converter_torchscript(self):
         import torch
 
         for max_depth in [1, 3, 8, 10, 12]:
-            model = xgb.XGBRegressor(n_estimators=10, max_depth=max_depth)
+            model = xgb.XGBRegressor(n_estimators=10, max_depth=max_depth, objective="rank:pairwise")
             np.random.seed(0)
             X = np.random.rand(100, 200)
             X = np.array(X, dtype=np.float32)
@@ -275,7 +274,7 @@ def test_xgb_classifier_converter_torchscript(self):
         import torch
 
         for max_depth in [1, 3, 8, 10, 12]:
-            model = xgb.XGBClassifier(n_estimators=10, max_depth=max_depth)
+            model = xgb.XGBClassifier(n_estimators=10, max_depth=max_depth, objective="rank:pairwise")
             np.random.seed(0)
             X = np.random.rand(100, 200)
             X = np.array(X, dtype=np.float32)
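
Note on the ranker changes above: the test now builds its data with XGBoost's
testing helper make_ltr, whose query ids (qid) and per-group weights line up
with the new fit() call; importing xgboost.testing also pulls in hypothesis,
which is presumably why "hypothesis" joins the "tests" extras. Below is a
minimal standalone sketch of the same flow, assuming XGBoost >= 2.0; the
n_features=10 and max_depth=3 values are illustrative, not taken from the
patch.

    import xgboost as xgb
    from xgboost import testing as tm

    # make_ltr returns features, integer relevance labels in [0, max_rel],
    # per-row query ids, and one weight per query group.
    X, y, qid, w = tm.make_ltr(n_samples=1024, n_features=10, n_query_groups=3, max_rel=3)

    # Per-row qid= supersedes the group=[...] style used in the removed test code.
    ranker = xgb.XGBRanker(n_estimators=10, max_depth=3)
    ranker.fit(X, y, qid=qid, sample_weight=w)
    scores = ranker.predict(X)

Because the weights come back one per query group, sample_weight here has
length n_query_groups rather than n_samples.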