File: test_gpu_basic_models.py

import os
import sys

import numpy as np
import pytest

import xgboost as xgb
from xgboost import testing as tm

sys.path.append("tests/python")
import test_basic_models as test_bm

# Don't import the test class directly, otherwise its tests will run twice.
import test_callback as test_cb  # noqa

rng = np.random.RandomState(1994)


class TestGPUBasicModels:
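    # Reuse the CPU test implementations and drive them with GPU parameters.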
    cpu_test_cb = test_cb.TestCallbacks()
    cpu_test_bm = test_bm.TestModels()

    def run_cls(self, X, y):
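        """Fit the same GPU classifier twice on (X, y) and return hashes of the two saved models."""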
        cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
        cls.fit(X, y)
        cls.get_booster().save_model("test_deterministic_gpu_hist-0.json")

        cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
        cls.fit(X, y)
        cls.get_booster().save_model("test_deterministic_gpu_hist-1.json")

        with open("test_deterministic_gpu_hist-0.json", "r") as fd:
            model_0 = fd.read()
        with open("test_deterministic_gpu_hist-1.json", "r") as fd:
            model_1 = fd.read()

        os.remove("test_deterministic_gpu_hist-0.json")
        os.remove("test_deterministic_gpu_hist-1.json")

        return hash(model_0), hash(model_1)

    def test_custom_objective(self):
        self.cpu_test_bm.run_custom_objective("gpu_hist")

    def test_eta_decay(self):
        self.cpu_test_cb.run_eta_decay("gpu_hist")

    @pytest.mark.parametrize(
        "objective", ["binary:logistic", "reg:absoluteerror", "reg:quantileerror"]
    )
    def test_eta_decay_leaf_output(self, objective) -> None:
        self.cpu_test_cb.run_eta_decay_leaf_output("gpu_hist", objective)

    def test_deterministic_gpu_hist(self):
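        """Two identical GPU training runs should produce byte-identical models."""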
        kRows = 1000
        kCols = 64
        kClasses = 4
        # Create large values to force rounding.
        X = np.random.randn(kRows, kCols) * 1e4
        y = np.random.randint(0, kClasses, size=kRows)

        model_0, model_1 = self.run_cls(X, y)
        assert model_0 == model_1

    @pytest.mark.skipif(**tm.no_sklearn())
    def test_invalid_gpu_id(self):
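        """An out-of-range GPU ordinal is tolerated by default but raises when fail_on_invalid_gpu_id is set."""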
        from sklearn.datasets import load_digits

        X, y = load_digits(return_X_y=True)
        # Should still fit successfully despite the invalid GPU id.
        cls1 = xgb.XGBClassifier(tree_method="gpu_hist", gpu_id=9999)
        cls1.fit(X, y)
        # Should raise an error when fail_on_invalid_gpu_id is enabled.
        cls2 = xgb.XGBClassifier(
            tree_method="gpu_hist", gpu_id=9999, fail_on_invalid_gpu_id=True
        )
        with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
            cls2.fit(X, y)

        cls2 = xgb.XGBClassifier(
            tree_method="hist", device="cuda:9999", fail_on_invalid_gpu_id=True
        )
        with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
            cls2.fit(X, y)
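

if __name__ == "__main__":
    # A minimal sketch for running this module directly; this entry point is an
    # assumption, since the suite is normally driven by pytest from the repository
    # root. Either way, a CUDA-capable GPU and a GPU-enabled xgboost build are
    # required for these tests to pass.
    pytest.main([__file__, "-v"])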