File: test_gpu_eval_metrics.py

import json
import sys

import pytest

import xgboost
from xgboost import testing as tm
from xgboost.testing.metrics import check_precision_score, check_quantile_error

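# Reuse the CPU evaluation-metric tests from tests/python by putting that directory on the import path.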
sys.path.append("tests/python")
import test_eval_metrics as test_em  # noqa


class TestGPUEvalMetrics:
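    """GPU evaluation-metric tests; most cases delegate to the shared CPU test implementations."""
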
    cpu_test = test_em.TestEvalMetrics()

    @pytest.mark.parametrize("n_samples", [4, 100, 1000])
    def test_roc_auc_binary(self, n_samples):
        self.cpu_test.run_roc_auc_binary("gpu_hist", n_samples)

    @pytest.mark.parametrize(
        "n_samples,weighted", [(4, False), (100, False), (1000, False), (1000, True)]
    )
    def test_roc_auc_multi(self, n_samples, weighted):
        self.cpu_test.run_roc_auc_multi("gpu_hist", n_samples, weighted)

    @pytest.mark.parametrize("n_samples", [4, 100, 1000])
    def test_roc_auc_ltr(self, n_samples):
        import numpy as np

        # Synthetic learning-to-rank data: two equally sized query groups.
        rng = np.random.RandomState(1994)
        n_features = 10
        X = rng.randn(n_samples, n_features)
        y = rng.randint(0, 16, size=n_samples)
        group = np.array([n_samples // 2, n_samples // 2])

        Xy = xgboost.DMatrix(X, y, group=group)

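        # Train with the CPU "hist" tree method first, then compare the reported AUC after moving the booster to the GPU.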
        booster = xgboost.train(
            {"tree_method": "hist", "eval_metric": "auc", "objective": "rank:ndcg"},
            Xy,
            num_boost_round=10,
        )
        cpu_auc = float(booster.eval(Xy).split(":")[1])
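        # Switch the trained booster to the GPU and confirm the device setting is reflected in the saved config.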
        booster.set_param({"device": "cuda:0"})
        assert (
            json.loads(booster.save_config())["learner"]["generic_param"]["device"]
            == "cuda:0"
        )
        gpu_auc = float(booster.eval(Xy).split(":")[1])
        assert (
            json.loads(booster.save_config())["learner"]["generic_param"]["device"]
            == "cuda:0"
        )

        np.testing.assert_allclose(cpu_auc, gpu_auc)

    def test_pr_auc_binary(self):
        self.cpu_test.run_pr_auc_binary("gpu_hist")

    def test_pr_auc_multi(self):
        self.cpu_test.run_pr_auc_multi("gpu_hist")

    def test_pr_auc_ltr(self):
        self.cpu_test.run_pr_auc_ltr("gpu_hist")

    def test_precision_score(self):
        check_precision_score("gpu_hist")

    @pytest.mark.skipif(**tm.no_sklearn())
    def test_quantile_error(self) -> None:
        check_quantile_error("gpu_hist")