File: ogb_mag.py

Package: pytorch-geometric 2.6.1-7

import os
import os.path as osp
import shutil
from typing import Callable, List, Optional

import numpy as np
import torch

from torch_geometric.data import (
    HeteroData,
    InMemoryDataset,
    download_url,
    extract_zip,
)
from torch_geometric.io import fs


class OGB_MAG(InMemoryDataset):
    r"""The :obj:`ogbn-mag` dataset from the `"Open Graph Benchmark: Datasets
    for Machine Learning on Graphs" <https://arxiv.org/abs/2005.00687>`_ paper.
    :obj:`ogbn-mag` is a heterogeneous graph composed of a subset of the
    Microsoft Academic Graph (MAG).
    It contains four types of entities — papers (736,389 nodes), authors
    (1,134,649 nodes), institutions (8,740 nodes), and fields of study
    (59,965 nodes) — as well as four types of directed relations connecting two
    types of entities.
    Each paper is associated with a 128-dimensional :obj:`word2vec` feature
    vector, while all other node types are not associated with any input
    features.
    The task is to predict the venue (conference or journal) of each paper.
    In total, there are 349 different venues.
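
    A minimal usage sketch (the :obj:`root` path below is only an
    illustrative choice):

    .. code-block:: python

        from torch_geometric.datasets import OGB_MAG

        dataset = OGB_MAG(root='data/OGB')
        # dataset = OGB_MAG(root='data/OGB', preprocess='metapath2vec')
        data = dataset[0]    # a single HeteroData object
        data['paper'].x      # word2vec features of shape [736389, 128]
        data['paper'].y      # venue labels (349 classes)
        data['paper', 'cites', 'paper'].edge_index  # citation edges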

    Args:
        root (str): Root directory where the dataset should be saved.
        preprocess (str, optional): Pre-processes the original dataset by
            adding structural features (:obj:`"metapath2vec"`, :obj:`"TransE"`)
            to featureless nodes. (default: :obj:`None`)
        transform (callable, optional): A function/transform that takes in an
            :obj:`torch_geometric.data.HeteroData` object and returns a
            transformed version. The data object will be transformed before
            every access. (default: :obj:`None`)
        pre_transform (callable, optional): A function/transform that takes in
            an :obj:`torch_geometric.data.HeteroData` object and returns a
            transformed version. The data object will be transformed before
            being saved to disk. (default: :obj:`None`)
        force_reload (bool, optional): Whether to re-process the dataset.
            (default: :obj:`False`)
    """

    url = 'http://snap.stanford.edu/ogb/data/nodeproppred/mag.zip'
    urls = {
        'metapath2vec': ('https://data.pyg.org/datasets/'
                         'mag_metapath2vec_emb.zip'),
        'transe': ('https://data.pyg.org/datasets/'
                   'mag_transe_emb.zip'),
    }

    def __init__(
        self,
        root: str,
        preprocess: Optional[str] = None,
        transform: Optional[Callable] = None,
        pre_transform: Optional[Callable] = None,
        force_reload: bool = False,
    ) -> None:
        preprocess = None if preprocess is None else preprocess.lower()
        self.preprocess = preprocess
        assert self.preprocess in [None, 'metapath2vec', 'transe']
        super().__init__(root, transform, pre_transform,
                         force_reload=force_reload)
        self.load(self.processed_paths[0], data_cls=HeteroData)

    @property
    def num_classes(self) -> int:
        assert isinstance(self._data, HeteroData)
        return int(self._data['paper'].y.max()) + 1

    @property
    def raw_dir(self) -> str:
        return osp.join(self.root, 'mag', 'raw')

    @property
    def processed_dir(self) -> str:
        return osp.join(self.root, 'mag', 'processed')

    @property
    def raw_file_names(self) -> List[str]:
        file_names = [
            'node-feat', 'node-label', 'relations', 'split',
            'num-node-dict.csv.gz'
        ]

        if self.preprocess is not None:
            file_names += [f'mag_{self.preprocess}_emb.pt']

        return file_names

    @property
    def processed_file_names(self) -> str:
        if self.preprocess is not None:
            return f'data_{self.preprocess}.pt'
        else:
            return 'data.pt'

    def download(self) -> None:
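        # Download the OGB `mag.zip`, flatten its nested `mag/raw` and
        # `mag/split` folders into `raw_dir`, and optionally fetch the
        # pre-computed structural node embeddings.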
        if not all([osp.exists(f) for f in self.raw_paths[:5]]):
            path = download_url(self.url, self.raw_dir)
            extract_zip(path, self.raw_dir)
            for file_name in ['node-feat', 'node-label', 'relations']:
                path = osp.join(self.raw_dir, 'mag', 'raw', file_name)
                shutil.move(path, self.raw_dir)
            path = osp.join(self.raw_dir, 'mag', 'split')
            shutil.move(path, self.raw_dir)
            path = osp.join(self.raw_dir, 'mag', 'raw', 'num-node-dict.csv.gz')
            shutil.move(path, self.raw_dir)
            fs.rm(osp.join(self.raw_dir, 'mag'))
            os.remove(osp.join(self.raw_dir, 'mag.zip'))
        if self.preprocess is not None:
            path = download_url(self.urls[self.preprocess], self.raw_dir)
            extract_zip(path, self.raw_dir)
            os.remove(path)

    def process(self) -> None:
        import pandas as pd

        data = HeteroData()

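        # Paper node features: 128-dimensional word2vec vectors stored as a
        # gzipped CSV, one row per paper.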
        path = osp.join(self.raw_dir, 'node-feat', 'paper', 'node-feat.csv.gz')
        x_paper = pd.read_csv(path, compression='gzip', header=None,
                              dtype=np.float32).values
        data['paper'].x = torch.from_numpy(x_paper)

        path = osp.join(self.raw_dir, 'node-feat', 'paper', 'node_year.csv.gz')
        year_paper = pd.read_csv(path, compression='gzip', header=None,
                                 dtype=np.int64).values
        data['paper'].year = torch.from_numpy(year_paper).view(-1)

        path = osp.join(self.raw_dir, 'node-label', 'paper',
                        'node-label.csv.gz')
        y_paper = pd.read_csv(path, compression='gzip', header=None,
                              dtype=np.int64).values.flatten()
        data['paper'].y = torch.from_numpy(y_paper)

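        # All other node types are featureless: either record their node
        # counts, or attach the pre-computed embeddings selected via
        # `preprocess`.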
        if self.preprocess is None:
            path = osp.join(self.raw_dir, 'num-node-dict.csv.gz')
            num_nodes_df = pd.read_csv(path, compression='gzip')
            for node_type in ['author', 'institution', 'field_of_study']:
                data[node_type].num_nodes = num_nodes_df[node_type].tolist()[0]
        else:
            emb_dict = fs.torch_load(self.raw_paths[-1])
            for key, value in emb_dict.items():
                if key != 'paper':
                    data[key].x = value

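        # Each relation is stored as an edge list under
        # `relations/<src>___<rel>___<dst>/edge.csv.gz`.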
        for edge_type in [('author', 'affiliated_with', 'institution'),
                          ('author', 'writes', 'paper'),
                          ('paper', 'cites', 'paper'),
                          ('paper', 'has_topic', 'field_of_study')]:

            f = '___'.join(edge_type)
            path = osp.join(self.raw_dir, 'relations', f, 'edge.csv.gz')
            edge_index = pd.read_csv(path, compression='gzip', header=None,
                                     dtype=np.int64).values
            edge_index = torch.from_numpy(edge_index).t().contiguous()
            data[edge_type].edge_index = edge_index

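        # Turn the time-based paper split indices into boolean
        # train/val/test node masks.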
        for f, v in [('train', 'train'), ('valid', 'val'), ('test', 'test')]:
            path = osp.join(self.raw_dir, 'split', 'time', 'paper',
                            f'{f}.csv.gz')
            idx = pd.read_csv(path, compression='gzip', header=None,
                              dtype=np.int64).values.flatten()
            idx = torch.from_numpy(idx)
            mask = torch.zeros(data['paper'].num_nodes, dtype=torch.bool)
            mask[idx] = True
            data['paper'][f'{v}_mask'] = mask

        if self.pre_transform is not None:
            data = self.pre_transform(data)

        self.save([data], self.processed_paths[0])

    def __repr__(self) -> str:
        return 'ogbn-mag()'