#ifdef WITH_PYTHON
#include <Python.h>
#endif
#include <torch/script.h>
#include "cpu/relabel_cpu.h"
#ifdef _WIN32
#ifdef WITH_PYTHON
#ifdef WITH_CUDA
PyMODINIT_FUNC PyInit__relabel_cuda(void) { return NULL; }
#else
PyMODINIT_FUNC PyInit__relabel_cpu(void) { return NULL; }
#endif
#endif
#endif
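
// Dispatcher for the relabel op, which maps the indices in `col` onto a
// local numbering defined by `idx`. Only the CPU implementation exists;
// CUDA tensors raise an error.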
SPARSE_API std::tuple<torch::Tensor, torch::Tensor> relabel(torch::Tensor col,
                                                            torch::Tensor idx) {
  if (col.device().is_cuda()) {
#ifdef WITH_CUDA
    AT_ERROR("No CUDA version supported");
#else
    AT_ERROR("Not compiled with CUDA support");
#endif
  } else {
    return relabel_cpu(col, idx);
  }
}
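
// Relabel a one-hop neighborhood stored in CSR form (`rowptr`, `col`), with
// optional edge values; `bipartite` is forwarded to the CPU implementation.
// As with `relabel`, no CUDA kernel is available.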
SPARSE_API std::tuple<torch::Tensor, torch::Tensor,
                      std::optional<torch::Tensor>, torch::Tensor>
relabel_one_hop(torch::Tensor rowptr, torch::Tensor col,
                std::optional<torch::Tensor> optional_value, torch::Tensor idx,
                bool bipartite) {
  if (rowptr.device().is_cuda()) {
#ifdef WITH_CUDA
    AT_ERROR("No CUDA version supported");
#else
    AT_ERROR("Not compiled with CUDA support");
#endif
  } else {
    return relabel_one_hop_cpu(rowptr, col, optional_value, idx, bipartite);
  }
}
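
// Expose both functions to TorchScript; they become callable from Python as
// torch.ops.torch_sparse.relabel and torch.ops.torch_sparse.relabel_one_hop.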
static auto registry =
    torch::RegisterOperators()
        .op("torch_sparse::relabel", &relabel)
        .op("torch_sparse::relabel_one_hop", &relabel_one_hop);