// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "mlx5_ib.h"
#include "data_direct.h"

static LIST_HEAD(mlx5_data_direct_dev_list);
static LIST_HEAD(mlx5_data_direct_reg_list);

/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_data_direct_mutex);

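/*
 * A registration ties an IB device to the VUID of the data direct device it
 * wants to be bound to. Entries live on mlx5_data_direct_reg_list.
 */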
struct mlx5_data_direct_registration {
	struct mlx5_ib_dev *ibdev;
	char vuid[MLX5_ST_SZ_BYTES(array1024_auto) + 1];
	struct list_head list;
};

static const struct pci_device_id mlx5_data_direct_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x2100) }, /* ConnectX-8 Data Direct */
	{ 0, }
};

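/*
 * Read the device's unique identifier (VUID) from the "VU" read-only keyword
 * of its PCI VPD. The resulting NUL-terminated string is kept in dev->vuid
 * and freed on device removal.
 */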
static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	unsigned int vpd_size, kw_len;
	u8 *vpd_data;
	int start;
	int ret;

	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
		return PTR_ERR(vpd_data);
	}

	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "VU", &kw_len);
	if (start < 0) {
		ret = start;
		pci_err(pdev, "VU keyword not found, err=%d\n", ret);
		goto end;
	}

	dev->vuid = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
	ret = dev->vuid ? 0 : -ENOMEM;
end:
	kfree(vpd_data);
	return ret;
}

static void mlx5_data_direct_shutdown(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

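/*
 * Configure DMA addressing: prefer a 64-bit mask, fall back to 32-bit, then
 * cap the maximum DMA segment size at 2GB.
 */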
static int mlx5_data_direct_set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit PCI DMA mask, err=%d\n", err);
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, err=%d\n", err);
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, SZ_2G);
	return 0;
}

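/*
 * Register an IB device against a data direct VUID. If a matching data
 * direct device was already probed, bind the two immediately; either way the
 * registration is kept so a later probe can complete the bind.
 */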
int mlx5_data_direct_ib_reg(struct mlx5_ib_dev *ibdev, char *vuid)
{
	struct mlx5_data_direct_registration *reg;
	struct mlx5_data_direct_dev *dev;

	reg = kzalloc(sizeof(*reg), GFP_KERNEL);
	if (!reg)
		return -ENOMEM;

	reg->ibdev = ibdev;
	strcpy(reg->vuid, vuid);

	mutex_lock(&mlx5_data_direct_mutex);
	list_for_each_entry(dev, &mlx5_data_direct_dev_list, list) {
		if (strcmp(dev->vuid, vuid) == 0) {
			mlx5_ib_data_direct_bind(ibdev, dev);
			break;
		}
	}

	/* Add the registration to its global list, to be used upon bind/unbind
	 * of its affiliated data direct device
	 */
	list_add_tail(&reg->list, &mlx5_data_direct_reg_list);
	mutex_unlock(&mlx5_data_direct_mutex);
	return 0;
}

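/*
 * Remove the IB device's registration from the global list. A matching
 * registration must exist; hitting the WARN_ON indicates an unbalanced
 * reg/unreg.
 */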
void mlx5_data_direct_ib_unreg(struct mlx5_ib_dev *ibdev)
{
	struct mlx5_data_direct_registration *reg;

	mutex_lock(&mlx5_data_direct_mutex);
	list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
		if (reg->ibdev == ibdev) {
			list_del(&reg->list);
			kfree(reg);
			goto end;
		}
	}

	WARN_ON(true);
end:
	mutex_unlock(&mlx5_data_direct_mutex);
}

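/*
 * A new data direct device was probed: bind every already-registered IB
 * device whose VUID matches, then publish the device on the global list for
 * registrations that arrive later.
 */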
static void mlx5_data_direct_dev_reg(struct mlx5_data_direct_dev *dev)
{
	struct mlx5_data_direct_registration *reg;

	mutex_lock(&mlx5_data_direct_mutex);
	list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
		if (strcmp(dev->vuid, reg->vuid) == 0)
			mlx5_ib_data_direct_bind(reg->ibdev, dev);
	}

	/* Add the data direct device to the global list, further IB devices may
	 * use it later as well
	 */
	list_add_tail(&dev->list, &mlx5_data_direct_dev_list);
	mutex_unlock(&mlx5_data_direct_mutex);
}

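/*
 * The data direct device is going away: take it off the global list so no
 * new bindings can be made, then unbind every IB device currently affiliated
 * with its VUID.
 */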
static void mlx5_data_direct_dev_unreg(struct mlx5_data_direct_dev *dev)
{
	struct mlx5_data_direct_registration *reg;

	mutex_lock(&mlx5_data_direct_mutex);
	/* Prevent any further affiliations */
	list_del(&dev->list);
	list_for_each_entry(reg, &mlx5_data_direct_reg_list, list) {
		if (strcmp(dev->vuid, reg->vuid) == 0)
			mlx5_ib_data_direct_unbind(reg->ibdev);
	}
	mutex_unlock(&mlx5_data_direct_mutex);
}

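/*
 * Probe: enable the PCI function, set up DMA and (best effort) PCI atomics,
 * read the VUID from VPD and register the device so that matching IB devices
 * can bind to it.
 */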
static int mlx5_data_direct_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_data_direct_dev *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	dev->pdev = pdev;

	pci_set_drvdata(dev->pdev, dev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev->device, "Cannot enable PCI device, err=%d\n", err);
		goto err;
	}

	pci_set_master(pdev);
	err = mlx5_data_direct_set_dma_caps(pdev);
	if (err)
		goto err_disable;

	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		dev_dbg(dev->device, "Enabling pci atomics failed\n");

	err = mlx5_data_direct_vpd_get_vuid(dev);
	if (err)
		goto err_disable;

	mlx5_data_direct_dev_reg(dev);
	return 0;

err_disable:
	pci_disable_device(pdev);
err:
	kfree(dev);
	return err;
}

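/*
 * Remove: unbind all affiliated IB devices before releasing the PCI function
 * and freeing the VUID string and the device itself.
 */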
static void mlx5_data_direct_remove(struct pci_dev *pdev)
{
	struct mlx5_data_direct_dev *dev = pci_get_drvdata(pdev);

	mlx5_data_direct_dev_unreg(dev);
	pci_disable_device(pdev);
	kfree(dev->vuid);
	kfree(dev);
}

static struct pci_driver mlx5_data_direct_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mlx5_data_direct_pci_table,
	.probe = mlx5_data_direct_probe,
	.remove = mlx5_data_direct_remove,
	.shutdown = mlx5_data_direct_shutdown,
};

int mlx5_data_direct_driver_register(void)
{
	return pci_register_driver(&mlx5_data_direct_driver);
}

void mlx5_data_direct_driver_unregister(void)
{
	pci_unregister_driver(&mlx5_data_direct_driver);
}