From 4940f097202d72ef5694244e1c7319822a62b5b7 Mon Sep 17 00:00:00 2001
From: Ajay Sharma <sharmaajay@microsoft.com>
Date: Thu, 3 Nov 2022 12:16:29 -0700
Subject: [PATCH 12/44] net: mana: Define data structures for protection domain
 and memory registration

The MANA hardware supports protection domains and memory registration
for use in RDMA environments. Add those definitions and expose them for
use by the RDMA driver.
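
To illustrate the intended use, an RDMA driver would register a memory
region over a previously created DMA region roughly as in the sketch
below. The wrapper is illustrative only (its name is made up and it is
not part of this change), but it uses only helpers and structures that
exist after this patch:

	/* Illustrative only: create a GVA-type MR over an existing DMA
	 * region and return the new MR handle. Error handling follows
	 * the convention used elsewhere in this driver.
	 */
	static int example_create_gva_mr(struct gdma_context *gc,
					 struct gdma_create_mr_params *p,
					 gdma_obj_handle_t *mr_handle)
	{
		struct gdma_create_mr_response resp = {};
		struct gdma_create_mr_request req = {};
		int err;

		mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR,
				     sizeof(req), sizeof(resp));
		req.pd_handle = p->pd_handle;
		req.mr_type = p->mr_type;
		req.gva.dma_region_handle = p->gva.dma_region_handle;
		req.gva.virtual_address = p->gva.virtual_address;
		req.gva.access_flags = p->gva.access_flags;

		err = mana_gd_send_request(gc, sizeof(req), &req,
					   sizeof(resp), &resp);
		if (err || resp.hdr.status)
			return err ? err : -EPROTO;

		*mr_handle = resp.mr_handle;
		return 0;
	}
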
Signed-off-by: Ajay Sharma <sharmaajay@microsoft.com>
Signed-off-by: Long Li <longli@microsoft.com>
Link: https://lore.kernel.org/r/1667502990-2559-12-git-send-email-longli@linuxonhyperv.com
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Acked-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
(cherry picked from commit 28c66cfa45388af1126985d1114e0ed762eb2abd)
Signed-off-by: Bastian Blank <waldi@debian.org>
---
.../net/ethernet/microsoft/mana/gdma_main.c | 27 ++--
drivers/net/ethernet/microsoft/mana/mana_en.c | 18 +--
include/net/mana/gdma.h | 121 +++++++++++++++++-
3 files changed, 143 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index b114c31d70ba..690691e3e86c 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -198,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
req.type = queue->type;
req.pdid = queue->gdma_dev->pdid;
req.doolbell_id = queue->gdma_dev->doorbell;
- req.gdma_region = queue->mem_info.gdma_region;
+ req.gdma_region = queue->mem_info.dma_region_handle;
req.queue_size = queue->queue_size;
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
req.eq_pci_msix_index = queue->eq.msix_index;
@@ -212,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
queue->id = resp.queue_index;
queue->eq.disable_needed = true;
- queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;

return 0;
}
@@ -671,24 +671,30 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
return err;
}

-static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+ gdma_obj_handle_t dma_region_handle)
{
struct gdma_destroy_dma_region_req req = {};
struct gdma_general_resp resp = {};
int err;

- if (gdma_region == GDMA_INVALID_DMA_REGION)
- return;
+ if (dma_region_handle == GDMA_INVALID_DMA_REGION)
+ return 0;

mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
sizeof(resp));
- req.gdma_region = gdma_region;
+ req.dma_region_handle = dma_region_handle;

err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
- if (err || resp.hdr.status)
+ if (err || resp.hdr.status) {
dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
err, resp.hdr.status);
+ return -EPROTO;
+ }
+
+ return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);

static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
@@ -733,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
if (err)
goto out;

- if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+ if (resp.hdr.status ||
+ resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
resp.hdr.status);
err = -EPROTO;
goto out;
}

- gmi->gdma_region = resp.gdma_region;
+ gmi->dma_region_handle = resp.dma_region_handle;
out:
kfree(req);
return err;
@@ -863,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
return;
}

- mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+ mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
mana_gd_free_memory(gmi);
kfree(queue);
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 65637aa333a4..9eaa53f00ca1 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1543,10 +1543,10 @@ static int mana_create_txq(struct mana_port_context *apc,
memset(&wq_spec, 0, sizeof(wq_spec));
memset(&cq_spec, 0, sizeof(cq_spec));

- wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+ wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
wq_spec.queue_size = txq->gdma_sq->queue_size;

- cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
cq_spec.queue_size = cq->gdma_cq->queue_size;
cq_spec.modr_ctx_id = 0;
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1561,8 +1561,10 @@ static int mana_create_txq(struct mana_port_context *apc,
txq->gdma_sq->id = wq_spec.queue_index;
cq->gdma_cq->id = cq_spec.queue_index;

- txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ txq->gdma_sq->mem_info.dma_region_handle =
+ GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.dma_region_handle =
+ GDMA_INVALID_DMA_REGION;

txq->gdma_txq_id = txq->gdma_sq->id;

@@ -1774,10 +1776,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
memset(&wq_spec, 0, sizeof(wq_spec));
memset(&cq_spec, 0, sizeof(cq_spec));

- wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+ wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
wq_spec.queue_size = rxq->gdma_rq->queue_size;

- cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+ cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
cq_spec.queue_size = cq->gdma_cq->queue_size;
cq_spec.modr_ctx_id = 0;
cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1790,8 +1792,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
rxq->gdma_rq->id = wq_spec.queue_index;
cq->gdma_cq->id = cq_spec.queue_index;

- rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+ rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+ cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;

rxq->gdma_id = rxq->gdma_rq->id;
cq->gdma_id = cq->gdma_cq->id;
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 202ac405ab59..aabc7cea8a49 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -27,6 +27,10 @@ enum gdma_request_type {
GDMA_CREATE_DMA_REGION = 25,
GDMA_DMA_REGION_ADD_PAGES = 26,
GDMA_DESTROY_DMA_REGION = 27,
+ GDMA_CREATE_PD = 29,
+ GDMA_DESTROY_PD = 30,
+ GDMA_CREATE_MR = 31,
+ GDMA_DESTROY_MR = 32,
};

enum gdma_queue_type {
@@ -57,6 +61,8 @@ enum {
GDMA_DEVICE_MANA = 2,
};

+typedef u64 gdma_obj_handle_t;
+
struct gdma_resource {
/* Protect the bitmap */
spinlock_t lock;
@@ -190,7 +196,7 @@ struct gdma_mem_info {
u64 length;

/* Allocated by the PF driver */
- u64 gdma_region;
+ gdma_obj_handle_t dma_region_handle;
};

#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
@@ -605,7 +611,7 @@ struct gdma_create_queue_req {
u32 reserved1;
u32 pdid;
u32 doolbell_id;
- u64 gdma_region;
+ gdma_obj_handle_t gdma_region;
u32 reserved2;
u32 queue_size;
u32 log2_throttle_limit;
@@ -632,6 +638,28 @@ struct gdma_disable_queue_req {
u32 alloc_res_id_on_creation;
}; /* HW DATA */

+enum atb_page_size {
+ ATB_PAGE_SIZE_4K,
+ ATB_PAGE_SIZE_8K,
+ ATB_PAGE_SIZE_16K,
+ ATB_PAGE_SIZE_32K,
+ ATB_PAGE_SIZE_64K,
+ ATB_PAGE_SIZE_128K,
+ ATB_PAGE_SIZE_256K,
+ ATB_PAGE_SIZE_512K,
+ ATB_PAGE_SIZE_1M,
+ ATB_PAGE_SIZE_2M,
+ ATB_PAGE_SIZE_MAX,
+};
+
+enum gdma_mr_access_flags {
+ GDMA_ACCESS_FLAG_LOCAL_READ = BIT_ULL(0),
+ GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT_ULL(1),
+ GDMA_ACCESS_FLAG_REMOTE_READ = BIT_ULL(2),
+ GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT_ULL(3),
+ GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT_ULL(4),
+};
+
/* GDMA_CREATE_DMA_REGION */
struct gdma_create_dma_region_req {
struct gdma_req_hdr hdr;
@@ -658,14 +686,14 @@ struct gdma_create_dma_region_req {
struct gdma_create_dma_region_resp {
struct gdma_resp_hdr hdr;

- u64 gdma_region;
+ gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

/* GDMA_DMA_REGION_ADD_PAGES */
struct gdma_dma_region_add_pages_req {
struct gdma_req_hdr hdr;

- u64 gdma_region;
+ gdma_obj_handle_t dma_region_handle;

u32 page_addr_list_len;
u32 reserved3;
@@ -677,9 +705,88 @@ struct gdma_dma_region_add_pages_req {
struct gdma_destroy_dma_region_req {
struct gdma_req_hdr hdr;

- u64 gdma_region;
+ gdma_obj_handle_t dma_region_handle;
}; /* HW DATA */

+enum gdma_pd_flags {
+ GDMA_PD_FLAG_INVALID = 0,
+};
+
+struct gdma_create_pd_req {
+ struct gdma_req_hdr hdr;
+ enum gdma_pd_flags flags;
+ u32 reserved;
+};/* HW DATA */
+
+struct gdma_create_pd_resp {
+ struct gdma_resp_hdr hdr;
+ gdma_obj_handle_t pd_handle;
+ u32 pd_id;
+ u32 reserved;
+};/* HW DATA */
+
+struct gdma_destroy_pd_req {
+ struct gdma_req_hdr hdr;
+ gdma_obj_handle_t pd_handle;
+};/* HW DATA */
+
+struct gdma_destory_pd_resp {
+ struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
+enum gdma_mr_type {
+ /* Guest Virtual Address - MRs of this type allow access
+ * to memory mapped by PTEs associated with this MR using a virtual
+ * address that is set up in the MST
+ */
+ GDMA_MR_TYPE_GVA = 2,
+};
+
+struct gdma_create_mr_params {
+ gdma_obj_handle_t pd_handle;
+ enum gdma_mr_type mr_type;
+ union {
+ struct {
+ gdma_obj_handle_t dma_region_handle;
+ u64 virtual_address;
+ enum gdma_mr_access_flags access_flags;
+ } gva;
+ };
+};
+
+struct gdma_create_mr_request {
+ struct gdma_req_hdr hdr;
+ gdma_obj_handle_t pd_handle;
+ enum gdma_mr_type mr_type;
+ u32 reserved_1;
+
+ union {
+ struct {
+ gdma_obj_handle_t dma_region_handle;
+ u64 virtual_address;
+ enum gdma_mr_access_flags access_flags;
+ } gva;
+
+ };
+ u32 reserved_2;
+};/* HW DATA */
+
+struct gdma_create_mr_response {
+ struct gdma_resp_hdr hdr;
+ gdma_obj_handle_t mr_handle;
+ u32 lkey;
+ u32 rkey;
+};/* HW DATA */
+
+struct gdma_destroy_mr_request {
+ struct gdma_req_hdr hdr;
+ gdma_obj_handle_t mr_handle;
+};/* HW DATA */
+
+struct gdma_destroy_mr_response {
+ struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
int mana_gd_verify_vf_version(struct pci_dev *pdev);

int mana_gd_register_device(struct gdma_dev *gd);
@@ -706,4 +813,8 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi);
int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,
u32 resp_len, void *resp);
+
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+ gdma_obj_handle_t dma_region_handle);
+

#endif /* _GDMA_H */
--
2.49.0