/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Copyright 2019-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef __EFA_H__
#define __EFA_H__
#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <infiniband/driver.h>
#include <util/udma_barrier.h>
#include "efa-abi.h"
#include "efa_io_defs.h"
#include "efadv.h"
/*
 * EFA_GET()/EFA_SET() - read/write a bit field inside the integer that @ptr
 * points to. @mask names the field; the corresponding <mask>_MASK macro
 * (token-pasted) selects the bits, and FIELD_GET/FIELD_PREP shift the value
 * into/out of position. EFA_SET() rewrites only the masked bits and
 * preserves the rest of *ptr.
 *
 * NOTE(review): EFA_SET() is a GCC/Clang statement expression; @ptr is
 * evaluated once (via _ptr) but @value is expanded inside FIELD_PREP -
 * keep arguments free of side effects.
 */
#define EFA_GET(ptr, mask) FIELD_GET(mask##_MASK, *(ptr))
#define EFA_SET(ptr, mask, value) \
({ \
	typeof(ptr) _ptr = ptr; \
	*_ptr = (*_ptr & ~(mask##_MASK)) | \
		FIELD_PREP(mask##_MASK, value); \
})
/*
 * Per-process device context: wraps the libibverbs verbs_context and caches
 * device attributes/limits (presumably queried from the kernel at context
 * open - confirm against the provider's alloc_context path).
 */
struct efa_context {
	struct verbs_context ibvctx;
	uint32_t cmds_supp_udata_mask;	/* commands supporting user data, per efa-abi */
	uint16_t sub_cqs_per_cq;	/* number of sub CQs backing each CQ */
	uint16_t inline_buf_size;	/* max inline payload per send WQE */
	uint32_t max_llq_size;
	uint32_t device_caps;		/* device capability bits */
	uint32_t max_sq_wr;		/* max work requests on a send queue */
	uint32_t max_rq_wr;		/* max work requests on a receive queue */
	uint16_t max_sq_sge;		/* max scatter/gather entries per send WR */
	uint16_t max_rq_sge;		/* max scatter/gather entries per recv WR */
	uint32_t max_rdma_size;		/* max size of a single RDMA transfer */
	uint16_t max_wr_rdma_sge;	/* max SGEs on an RDMA work request */
	uint16_t max_tx_batch;
	uint16_t min_sq_wr;
	size_t cqe_size;		/* size of a normal CQ entry */
	size_t ex_cqe_size;		/* size of an extended CQ entry */
	/* QP-number -> efa_qp lookup table; _m1 suffix suggests
	 * qp_table_sz_m1 is the (power-of-two - 1) index mask - confirm.
	 */
	struct efa_qp **qp_table;
	unsigned int qp_table_sz_m1;
	pthread_spinlock_t qp_table_lock;	/* protects qp_table */
};
/* Protection domain: the verbs PD plus the device-assigned PD number. */
struct efa_pd {
	struct ibv_pd ibvpd;
	uint16_t pdn;	/* PD number returned by the kernel driver */
};
/*
 * Polling state for one sub completion queue. Each efa_cq is backed by
 * num_sub_cqs of these (see sub_cq_arr in struct efa_cq).
 */
struct efa_sub_cq {
	uint16_t consumed_cnt;	/* number of CQEs consumed so far */
	int phase;		/* expected phase bit of the next valid CQE */
	uint8_t *buf;		/* CQE ring for this sub CQ */
	int qmask;		/* ring index mask; presumably entries-1 - confirm */
	int cqe_size;
	uint32_t ref_cnt;	/* QPs referencing this sub CQ */
};
/*
 * Completion queue: the verbs CQ plus the EFA-specific state (doorbell,
 * sub CQ array, extended-CQ polling cursor).
 */
struct efa_cq {
	struct verbs_cq verbs_cq;
	struct efadv_cq dv_cq;		/* direct-verbs handle (efadv.h) */
	uint32_t cqn;			/* CQ number */
	size_t cqe_size;
	uint8_t *buf;			/* CQE buffer shared by the sub CQs */
	size_t buf_size;
	uint32_t *db;			/* doorbell register */
	uint8_t *db_mmap_addr;		/* mmapped page holding the doorbell - used for unmap, presumably */
	uint16_t cc; /* Consumer Counter */
	uint8_t cmd_sn;
	uint16_t num_sub_cqs;
	/* Index of next sub cq idx to poll. This is used to guarantee fairness for sub cqs */
	uint16_t next_poll_idx;
	pthread_spinlock_t lock;	/* serializes CQ polling */
	/* Cursor of the in-progress extended-CQ poll iteration - confirm */
	struct efa_wq *cur_wq;
	struct efa_io_cdesc_common *cur_cqe;
	struct ibv_device *dev;
	struct efa_sub_cq sub_cq_arr[];	/* flexible array, num_sub_cqs entries */
};
/* Common work-queue bookkeeping shared by the send and receive queues. */
struct efa_wq {
	uint64_t *wrid;		/* caller work-request IDs, one slot per in-flight WQE */
	/* wrid_idx_pool: Pool of free indexes in the wrid array, used to select the
	 * wrid entry to be used to hold the next tx packet's context.
	 * At init time, entry N will hold value N, as OOO tx-completions arrive,
	 * the value stored in a given entry might not equal the entry's index.
	 */
	uint32_t *wrid_idx_pool;
	uint32_t wqe_cnt;	/* total WQEs in the ring */
	uint32_t wqe_posted;	/* WQEs posted over the queue's lifetime */
	uint32_t wqe_completed;	/* WQEs completed over the queue's lifetime */
	uint16_t pc; /* Producer counter */
	uint16_t desc_mask;	/* ring index mask; presumably wqe_cnt-1 - confirm */
	/* wrid_idx_pool_next: Index of the next entry to use in wrid_idx_pool. */
	uint16_t wrid_idx_pool_next;
	int max_sge;		/* max scatter/gather entries per WR on this queue */
	int phase;
	pthread_spinlock_t wqlock;	/* serializes posting on this queue */
	uint32_t *db;		/* queue doorbell register */
	uint16_t sub_cq_idx;	/* sub CQ this queue's completions land on */
};
/* Receive queue: common WQ state plus the receive descriptor ring. */
struct efa_rq {
	struct efa_wq wq;
	uint8_t *buf;		/* receive WQE ring */
	size_t buf_size;
};
/* Send queue: common WQ state plus the LLQ descriptor ring and the
 * batching state used by the ibv_wr_* (work-request session) API.
 */
struct efa_sq {
	struct efa_wq wq;
	uint8_t *desc;			/* mmapped send descriptor ring */
	uint32_t desc_offset;		/* offset of the ring within the mmapped area - confirm */
	size_t desc_ring_mmap_size;
	size_t max_inline_data;		/* max inline payload per send WQE */
	size_t max_wr_rdma_sge;
	uint16_t max_batch_wr;		/* max WRs flushed to the device at once */
	/* Buffer for pending WR entries in the current session */
	uint8_t *local_queue;
	/* Number of WR entries posted in the current session */
	uint32_t num_wqe_pending;
	/* Phase before current session */
	int phase_rb;
	/* Current wqe being built */
	struct efa_io_tx_wqe *curr_tx_wqe;
};
/* Queue pair: the verbs QP plus its send and receive queues. */
struct efa_qp {
	struct verbs_qp verbs_qp;
	struct efa_sq sq;
	struct efa_rq rq;
	int page_size;
	int sq_sig_all;		/* presumably from ibv_qp_init_attr.sq_sig_all - confirm */
	int wr_session_err;	/* sticky error of the current WR session - confirm */
	struct ibv_device *dev;
};
/* Memory region: no provider-specific state beyond the verbs MR. */
struct efa_mr {
	struct verbs_mr vmr;
};
/* Address handle: the verbs AH plus the device-assigned AH number. */
struct efa_ah {
	struct ibv_ah ibvah;
	uint16_t efa_ah;	/* device AH number returned by the kernel driver */
};
/* Provider device: wraps the verbs device and caches the page size. */
struct efa_dev {
	struct verbs_device vdev;
	uint32_t pg_sz;		/* system page size - presumably; confirm at probe */
};
/* Upcast a verbs device to the enclosing EFA provider device. */
static inline struct efa_dev *to_efa_dev(struct ibv_device *ibvdev)
{
	struct efa_dev *dev;

	dev = container_of(ibvdev, struct efa_dev, vdev.device);
	return dev;
}
/* Upcast a verbs context to the enclosing EFA context. */
static inline struct efa_context *to_efa_context(struct ibv_context *ibvctx)
{
	struct efa_context *ctx;

	ctx = container_of(ibvctx, struct efa_context, ibvctx.context);
	return ctx;
}
/* Upcast a verbs PD to the enclosing EFA protection domain. */
static inline struct efa_pd *to_efa_pd(struct ibv_pd *ibvpd)
{
	struct efa_pd *pd;

	pd = container_of(ibvpd, struct efa_pd, ibvpd);
	return pd;
}
/* Upcast a verbs CQ to the enclosing EFA completion queue. */
static inline struct efa_cq *to_efa_cq(struct ibv_cq *ibvcq)
{
	struct efa_cq *cq;

	cq = container_of(ibvcq, struct efa_cq, verbs_cq.cq);
	return cq;
}
/* Upcast an extended verbs CQ to the enclosing EFA completion queue. */
static inline struct efa_cq *to_efa_cq_ex(struct ibv_cq_ex *ibvcqx)
{
	struct efa_cq *cq;

	cq = container_of(ibvcqx, struct efa_cq, verbs_cq.cq_ex);
	return cq;
}
/* Upcast a direct-verbs CQ handle to the enclosing EFA completion queue. */
static inline struct efa_cq *efadv_cq_to_efa_cq(struct efadv_cq *efadv_cq)
{
	struct efa_cq *cq;

	cq = container_of(efadv_cq, struct efa_cq, dv_cq);
	return cq;
}
/* Upcast a verbs QP to the enclosing EFA queue pair. */
static inline struct efa_qp *to_efa_qp(struct ibv_qp *ibvqp)
{
	struct efa_qp *qp;

	qp = container_of(ibvqp, struct efa_qp, verbs_qp.qp);
	return qp;
}
/* Upcast an extended verbs QP to the enclosing EFA queue pair. */
static inline struct efa_qp *to_efa_qp_ex(struct ibv_qp_ex *ibvqpx)
{
	struct efa_qp *qp;

	qp = container_of(ibvqpx, struct efa_qp, verbs_qp.qp_ex);
	return qp;
}
/* Upcast a verbs AH to the enclosing EFA address handle. */
static inline struct efa_ah *to_efa_ah(struct ibv_ah *ibvah)
{
	struct efa_ah *ah;

	ah = container_of(ibvah, struct efa_ah, ibvah);
	return ah;
}
/* Returns true if @device is handled by this EFA provider. Defined in the
 * provider's .c file.
 */
bool is_efa_dev(struct ibv_device *device);
#endif /* __EFA_H__ */