From: Konstantin Taranov <kotaranov@microsoft.com>
Date: Tue, 6 Aug 2024 10:02:24 +0200
Subject: providers/mana: Process error CQEs

Add processing of error CQEs for mana RC connections.

When the hardware reports an error CQE (CQE_TYPE_ERROR), the QP is
moved to the error state, the vendor error code is mapped to an
ibv_wc_status value, and both are returned to the user via wc->status
and wc->vendor_err. Outstanding work requests on a QP in the error
state are completed with IBV_WC_WR_FLUSH_ERR. To tell whether an error
CQE belongs to the send or the receive path, the QP lookup table is
split into separate send and receive tables. Posting to a QP in the
RESET or ERR state is rejected with EINVAL, and fatal asynchronous QP
events also move the QP to the error state.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Origin: upstream, https://github.com/linux-rdma/rdma-core/pull/1556
---
 providers/mana/cq.c           | 223 ++++++++++++++++++++++++++++++++++--------
 providers/mana/gdma.h         | 160 ++++++++++++++++++++----------
 providers/mana/mana.c         |  31 +++++-
 providers/mana/mana.h         |  18 ++--
 providers/mana/qp.c           | 172 ++++++++++++++++++++------------
 providers/mana/shadow_queue.h |  28 +++++-
 providers/mana/wr.c           |  18 +++-
 7 files changed, 477 insertions(+), 173 deletions(-)
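
As a reference for how the new error reporting surfaces to applications,
below is a minimal polling sketch (illustrative only, not part of the
patch; report_one_completion() is a made-up helper, the rest is the
standard libibverbs API):

#include <stdio.h>
#include <infiniband/verbs.h>

static void report_one_completion(struct ibv_cq *cq)
{
	struct ibv_wc wc;

	/* Drain a single completion; with this patch, failed RC work
	 * requests are reported here instead of being dropped. */
	if (ibv_poll_cq(cq, 1, &wc) <= 0)
		return;

	if (wc.status != IBV_WC_SUCCESS)
		/* wc.vendor_err carries the raw MANA vendor code, e.g.
		 * VENDOR_ERR_SW_FLUSHED (0xfff) for flushed WQEs. */
		fprintf(stderr, "wr_id %llu failed: %s (vendor 0x%x)\n",
			(unsigned long long)wc.wr_id,
			ibv_wc_status_str(wc.status), wc.vendor_err);
}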

diff --git a/providers/mana/cq.c b/providers/mana/cq.c
index bb9a1b0..dd8c54c 100644
--- a/providers/mana/cq.c
+++ b/providers/mana/cq.c
@@ -68,7 +68,6 @@ struct ibv_cq *mana_create_cq(struct ibv_context *context, int cqe,
 		cq->cqe = cqe; // to preserve old behaviour for DPDK
 	cq->head = INITIALIZED_OWNER_BIT(ilog32(cq->cqe) - 1);
 	cq->last_armed_head = cq->head - 1;
-	cq->ready_wcs = 0;
 
 	cmd_drv = &cmd.drv_payload;
 	cmd_drv->buf_addr = (uintptr_t)cq->buf;
@@ -146,25 +145,26 @@ int mana_arm_cq(struct ibv_cq *ibcq, int solicited)
 	return 0;
 }
 
-static inline uint32_t handle_rc_requester_cqe(struct mana_qp *qp, struct gdma_cqe *cqe)
+static inline bool get_next_signal_psn(struct mana_qp *qp, uint32_t *psn)
 {
-	struct mana_gdma_queue *recv_queue = &qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER];
-	struct mana_gdma_queue *send_queue = &qp->rc_qp.queues[USER_RC_SEND_QUEUE_REQUESTER];
-	uint32_t syndrome = cqe->rdma_cqe.rc_armed_completion.syndrome;
-	uint32_t psn = cqe->rdma_cqe.rc_armed_completion.psn;
-	struct rc_sq_shadow_wqe *shadow_wqe;
-	uint32_t wcs = 0;
+	struct rc_sq_shadow_wqe *shadow_wqe =
+		(struct rc_sq_shadow_wqe *)shadow_queue_get_next_to_signal(&qp->shadow_sq);
 
-	if (!IB_IS_ACK(syndrome))
-		return 0;
+	if (!shadow_wqe)
+		return false;
 
-	if (!PSN_GT(psn, qp->rc_qp.sq_highest_completed_psn))
-		return 0;
+	*psn = shadow_wqe->end_psn;
+	return true;
+}
 
-	qp->rc_qp.sq_highest_completed_psn = psn;
+static inline void advance_send_completions(struct mana_qp *qp, uint32_t psn)
+{
+	struct mana_gdma_queue *recv_queue = &qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER];
+	struct mana_gdma_queue *send_queue = &qp->rc_qp.queues[USER_RC_SEND_QUEUE_REQUESTER];
+	struct rc_sq_shadow_wqe *shadow_wqe;
 
 	if (!PSN_LT(psn, qp->rc_qp.sq_psn))
-		return 0;
+		return;
 
 	while ((shadow_wqe = (struct rc_sq_shadow_wqe *)
 		shadow_queue_get_next_to_complete(&qp->shadow_sq)) != NULL) {
@@ -182,34 +182,41 @@ static inline uint32_t handle_rc_requester_cqe(struct mana_qp *qp, struct gdma_c
 		mana_ib_update_shared_mem_left_offset(qp, offset & GDMA_QUEUE_OFFSET_MASK);
 
 		shadow_queue_advance_next_to_complete(&qp->shadow_sq);
-		if (shadow_wqe->header.flags != MANA_NO_SIGNAL_WC)
-			wcs++;
 	}
+}
 
-	uint32_t prev_psn = PSN_DEC(qp->rc_qp.sq_psn);
+static inline void handle_rc_requester_cqe(struct mana_qp *qp, struct gdma_cqe *cqe)
+{
+	struct mana_gdma_queue *recv_queue = &qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER];
+	uint32_t syndrome = cqe->rdma_cqe.rc_armed_completion.syndrome;
+	uint32_t psn = cqe->rdma_cqe.rc_armed_completion.psn;
+	uint32_t arm_psn;
 
-	if (qp->rc_qp.sq_highest_completed_psn == prev_psn)
-		gdma_arm_normal_cqe(recv_queue, qp->rc_qp.sq_psn);
-	else
-		gdma_arm_normal_cqe(recv_queue, prev_psn);
+	if (!IB_IS_ACK(syndrome))
+		return;
 
-	return wcs;
+	advance_send_completions(qp, psn);
+
+	if (!get_next_signal_psn(qp, &arm_psn))
+		arm_psn = PSN_INC(psn);
+
+	gdma_arm_normal_cqe(recv_queue, arm_psn);
 }
 
-static inline uint32_t handle_rc_responder_cqe(struct mana_qp *qp, struct gdma_cqe *cqe)
+static inline void handle_rc_responder_cqe(struct mana_qp *qp, struct gdma_cqe *cqe)
 {
 	struct mana_gdma_queue *recv_queue = &qp->rc_qp.queues[USER_RC_RECV_QUEUE_RESPONDER];
 	struct rc_rq_shadow_wqe *shadow_wqe;
 
 	shadow_wqe = (struct rc_rq_shadow_wqe *)shadow_queue_get_next_to_complete(&qp->shadow_rq);
 	if (!shadow_wqe)
-		return 0;
+		return;
 
 	uint32_t offset_cqe = cqe->rdma_cqe.rc_recv.rx_wqe_offset / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
 	uint32_t offset_wqe = shadow_wqe->header.unmasked_queue_offset & GDMA_QUEUE_OFFSET_MASK;
 
 	if (offset_cqe != offset_wqe)
-		return 0;
+		return;
 
 	shadow_wqe->byte_len = cqe->rdma_cqe.rc_recv.msg_len;
 	shadow_wqe->imm_or_rkey = cqe->rdma_cqe.rc_recv.imm_data;
@@ -232,24 +239,59 @@ static inline uint32_t handle_rc_responder_cqe(struct mana_qp *qp, struct gdma_c
 	recv_queue->cons_idx &= GDMA_QUEUE_OFFSET_MASK;
 
 	shadow_queue_advance_next_to_complete(&qp->shadow_rq);
-	return 1;
 }
 
-static inline uint32_t mana_handle_cqe(struct mana_context *ctx, struct gdma_cqe *cqe)
+static inline bool error_cqe_is_send(struct mana_qp *qp, struct gdma_cqe *cqe)
 {
-	struct mana_qp *qp;
+	if (cqe->is_sq &&
+	    qp->rc_qp.queues[USER_RC_SEND_QUEUE_REQUESTER].id == cqe->wqid)
+		return true;
+	if (!cqe->is_sq &&
+	    qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER].id == cqe->wqid)
+		return true;
+
+	return false;
+}
+
+static inline uint32_t error_cqe_get_psn(struct gdma_cqe *cqe)
+{
+	return cqe->rdma_cqe.error.psn;
+}
+
+static inline void handle_rc_error_cqe(struct mana_qp *qp, struct gdma_cqe *cqe)
+{
+	uint32_t vendor_error = cqe->rdma_cqe.error.vendor_error;
+	bool is_send_error = error_cqe_is_send(qp, cqe);
+	uint32_t psn = error_cqe_get_psn(cqe);
+	struct shadow_queue *queue_with_error;
+	struct shadow_wqe_header *shadow_wqe;
+
+	mana_qp_move_flush_err(&qp->ibqp.qp);
+	advance_send_completions(qp, psn);
+
+	queue_with_error = is_send_error ? &qp->shadow_sq : &qp->shadow_rq;
+	shadow_wqe = shadow_queue_get_next_to_complete(queue_with_error);
+
+	if (shadow_wqe) {
+		shadow_wqe->flags = 0;
+		shadow_wqe->vendor_error = vendor_error;
+		shadow_queue_advance_next_to_complete(queue_with_error);
+	}
+}
 
-	if (cqe->is_sq) // impossible for rc
-		return 0;
+static inline void mana_handle_cqe(struct mana_context *ctx, struct gdma_cqe *cqe)
+{
+	struct mana_qp *qp = mana_get_qp(ctx, cqe->wqid, cqe->is_sq);
 
-	qp = mana_get_qp_from_rq(ctx, cqe->wqid);
 	if (!qp)
-		return 0;
+		return;
 
-	if (cqe->rdma_cqe.cqe_type == CQE_TYPE_ARMED_CMPL)
-		return handle_rc_requester_cqe(qp, cqe);
+	if (cqe->rdma_cqe.cqe_type == CQE_TYPE_ERROR)
+		handle_rc_error_cqe(qp, cqe);
+	else if (cqe->rdma_cqe.cqe_type == CQE_TYPE_ARMED_CMPL)
+		handle_rc_requester_cqe(qp, cqe);
 	else
-		return handle_rc_responder_cqe(qp, cqe);
+		handle_rc_responder_cqe(qp, cqe);
 }
 
 static inline int gdma_read_cqe(struct mana_cq *cq, struct gdma_cqe *cqe)
@@ -275,15 +317,86 @@ static inline int gdma_read_cqe(struct mana_cq *cq, struct gdma_cqe *cqe)
 	return 1;
 }
 
+static enum ibv_wc_status vendor_error_to_wc_error(uint32_t vendor_error)
+{
+	switch (vendor_error) {
+	case VENDOR_ERR_OK:
+		return IBV_WC_SUCCESS;
+	case VENDOR_ERR_RX_PKT_LEN:
+	case VENDOR_ERR_RX_MSG_LEN_OVFL:
+	case VENDOR_ERR_RX_READRESP_LEN_MISMATCH:
+		return IBV_WC_LOC_LEN_ERR;
+	case VENDOR_ERR_TX_GDMA_CORRUPTED_WQE:
+	case VENDOR_ERR_TX_PCIE_WQE:
+	case VENDOR_ERR_TX_PCIE_MSG:
+	case VENDOR_ERR_RX_MALFORMED_WQE:
+	case VENDOR_ERR_TX_GDMA_INVALID_STATE:
+	case VENDOR_ERR_TX_MISBEHAVING_CLIENT:
+	case VENDOR_ERR_TX_RDMA_MALFORMED_WQE_SIZE:
+	case VENDOR_ERR_TX_RDMA_MALFORMED_WQE_FIELD:
+	case VENDOR_ERR_TX_RDMA_WQE_UNSUPPORTED:
+	case VENDOR_ERR_TX_RDMA_WQE_LEN_ERR:
+	case VENDOR_ERR_TX_RDMA_MTU_ERR:
+		return IBV_WC_LOC_QP_OP_ERR;
+	case VENDOR_ERR_TX_ATB_MSG_ACCESS_VIOLATION:
+	case VENDOR_ERR_TX_ATB_MSG_ADDR_RANGE:
+	case VENDOR_ERR_TX_ATB_MSG_CONFIG_ERR:
+	case VENDOR_ERR_TX_ATB_WQE_ACCESS_VIOLATION:
+	case VENDOR_ERR_TX_ATB_WQE_ADDR_RANGE:
+	case VENDOR_ERR_TX_ATB_WQE_CONFIG_ERR:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_MISS:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_IDX_ERROR:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_TAG_MISMATCH_ERROR:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_PDID_MISMATCH_ERROR:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_AR_ERROR:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_PT_OVF:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_PT_LENGHT_MISMATCH:
+	case VENDOR_ERR_TX_RDMA_ATB_CMD_ILLEGAL_CMD:
+		return IBV_WC_LOC_PROT_ERR;
+	case VENDOR_ERR_RX_ATB_SGE_MISSCONFIG:
+	case VENDOR_ERR_RX_ATB_SGE_ADDR_RIGHT:
+	case VENDOR_ERR_RX_ATB_SGE_ADDR_RANGE:
+	case VENDOR_ERR_RX_GFID:
+		return IBV_WC_LOC_ACCESS_ERR;
+	case VENDOR_ERR_RX_OP_REQ:
+		return IBV_WC_REM_INV_REQ_ERR;
+	case VENDOR_ERR_RX_ATB_RKEY_MISCONFIG_ERR:
+	case VENDOR_ERR_RX_ATB_RKEY_ADDR_RIGHT:
+	case VENDOR_ERR_RX_ATB_RKEY_ADDR_RANGE:
+	case VENDOR_ERR_RX_REMOTE_ACCESS_NAK:
+		return IBV_WC_REM_ACCESS_ERR;
+	case VENDOR_ERR_RX_INVALID_REQ_NAK:
+	case VENDOR_ERR_RX_REMOTE_OP_ERR_NAK:
+		return IBV_WC_REM_OP_ERR;
+	case VENDOR_ERR_RX_MISBEHAVING_CLIENT:
+	case VENDOR_ERR_RX_CLIENT_ID:
+	case VENDOR_ERR_RX_PCIE:
+	case VENDOR_ERR_RX_NO_AVAIL_WQE:
+	case VENDOR_ERR_RX_ATB_WQE_MISCONFIG:
+	case VENDOR_ERR_RX_ATB_WQE_ADDR_RIGHT:
+	case VENDOR_ERR_RX_ATB_WQE_ADDR_RANGE:
+	case VENDOR_ERR_TX_RDMA_INVALID_STATE:
+	case VENDOR_ERR_TX_RDMA_INVALID_NPT:
+	case VENDOR_ERR_TX_RDMA_INVALID_SGID:
+	case VENDOR_ERR_TX_RDMA_VFID_MISMATCH:
+		return IBV_WC_FATAL_ERR;
+	case VENDOR_ERR_RX_NOT_EMPTY_ON_DISABLE:
+	case VENDOR_ERR_SW_FLUSHED:
+		return IBV_WC_WR_FLUSH_ERR;
+	default:
+		return IBV_WC_GENERAL_ERR;
+	}
+}
+
 static void fill_verbs_from_shadow_wqe(struct mana_qp *qp, struct ibv_wc *wc,
 				       const struct shadow_wqe_header *shadow_wqe)
 {
 	const struct rc_rq_shadow_wqe *rc_wqe = (const struct rc_rq_shadow_wqe *)shadow_wqe;
 
 	wc->wr_id = shadow_wqe->wr_id;
-	wc->status = IBV_WC_SUCCESS;
+	wc->status = vendor_error_to_wc_error(shadow_wqe->vendor_error);
 	wc->opcode = shadow_wqe->opcode;
-	wc->vendor_err = 0;
+	wc->vendor_err = shadow_wqe->vendor_error;
 	wc->wc_flags = shadow_wqe->flags;
 	wc->qp_num = qp->ibqp.qp.qp_num;
 	wc->pkey_index = 0;
@@ -332,17 +445,44 @@ out:
 	return wc_index;
 }
 
+static void mana_flush_completions(struct mana_cq *cq)
+{
+	struct shadow_wqe_header *shadow_wqe;
+	struct mana_qp *qp;
+
+	list_for_each(&cq->send_qp_list, qp, send_cq_node) {
+		if (qp->ibqp.qp.state != IBV_QPS_ERR)
+			continue;
+		while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
+				!= NULL) {
+			shadow_wqe->vendor_error = VENDOR_ERR_SW_FLUSHED;
+			shadow_wqe->flags = 0;
+			shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+		}
+	}
+
+	list_for_each(&cq->recv_qp_list, qp, recv_cq_node) {
+		if (qp->ibqp.qp.state != IBV_QPS_ERR)
+			continue;
+		while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_rq))
+				!= NULL) {
+			shadow_wqe->vendor_error = VENDOR_ERR_SW_FLUSHED;
+			shadow_queue_advance_next_to_complete(&qp->shadow_rq);
+		}
+	}
+}
+
 int mana_poll_cq(struct ibv_cq *ibcq, int nwc, struct ibv_wc *wc)
 {
 	struct mana_cq *cq = container_of(ibcq, struct mana_cq, ibcq);
 	struct mana_context *ctx = to_mctx(ibcq->context);
 	struct gdma_cqe gdma_cqe;
 	int num_polled = 0;
-	int ret;
+	int ret, i;
 
 	pthread_spin_lock(&cq->lock);
 
-	while (cq->ready_wcs < nwc) {
+	for (i = 0; i < nwc; i++) {
 		ret = gdma_read_cqe(cq, &gdma_cqe);
 		if (ret < 0) {
 			num_polled = -1;
@@ -350,11 +490,12 @@ int mana_poll_cq(struct ibv_cq *ibcq, int nwc, struct ibv_wc *wc)
 		}
 		if (ret == 0)
 			break;
-		cq->ready_wcs += mana_handle_cqe(ctx, &gdma_cqe);
+		mana_handle_cqe(ctx, &gdma_cqe);
 	}
 
+	mana_flush_completions(cq);
 	num_polled = mana_process_completions(cq, nwc, wc);
-	cq->ready_wcs -= num_polled;
+
 out:
 	pthread_spin_unlock(&cq->lock);
 
diff --git a/providers/mana/gdma.h b/providers/mana/gdma.h
index 7c69cff..fd02c23 100644
--- a/providers/mana/gdma.h
+++ b/providers/mana/gdma.h
@@ -164,65 +164,127 @@ enum {
 	CQE_TYPE_RC_WRITE_IMM = 6,
 	CQE_TYPE_ARMED_CMPL = 7,
 	CQE_TYPE_LWR = 8,
-	CQE_TYPE_RC_FENCE = 9,
-	CQE_TYPE_MAX
+	CQE_TYPE_ERROR = 34,
 }; /* HW DATA */
 
-struct mana_rdma_cqe {
-	uint32_t cqe_type	: 8;
-	uint32_t vendor_error	: 8;
-	uint32_t reserved1	: 16;
-	union {
-		uint32_t data[GDMA_COMP_DATA_SIZE / sizeof(uint32_t) - 4];
-		struct {
-			uint32_t msg_len;
-			uint32_t psn	: 24;
-			uint32_t reserved	: 8;
-			uint32_t imm_data;
-			uint32_t rx_wqe_offset;
-		} rc_recv;
-		struct {
-			uint32_t sge_offset	: 5;
-			uint32_t rx_wqe_offset	: 27;
-			uint32_t sge_byte_offset;
-		} ud_send;
-		struct {
-			uint32_t msg_len;
-			uint32_t src_qpn	: 24;
-			uint32_t reserved	: 8;
-			uint32_t imm_data;
-			uint32_t rx_wqe_offset;
-		} ud_recv;
-
-		struct {
-			uint32_t reserved1;
-			uint32_t psn	: 24;
-			uint32_t reserved2	: 8;
-			uint32_t imm_data;
-			uint32_t rx_wqe_offset;
-		} rc_write_with_imm;
-		struct {
-			uint32_t msn	: 24;
-			uint32_t syndrome	: 8;
-			uint32_t psn	: 24;
-			uint32_t reserved	: 8;
-			uint32_t read_resp_psn	: 24;
-		} rc_armed_completion;
+union mana_rdma_cqe {
+	struct {
+		uint8_t cqe_type;
+		uint8_t data[GDMA_COMP_DATA_SIZE - 1];
 	};
-	uint32_t timestamp_hi;
-	uint32_t timestamp_lo;
-	uint32_t reserved3;
+	struct {
+		uint32_t cqe_type	: 8;
+		uint32_t reserved1	: 24;
+		uint32_t msg_len;
+		uint32_t psn		: 24;
+		uint32_t reserved2	: 8;
+		uint32_t imm_data;
+		uint32_t rx_wqe_offset;
+	} rc_recv;
+	struct {
+		uint32_t cqe_type	: 8;
+		uint32_t vendor_error	: 9;
+		uint32_t reserved1	: 15;
+		uint32_t sge_offset	: 5;
+		uint32_t tx_wqe_offset	: 27;
+	} ud_send;
+	struct {
+		uint32_t cqe_type	: 8;
+		uint32_t reserved1	: 24;
+		uint32_t msg_len;
+		uint32_t src_qpn	: 24;
+		uint32_t reserved2	: 8;
+		uint32_t imm_data;
+		uint32_t rx_wqe_offset;
+	} ud_recv;
+	struct {
+		uint32_t cqe_type	: 8;
+		uint32_t vendor_error	: 10;
+		uint32_t reserved1	: 14;
+		uint32_t msn		: 24;
+		uint32_t syndrome	: 8;
+		uint32_t psn		: 24;
+		uint32_t opcode		: 8;
+		uint32_t rsp_msn	: 24;
+		uint32_t reserved2	: 8;
+		uint32_t rsp_psn	: 24;
+		uint32_t reserved3	: 8;
+	} error;
+	struct {
+		uint32_t cqe_type	: 8;
+		uint32_t reserved1	: 24;
+		uint32_t msn		: 24;
+		uint32_t syndrome	: 8;
+		uint32_t psn		: 24;
+		uint32_t reserved2	: 8;
+	} rc_armed_completion;
 }; /* HW DATA */
+static_assert(sizeof(union mana_rdma_cqe) == GDMA_COMP_DATA_SIZE, "bad size");
 
 struct gdma_cqe {
-	union {
-		uint8_t data[GDMA_COMP_DATA_SIZE];
-		struct mana_rdma_cqe rdma_cqe;
-	};
+	union mana_rdma_cqe rdma_cqe;
 	uint32_t wqid	: 24;
 	uint32_t is_sq	: 1;
 	uint32_t reserved	: 4;
 	uint32_t owner_bits	: 3;
 }; /* HW DATA */
 
+enum mana_error_code {
+	VENDOR_ERR_OK					= 0x0,
+	VENDOR_ERR_RX_OP_REQ                            = 0x03,
+	VENDOR_ERR_RX_PKT_LEN                           = 0x05,
+	VENDOR_ERR_RX_ATB_RKEY_MISCONFIG_ERR            = 0x43,
+	VENDOR_ERR_RX_ATB_RKEY_ADDR_RIGHT               = 0x83,
+	VENDOR_ERR_RX_ATB_RKEY_ADDR_RANGE               = 0xc3,
+	VENDOR_ERR_RX_MSG_LEN_OVFL                      = 0x102,
+	VENDOR_ERR_RX_MISBEHAVING_CLIENT                = 0x108,
+	VENDOR_ERR_RX_MALFORMED_WQE                     = 0x109,
+	VENDOR_ERR_RX_CLIENT_ID                         = 0x10a,
+	VENDOR_ERR_RX_GFID                              = 0x10b,
+	VENDOR_ERR_RX_READRESP_LEN_MISMATCH             = 0x10f,
+	VENDOR_ERR_RX_PCIE                              = 0x10c,
+	VENDOR_ERR_RX_NO_AVAIL_WQE                      = 0x111,
+	VENDOR_ERR_RX_ATB_SGE_MISSCONFIG                = 0x143,
+	VENDOR_ERR_RX_ATB_WQE_MISCONFIG                 = 0x145,
+	VENDOR_ERR_RX_INVALID_REQ_NAK			= 0x161,
+	VENDOR_ERR_RX_REMOTE_ACCESS_NAK			= 0x162,
+	VENDOR_ERR_RX_REMOTE_OP_ERR_NAK			= 0x163,
+	VENDOR_ERR_RX_ATB_SGE_ADDR_RIGHT                = 0x183,
+	VENDOR_ERR_RX_ATB_WQE_ADDR_RIGHT                = 0x185,
+	VENDOR_ERR_RX_ATB_SGE_ADDR_RANGE                = 0x1c3,
+	VENDOR_ERR_RX_ATB_WQE_ADDR_RANGE                = 0x1c5,
+	VENDOR_ERR_RX_NOT_EMPTY_ON_DISABLE              = 0x1c7,
+	VENDOR_ERR_TX_GDMA_CORRUPTED_WQE                = 0x201,
+	VENDOR_ERR_TX_ATB_WQE_ACCESS_VIOLATION          = 0x202,
+	VENDOR_ERR_TX_ATB_WQE_ADDR_RANGE                = 0x203,
+	VENDOR_ERR_TX_ATB_WQE_CONFIG_ERR                = 0x204,
+	VENDOR_ERR_TX_PCIE_WQE                          = 0x205,
+	VENDOR_ERR_TX_ATB_MSG_ACCESS_VIOLATION          = 0x206,
+	VENDOR_ERR_TX_ATB_MSG_ADDR_RANGE                = 0x207,
+	VENDOR_ERR_TX_ATB_MSG_CONFIG_ERR                = 0x208,
+	VENDOR_ERR_TX_PCIE_MSG                          = 0x209,
+	VENDOR_ERR_TX_GDMA_INVALID_STATE                = 0x20a,
+	VENDOR_ERR_TX_MISBEHAVING_CLIENT                = 0x20b,
+	VENDOR_ERR_TX_RDMA_MALFORMED_WQE_SIZE           = 0x210,
+	VENDOR_ERR_TX_RDMA_MALFORMED_WQE_FIELD          = 0x211,
+	VENDOR_ERR_TX_RDMA_INVALID_STATE                = 0x212,
+	VENDOR_ERR_TX_RDMA_INVALID_NPT                  = 0x213,
+	VENDOR_ERR_TX_RDMA_INVALID_SGID                 = 0x214,
+	VENDOR_ERR_TX_RDMA_WQE_UNSUPPORTED              = 0x215,
+	VENDOR_ERR_TX_RDMA_WQE_LEN_ERR                  = 0x216,
+	VENDOR_ERR_TX_RDMA_MTU_ERR                      = 0x217,
+	VENDOR_ERR_TX_RDMA_VFID_MISMATCH                = 0x218,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_MISS                 = 0x220,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_IDX_ERROR            = 0x221,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_TAG_MISMATCH_ERROR   = 0x222,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_PDID_MISMATCH_ERROR  = 0x223,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_AR_ERROR             = 0x224,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_PT_OVF               = 0x225,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_PT_LENGHT_MISMATCH   = 0x226,
+	VENDOR_ERR_TX_RDMA_ATB_CMD_ILLEGAL_CMD          = 0x227,
+	VENDOR_ERR_HW_MAX                               = 0x3ff,
+	/* SW vendor errors */
+	VENDOR_ERR_SW_FLUSHED				= 0xfff,
+};
+
 #endif //_GDMA_H_
diff --git a/providers/mana/mana.c b/providers/mana/mana.c
index 22e5ff9..581725f 100644
--- a/providers/mana/mana.c
+++ b/providers/mana/mana.c
@@ -204,8 +204,10 @@ static void mana_free_context(struct ibv_context *ibctx)
 	int i;
 
 	for (i = 0; i < MANA_QP_TABLE_SIZE; ++i) {
-		if (context->qp_table[i].refcnt)
-			free(context->qp_table[i].table);
+		if (context->qp_stable[i].refcnt)
+			free(context->qp_stable[i].table);
+		if (context->qp_rtable[i].refcnt)
+			free(context->qp_rtable[i].table);
 	}
 	pthread_mutex_destroy(&context->qp_table_mutex);
 
@@ -214,8 +216,27 @@ static void mana_free_context(struct ibv_context *ibctx)
 	free(context);
 }
 
+static void mana_async_event(struct ibv_context *context,
+			     struct ibv_async_event *event)
+{
+	struct ibv_qp *ibvqp;
+
+	switch (event->event_type) {
+	case IBV_EVENT_QP_FATAL:
+	case IBV_EVENT_QP_REQ_ERR:
+	case IBV_EVENT_QP_ACCESS_ERR:
+	case IBV_EVENT_PATH_MIG_ERR:
+		ibvqp = event->element.qp;
+		mana_qp_move_flush_err(ibvqp);
+		break;
+	default:
+		break;
+	}
+}
+
 static const struct verbs_context_ops mana_ctx_ops = {
 	.alloc_pd = mana_alloc_pd,
+	.async_event = mana_async_event,
 	.alloc_parent_domain = mana_alloc_parent_domain,
 	.create_cq = mana_create_cq,
 	.create_qp = mana_create_qp,
@@ -283,8 +304,10 @@ static struct verbs_context *mana_alloc_context(struct ibv_device *ibdev,
 	verbs_set_ops(&context->ibv_ctx, &mana_ctx_ops);
 
 	pthread_mutex_init(&context->qp_table_mutex, NULL);
-	for (i = 0; i < MANA_QP_TABLE_SIZE; ++i)
-		context->qp_table[i].refcnt = 0;
+	for (i = 0; i < MANA_QP_TABLE_SIZE; ++i) {
+		context->qp_stable[i].refcnt = 0;
+		context->qp_rtable[i].refcnt = 0;
+	}
 
 	context->db_page = mmap(NULL, DOORBELL_PAGE_SIZE, PROT_WRITE,
 				MAP_SHARED, context->ibv_ctx.context.cmd_fd, 0);
diff --git a/providers/mana/mana.h b/providers/mana/mana.h
index 29350d7..7d31cae 100644
--- a/providers/mana/mana.h
+++ b/providers/mana/mana.h
@@ -71,12 +71,15 @@ static inline uint32_t get_large_wqe_size(uint32_t sge)
 	return align(wqe_size, GDMA_WQE_ALIGNMENT_UNIT_SIZE);
 }
 
+struct mana_table {
+	struct mana_qp **table;
+	int refcnt;
+};
+
 struct mana_context {
 	struct verbs_context ibv_ctx;
-	struct {
-		struct mana_qp **table;
-		int refcnt;
-	} qp_table[MANA_QP_TABLE_SIZE];
+	struct mana_table qp_rtable[MANA_QP_TABLE_SIZE];
+	struct mana_table qp_stable[MANA_QP_TABLE_SIZE];
 	pthread_mutex_t qp_table_mutex;
 
 	struct manadv_ctx_allocators extern_alloc;
@@ -110,10 +113,8 @@ struct mana_ib_raw_qp {
 
 struct mana_ib_rc_qp {
 	struct mana_gdma_queue queues[USER_RC_QUEUE_TYPE_MAX];
-
 	uint32_t sq_ssn;
 	uint32_t sq_psn;
-	uint32_t sq_highest_completed_psn;
 };
 
 struct mana_qp {
@@ -157,7 +158,6 @@ struct mana_cq {
 	pthread_spinlock_t lock;
 	uint32_t head;
 	uint32_t last_armed_head;
-	uint32_t ready_wcs;
 	void *db_page;
 	/* list of qp's that use this cq for send completions */
 	struct list_head send_qp_list;
@@ -240,5 +240,7 @@ int mana_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
 
 int mana_arm_cq(struct ibv_cq *ibcq, int solicited);
 
-struct mana_qp *mana_get_qp_from_rq(struct mana_context *ctx, uint32_t qpn);
+struct mana_qp *mana_get_qp(struct mana_context *ctx, uint32_t qpn, bool is_sq);
+
+void mana_qp_move_flush_err(struct ibv_qp *ibqp);
 #endif
diff --git a/providers/mana/qp.c b/providers/mana/qp.c
index 67c945b..0934804 100644
--- a/providers/mana/qp.c
+++ b/providers/mana/qp.c
@@ -113,68 +113,114 @@ free_qp:
 	return NULL;
 }
 
-static int mana_store_qp(struct mana_context *ctx, struct mana_qp *qp, uint32_t qid)
+static int mana_store_qid(struct mana_table *qp_table, struct mana_qp *qp, uint32_t qid)
 {
 	uint32_t tbl_idx, tbl_off;
 	int ret = 0;
 
-	pthread_mutex_lock(&ctx->qp_table_mutex);
-
 	tbl_idx = qid >> MANA_QP_TABLE_SHIFT;
 	tbl_off = qid & MANA_QP_TABLE_MASK;
 
-	if (ctx->qp_table[tbl_idx].refcnt == 0) {
-		ctx->qp_table[tbl_idx].table =
+	if (qp_table[tbl_idx].refcnt == 0) {
+		qp_table[tbl_idx].table =
 			calloc(MANA_QP_TABLE_SIZE, sizeof(struct mana_qp *));
-		if (!ctx->qp_table[tbl_idx].table) {
+		if (!qp_table[tbl_idx].table) {
 			ret = ENOMEM;
 			goto out;
 		}
 	}
 
-	if (ctx->qp_table[tbl_idx].table[tbl_off]) {
+	if (qp_table[tbl_idx].table[tbl_off]) {
 		ret = EBUSY;
 		goto out;
 	}
 
-	ctx->qp_table[tbl_idx].table[tbl_off] = qp;
-	ctx->qp_table[tbl_idx].refcnt++;
+	qp_table[tbl_idx].table[tbl_off] = qp;
+	qp_table[tbl_idx].refcnt++;
 
 out:
-	pthread_mutex_unlock(&ctx->qp_table_mutex);
 	return ret;
 }
 
-static void mana_remove_qp(struct mana_context *ctx, uint32_t qid)
+static void mana_remove_qid(struct mana_table *qp_table, uint32_t qid)
 {
 	uint32_t tbl_idx, tbl_off;
 
-	pthread_mutex_lock(&ctx->qp_table_mutex);
 	tbl_idx = qid >> MANA_QP_TABLE_SHIFT;
 	tbl_off = qid & MANA_QP_TABLE_MASK;
 
-	ctx->qp_table[tbl_idx].table[tbl_off] = NULL;
-	ctx->qp_table[tbl_idx].refcnt--;
+	qp_table[tbl_idx].table[tbl_off] = NULL;
+	qp_table[tbl_idx].refcnt--;
 
-	if (ctx->qp_table[tbl_idx].refcnt == 0) {
-		free(ctx->qp_table[tbl_idx].table);
-		ctx->qp_table[tbl_idx].table = NULL;
+	if (qp_table[tbl_idx].refcnt == 0) {
+		free(qp_table[tbl_idx].table);
+		qp_table[tbl_idx].table = NULL;
 	}
+}
+
+static int mana_store_qp(struct mana_context *ctx, struct mana_qp *qp)
+{
+	uint32_t sreq = qp->rc_qp.queues[USER_RC_SEND_QUEUE_REQUESTER].id;
+	uint32_t srep = qp->rc_qp.queues[USER_RC_SEND_QUEUE_RESPONDER].id;
+	uint32_t rreq = qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER].id;
+	uint32_t rrep = qp->rc_qp.queues[USER_RC_RECV_QUEUE_RESPONDER].id;
+	int ret;
+
+	pthread_mutex_lock(&ctx->qp_table_mutex);
+	ret = mana_store_qid(ctx->qp_stable, qp, sreq);
+	if (ret)
+		goto error;
+	ret = mana_store_qid(ctx->qp_stable, qp, srep);
+	if (ret)
+		goto remove_sreq;
+	ret = mana_store_qid(ctx->qp_rtable, qp, rreq);
+	if (ret)
+		goto remove_srep;
+	ret = mana_store_qid(ctx->qp_rtable, qp, rrep);
+	if (ret)
+		goto remove_rreq;
 
 	pthread_mutex_unlock(&ctx->qp_table_mutex);
+	return 0;
+
+remove_rreq:
+	mana_remove_qid(ctx->qp_rtable, rreq);
+remove_srep:
+	mana_remove_qid(ctx->qp_stable, srep);
+remove_sreq:
+	mana_remove_qid(ctx->qp_stable, sreq);
+error:
+	pthread_mutex_unlock(&ctx->qp_table_mutex);
+	return ret;
+}
+
+static void mana_remove_qp(struct mana_context *ctx, struct mana_qp *qp)
+{
+	uint32_t sreq = qp->rc_qp.queues[USER_RC_SEND_QUEUE_REQUESTER].id;
+	uint32_t srep = qp->rc_qp.queues[USER_RC_SEND_QUEUE_RESPONDER].id;
+	uint32_t rreq = qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER].id;
+	uint32_t rrep = qp->rc_qp.queues[USER_RC_RECV_QUEUE_RESPONDER].id;
+
+	pthread_mutex_lock(&ctx->qp_table_mutex);
+	mana_remove_qid(ctx->qp_stable, sreq);
+	mana_remove_qid(ctx->qp_stable, srep);
+	mana_remove_qid(ctx->qp_rtable, rreq);
+	mana_remove_qid(ctx->qp_rtable, rrep);
+	pthread_mutex_unlock(&ctx->qp_table_mutex);
 }
 
-struct mana_qp *mana_get_qp_from_rq(struct mana_context *ctx, uint32_t qid)
+struct mana_qp *mana_get_qp(struct mana_context *ctx, uint32_t qid, bool is_sq)
 {
+	struct mana_table *qp_table = is_sq ? ctx->qp_stable : ctx->qp_rtable;
 	uint32_t tbl_idx, tbl_off;
 
 	tbl_idx = qid >> MANA_QP_TABLE_SHIFT;
 	tbl_off = qid & MANA_QP_TABLE_MASK;
 
-	if (!ctx->qp_table[tbl_idx].table)
+	if (!qp_table[tbl_idx].table)
 		return NULL;
 
-	return ctx->qp_table[tbl_idx].table[tbl_off];
+	return qp_table[tbl_idx].table[tbl_off];
 }
 
 static uint32_t get_queue_size(struct ibv_qp_init_attr *attr, enum user_queue_types type)
@@ -284,16 +330,11 @@ static struct ibv_qp *mana_create_qp_rc(struct ibv_pd *ibpd,
 
 	qp->ibqp.qp.qp_num = qp->rc_qp.queues[USER_RC_RECV_QUEUE_RESPONDER].id;
 
-	ret = mana_store_qp(ctx, qp, qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER].id);
+	ret = mana_store_qp(ctx, qp);
 	if (ret) {
 		errno = ret;
 		goto destroy_qp;
 	}
-	ret = mana_store_qp(ctx, qp, qp->rc_qp.queues[USER_RC_RECV_QUEUE_RESPONDER].id);
-	if (ret) {
-		errno = ret;
-		goto remove_qp_req;
-	}
 
 	pthread_spin_lock(&send_cq->lock);
 	list_add(&send_cq->send_qp_list, &qp->send_cq_node);
@@ -305,8 +346,6 @@ static struct ibv_qp *mana_create_qp_rc(struct ibv_pd *ibpd,
 
 	return &qp->ibqp.qp;
 
-remove_qp_req:
-	mana_remove_qp(ctx, qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER].id);
 destroy_qp:
 	ibv_cmd_destroy_qp(&qp->ibqp.qp);
 free_rb:
@@ -346,29 +385,32 @@ static void mana_ib_modify_rc_qp(struct mana_qp *qp, struct ibv_qp_attr *attr, i
 	if (attr_mask & IBV_QP_PATH_MTU)
 		qp->mtu = attr->path_mtu;
 
-	switch (attr->qp_state) {
-	case IBV_QPS_RESET:
-		for (i = 0; i < USER_RC_QUEUE_TYPE_MAX; ++i) {
-			qp->rc_qp.queues[i].prod_idx = 0;
-			qp->rc_qp.queues[i].cons_idx = 0;
-		}
-		mana_ib_reset_rb_shmem(qp);
-		reset_shadow_queue(&qp->shadow_rq);
-		reset_shadow_queue(&qp->shadow_sq);
-	case IBV_QPS_INIT:
-		break;
-	case IBV_QPS_RTR:
-		break;
-	case IBV_QPS_RTS:
-		if (attr_mask & IBV_QP_SQ_PSN) {
-			qp->rc_qp.sq_ssn = 1;
-			qp->rc_qp.sq_psn = attr->sq_psn;
-			qp->rc_qp.sq_highest_completed_psn = PSN_DEC(attr->sq_psn);
-			gdma_arm_normal_cqe(&qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER], attr->sq_psn);
+	if (attr_mask & IBV_QP_STATE) {
+		qp->ibqp.qp.state = attr->qp_state;
+		switch (attr->qp_state) {
+		case IBV_QPS_RESET:
+			for (i = 0; i < USER_RC_QUEUE_TYPE_MAX; ++i) {
+				qp->rc_qp.queues[i].prod_idx = 0;
+				qp->rc_qp.queues[i].cons_idx = 0;
+			}
+			mana_ib_reset_rb_shmem(qp);
+			reset_shadow_queue(&qp->shadow_rq);
+			reset_shadow_queue(&qp->shadow_sq);
+		case IBV_QPS_INIT:
+			break;
+		case IBV_QPS_RTR:
+			break;
+		case IBV_QPS_RTS:
+			if (attr_mask & IBV_QP_SQ_PSN) {
+				qp->rc_qp.sq_ssn = 1;
+				qp->rc_qp.sq_psn = attr->sq_psn;
+				gdma_arm_normal_cqe(&qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER],
+						    attr->sq_psn);
+			}
+			break;
+		default:
+			break;
 		}
-		break;
-	default:
-		break;
 	}
 }
 
@@ -381,18 +423,21 @@ int mana_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr, int attr_mask)
 	if (ibqp->qp_type != IBV_QPT_RC)
 		return EOPNOTSUPP;
 
-	if (!(attr_mask & IBV_QP_STATE))
-		return 0;
+	pthread_spin_lock(&qp->sq_lock);
+	pthread_spin_lock(&qp->rq_lock);
 
 	err = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof(cmd));
 	if (err) {
 		verbs_err(verbs_get_ctx(ibqp->context), "Failed to modify qp\n");
-		return err;
+		goto cleanup;
 	}
 
 	mana_ib_modify_rc_qp(qp, attr, attr_mask);
 
-	return 0;
+cleanup:
+	pthread_spin_unlock(&qp->rq_lock);
+	pthread_spin_unlock(&qp->sq_lock);
+	return err;
 }
 
 static void mana_drain_cqes(struct mana_qp *qp)
@@ -401,18 +446,10 @@ static void mana_drain_cqes(struct mana_qp *qp)
 	struct mana_cq *recv_cq = container_of(qp->ibqp.qp.recv_cq, struct mana_cq, ibcq);
 
 	pthread_spin_lock(&send_cq->lock);
-	while (shadow_queue_get_next_to_consume(&qp->shadow_sq)) {
-		shadow_queue_advance_consumer(&qp->shadow_sq);
-		send_cq->ready_wcs--;
-	}
 	list_del(&qp->send_cq_node);
 	pthread_spin_unlock(&send_cq->lock);
 
 	pthread_spin_lock(&recv_cq->lock);
-	while (shadow_queue_get_next_to_consume(&qp->shadow_rq)) {
-		shadow_queue_advance_consumer(&qp->shadow_rq);
-		recv_cq->ready_wcs--;
-	}
 	list_del(&qp->recv_cq_node);
 	pthread_spin_unlock(&recv_cq->lock);
 }
@@ -424,8 +461,7 @@ int mana_destroy_qp(struct ibv_qp *ibqp)
 	int ret, i;
 
 	if (ibqp->qp_type == IBV_QPT_RC) {
-		mana_remove_qp(ctx, qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER].id);
-		mana_remove_qp(ctx, qp->rc_qp.queues[USER_RC_RECV_QUEUE_RESPONDER].id);
+		mana_remove_qp(ctx, qp);
 		mana_drain_cqes(qp);
 	}
 
@@ -546,3 +582,11 @@ struct ibv_qp *mana_create_qp_ex(struct ibv_context *context,
 
 	return NULL;
 }
+
+void mana_qp_move_flush_err(struct ibv_qp *ibqp)
+{
+	struct ibv_qp_attr attr = {};
+
+	attr.qp_state = IBV_QPS_ERR;
+	mana_modify_qp(ibqp, &attr, IBV_QP_STATE);
+}
diff --git a/providers/mana/shadow_queue.h b/providers/mana/shadow_queue.h
index 6c86cdb..1073f7c 100644
--- a/providers/mana/shadow_queue.h
+++ b/providers/mana/shadow_queue.h
@@ -17,13 +17,13 @@
 
 struct shadow_wqe_header {
 	/* ibv_wc_opcode */
-	uint8_t opcode;
+	uint64_t opcode : 8;
 	/* ibv_wc_flags or MANA_NO_SIGNAL_WC */
-	uint8_t flags;
+	uint64_t flags : 8;
+	uint64_t posted_wqe_size_in_bu : 8;
 	/* ibv_wc_status */
-	uint8_t vendor_error_code;
-	uint8_t posted_wqe_size_in_bu;
-	uint32_t unmasked_queue_offset;
+	uint64_t vendor_error : 12;
+	uint64_t unmasked_queue_offset : 28;
 	uint64_t wr_id;
 };
 
@@ -43,6 +43,7 @@ struct shadow_queue {
 	uint64_t prod_idx;
 	uint64_t cons_idx;
 	uint64_t next_to_complete_idx;
+	uint64_t next_to_signal_idx;
 	uint32_t length;
 	uint32_t stride;
 	void *buffer;
@@ -53,6 +54,7 @@ static inline void reset_shadow_queue(struct shadow_queue *queue)
 	queue->prod_idx = 0;
 	queue->cons_idx = 0;
 	queue->next_to_complete_idx = 0;
+	queue->next_to_signal_idx = 0;
 }
 
 static inline int create_shadow_queue(struct shadow_queue *queue, uint32_t length, uint32_t stride)
@@ -148,4 +150,20 @@ static inline void shadow_queue_advance_next_to_complete(struct shadow_queue *qu
 	queue->next_to_complete_idx++;
 }
 
+static inline struct shadow_wqe_header *
+shadow_queue_get_next_to_signal(struct shadow_queue *queue)
+{
+	struct shadow_wqe_header *wqe = NULL;
+
+	queue->next_to_signal_idx = max(queue->next_to_signal_idx, queue->next_to_complete_idx);
+	while (queue->next_to_signal_idx < queue->prod_idx) {
+		wqe = shadow_queue_get_element(queue, queue->next_to_signal_idx);
+		queue->next_to_signal_idx++;
+		if (wqe->flags != MANA_NO_SIGNAL_WC)
+			return wqe;
+	}
+
+	return NULL;
+}
+
 #endif //_SHADOW_QUEUE_H_
diff --git a/providers/mana/wr.c b/providers/mana/wr.c
index 5661045..a9a5fe5 100644
--- a/providers/mana/wr.c
+++ b/providers/mana/wr.c
@@ -146,6 +146,13 @@ static int mana_ib_rc_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
 	int ret = 0;
 
 	pthread_spin_lock(&qp->rq_lock);
+
+	if (unlikely(ibqp->state == IBV_QPS_RESET || ibqp->state == IBV_QPS_ERR)) {
+		verbs_err(verbs_get_ctx(ibqp->context), "Invalid QP state\n");
+		ret = EINVAL;
+		goto cleanup;
+	}
+
 	for (; wr; wr = wr->next) {
 		if (shadow_queue_full(&qp->shadow_rq)) {
 			verbs_err(&mc->ibv_ctx, "recv shadow queue full\n");
@@ -371,8 +378,14 @@ static int mana_ib_rc_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
 
 	pthread_spin_lock(&qp->sq_lock);
 
+	if (unlikely(ibqp->state != IBV_QPS_RTS)) {
+		verbs_err(verbs_get_ctx(ibqp->context), "Invalid QP state\n");
+		ret = EINVAL;
+		goto cleanup;
+	}
+
 	for (; wr; wr = wr->next) {
-		if ((wr->send_flags & IBV_SEND_SIGNALED) && shadow_queue_full(&qp->shadow_sq)) {
+		if (shadow_queue_full(&qp->shadow_sq)) {
 			verbs_err(verbs_get_ctx(ibqp->context), "shadow queue full\n");
 			ret = ENOMEM;
 			goto cleanup;
@@ -397,7 +410,8 @@ static int mana_ib_rc_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
 		ring = true;
 
 		shadow_queue_advance_producer(&qp->shadow_sq);
-		mana_ib_update_shared_mem_right_offset(qp, shadow_wqe->header.unmasked_queue_offset);
+		mana_ib_update_shared_mem_right_offset(qp,
+						       shadow_wqe->header.unmasked_queue_offset);
 	}
 
 cleanup:
