From 2db3c164aea36d297eb3db7c54804037c2754c80 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E5=AF=8C=E8=89=B3?= <li.fuyan@zte.com.cn>
Date: Fri, 28 Mar 2025 15:56:16 +0800
Subject: [PATCH] libzrdma: Fix capability-related bugs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix several capability-related bugs in the zrdma userspace provider:

- Reserve 1 SQ quantum instead of 258 and drop the now-unused
  ZXDH_SQ_RING_MOVE_HEAD* macros and their hardcoded full-ring checks.
- Validate the requested CQ size against the device's max_hw_cq_size
  before allocating in ucreate_cq(), and honor the same device limit
  (instead of the ZXDH_MAX_CQ_SIZE constant) in zxdh_uresize_cq().
- Read the CQE valid bit from qword 0 instead of qword 3 in
  zxdh_clean_cq().
- Guard zxdh_qp_round_up() and zxdh_cq_round_up() against a zero
  depth, which would underflow the round-up loop.
- Flush stale CQEs and reinitialize the SQ/RQ ring indices when a QP
  is modified to IBV_QPS_RESET.
- Verify that the sgid can be queried before creating an AH.
- Widen the RTT-to-nanoseconds conversion to 64 bits in
  zxdh_wc_read_completion_wallclock_ns() to avoid 32-bit overflow.
- Take the CQ lock before registering the new buffer in
  zxdh_uresize_cq() and unwind in the correct order on error.

Also make zxdh_fragcnt_to_quanta_sq() a static inline helper in
zxdh_hw.c, relocate zxdh_get_write_imm_split_switch(), and drop
duplicate includes from zxdh_hw.c.

Signed-off-by: 李富艳 <li.fuyan@zte.com.cn>
---
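Notes for reviewers (this section is ignored by git-am):

The zero-depth guard added to zxdh_qp_round_up()/zxdh_cq_round_up()
matters because the helper decrements the depth before the
bit-smearing loop. A minimal standalone sketch of the patched idiom,
assuming the helper returns wqdepth + 1 after the smear, as the usual
round-up-to-power-of-two idiom does:

    #include <stdio.h>

    static unsigned int round_up_pow2(unsigned int wqdepth)
    {
            int scount = 1;

            /* new guard: without it, wqdepth-- underflows for 0 */
            if (wqdepth == 0)
                    return 0;

            /* smear the highest set bit into all lower positions */
            for (wqdepth--; scount <= 16; scount *= 2)
                    wqdepth |= wqdepth >> scount;

            return wqdepth + 1;
    }

    int main(void)
    {
            /* 5 -> 8, 8 -> 8, 9 -> 16, 0 -> 0 (no underflow) */
            printf("%u %u %u %u\n", round_up_pow2(5), round_up_pow2(8),
                   round_up_pow2(9), round_up_pow2(0));
            return 0;
    }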
 providers/zrdma/zxdh_defs.h  |  33 +-----
 providers/zrdma/zxdh_hw.c    |  47 ++++-----
 providers/zrdma/zxdh_verbs.c | 196 +++++++++++++++++++++--------------
 providers/zrdma/zxdh_verbs.h |   2 +-
 4 files changed, 141 insertions(+), 137 deletions(-)
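One more note, on the (uint64_t) cast in
zxdh_wc_read_completion_wallclock_ns(): assuming tcp_seq_num_rtt is a
32-bit field (which the added cast implies), the old expression
multiplied in 32 bits and wrapped before being widened to the 64-bit
return type. A quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t rtt_usec = 5000000; /* > UINT32_MAX / 1000 */

            uint64_t wrong = rtt_usec * 1000;           /* wraps in 32 bits */
            uint64_t right = (uint64_t)rtt_usec * 1000; /* widens first */

            printf("wrong=%llu right=%llu\n", (unsigned long long)wrong,
                   (unsigned long long)right);
            return 0;
    }

This prints wrong=705032704 right=5000000000.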

diff --git a/providers/zrdma/zxdh_defs.h b/providers/zrdma/zxdh_defs.h
index 8772e7b..ec0bebe 100644
--- a/providers/zrdma/zxdh_defs.h
+++ b/providers/zrdma/zxdh_defs.h
@@ -41,7 +41,7 @@
 #define ZXDH_SQ_WQE_BYTESIZE 32
 #define ZXDH_SRQ_WQE_MIN_SIZE 16
 
-#define ZXDH_SQ_RSVD 258
+#define ZXDH_SQ_RSVD 1
 #define ZXDH_RQ_RSVD 1
 #define ZXDH_SRQ_RSVD 1
 
@@ -252,29 +252,7 @@
                        (_retcode) = ZXDH_ERR_RING_FULL; \
                } \
        }
-#define ZXDH_SQ_RING_MOVE_HEAD(_ring, _retcode) \
-       { \
-               register __u32 size; \
-               size = (_ring).size; \
-               if (!ZXDH_SQ_RING_FULL_ERR(_ring)) { \
-                       (_ring).head = ((_ring).head + 1) % size; \
-                       (_retcode) = 0; \
-               } else { \
-                       (_retcode) = ZXDH_ERR_RING_FULL; \
-               } \
-       }
-#define ZXDH_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
-       { \
-               register __u32 size; \
-               size = (_ring).size; \
-               if ((ZXDH_RING_USED_QUANTA(_ring) + (_count)) < \
-                   (size - 256)) { \
-                       (_ring).head = ((_ring).head + (_count)) % size; \
-                       (_retcode) = 0; \
-               } else { \
-                       (_retcode) = ZXDH_ERR_RING_FULL; \
-               } \
-       }
+
 #define ZXDH_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
        (_ring).head = ((_ring).head + (_count)) % (_ring).size
 
@@ -298,13 +276,6 @@
 #define ZXDH_ERR_RING_FULL3(_ring) \
        ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 3)))
 
-#define ZXDH_SQ_RING_FULL_ERR(_ring) \
-       ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 257)))
-
-#define ZXDH_ERR_SQ_RING_FULL2(_ring) \
-       ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 258)))
-#define ZXDH_ERR_SQ_RING_FULL3(_ring) \
-       ((ZXDH_RING_USED_QUANTA(_ring) == ((_ring).size - 259)))
 #define ZXDH_RING_MORE_WORK(_ring) ((ZXDH_RING_USED_QUANTA(_ring) != 0))
 
 #define ZXDH_RING_USED_QUANTA(_ring) \
diff --git a/providers/zrdma/zxdh_hw.c b/providers/zrdma/zxdh_hw.c
index fb8f016..0ea5a85 100644
--- a/providers/zrdma/zxdh_hw.c
+++ b/providers/zrdma/zxdh_hw.c
@@ -10,14 +10,6 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <unistd.h>
-#include <stdbool.h>
-#include <string.h>
-#include <util/udma_barrier.h>
-#include <util/util.h>
-#include <linux/types.h>
-#include <inttypes.h>
-#include <pthread.h>
-#include <endian.h>
 #define ERROR_CODE_VALUE 65
 
 static inline void qp_tx_psn_add(__u32 *x, __u32 y, __u16 mtu)
@@ -30,11 +22,17 @@ static inline void qp_tx_psn_add(__u32 *x, __u32 y, __u16 mtu)
        *x = (*x + chunks) & 0xffffff;
 }
 
-int zxdh_get_write_imm_split_switch(void)
+/**
+ * zxdh_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @quanta: quanta for frag_cnt
+ */
+static inline enum zxdh_status_code zxdh_fragcnt_to_quanta_sq(__u32 frag_cnt, __u16 *quanta)
 {
-       char *env;
-       env = getenv("ZXDH_WRITE_IMM_SPILT_ENABLE");
-       return (env != NULL) ? atoi(env) : 0;
+       if (unlikely(frag_cnt > ZXDH_MAX_SQ_FRAG))
+               return ZXDH_ERR_INVALID_FRAG_COUNT;
+       *quanta = (frag_cnt >> 1) + 1;
+       return 0;
 }
 
 /**
@@ -2153,6 +2151,9 @@ int zxdh_qp_round_up(__u32 wqdepth)
 {
        int scount = 1;
 
+       if (wqdepth == 0)
+               return 0;
+
        for (wqdepth--; scount <= 16; scount *= 2)
                wqdepth |= wqdepth >> scount;
 
@@ -2167,6 +2168,9 @@ int zxdh_cq_round_up(__u32 wqdepth)
 {
        int scount = 1;
 
+       if (wqdepth == 0)
+               return 0;
+
        for (wqdepth--; scount <= 16; scount *= 2)
                wqdepth |= wqdepth >> scount;
 
@@ -2364,7 +2368,7 @@ enum zxdh_status_code zxdh_cq_init(struct zxdh_cq *cq,
 void zxdh_clean_cq(void *q, struct zxdh_cq *cq)
 {
        __le64 *cqe;
-       __u64 qword3, comp_ctx;
+       __u64 qword0, comp_ctx;
        __u32 cq_head;
        __u8 polarity, temp;
 
@@ -2377,8 +2381,8 @@ void zxdh_clean_cq(void *q, struct zxdh_cq *cq)
                              .buf;
                else
                        cqe = cq->cq_base[cq_head].buf;
-               get_64bit_val(cqe, 24, &qword3);
-               polarity = (__u8)FIELD_GET(ZXDH_CQ_VALID, qword3);
+               get_64bit_val(cqe, 0, &qword0);
+               polarity = (__u8)FIELD_GET(ZXDH_CQ_VALID, qword0);
 
                if (polarity != temp)
                        break;
@@ -2432,19 +2436,6 @@ enum zxdh_status_code zxdh_nop(struct zxdh_qp *qp, __u64 wr_id, bool signaled,
        return 0;
 }
 
-/**
- * zxdh_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
- * @frag_cnt: number of fragments
- * @quanta: quanta for frag_cnt
- */
-enum zxdh_status_code zxdh_fragcnt_to_quanta_sq(__u32 frag_cnt, __u16 *quanta)
-{
-       if (frag_cnt > ZXDH_MAX_SQ_FRAG)
-               return ZXDH_ERR_INVALID_FRAG_COUNT;
-       *quanta = frag_cnt / 2 + 1;
-       return 0;
-}
-
 /**
  * zxdh_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
  * @frag_cnt: number of fragments
diff --git a/providers/zrdma/zxdh_verbs.c b/providers/zrdma/zxdh_verbs.c
index 9cf1240..39ff401 100644
--- a/providers/zrdma/zxdh_verbs.c
+++ b/providers/zrdma/zxdh_verbs.c
@@ -59,6 +59,7 @@ static int zxdh_get_inline_data(uint8_t *inline_data, struct ibv_send_wr *ib_wr,
        while (num < ib_wr->num_sge) {
                *len += ib_wr->sg_list[num].length;
                if (*len > ZXDH_MAX_INLINE_DATA_SIZE) {
+                       printf("err:inline bytes over max inline length\n");
                        return -EINVAL;
                }
                memcpy(inline_data + offset,
@@ -343,12 +344,8 @@ static void zxdh_free_hw_buf(void *buf, size_t size)
  */
 static inline int get_cq_size(int ncqe)
 {
-       ncqe++;
-
-       /* Completions with immediate require 1 extra entry */
        if (ncqe < ZXDH_U_MINCQ_SIZE)
                ncqe = ZXDH_U_MINCQ_SIZE;
-
        return ncqe;
 }
 
@@ -380,6 +377,7 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
        size_t total_size;
        __u32 cq_pages;
        int ret, ncqe;
+       __u64 resize_supported;
 
        iwvctx = container_of(context, struct zxdh_uvcontext, ibv_ctx.context);
        dev_attrs = &iwvctx->dev_attrs;
@@ -390,6 +388,13 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
                return NULL;
        }
 
+       info.cq_size = get_cq_size(attr_ex->cqe);
+       info.cq_size = zxdh_cq_round_up(info.cq_size);
+       if (info.cq_size > dev_attrs->max_hw_cq_size) {
+               errno = EINVAL;
+               return NULL;
+       }
+
        /* save the cqe requested by application */
        ncqe = attr_ex->cqe;
        iwucq = calloc(1, sizeof(*iwucq));
@@ -404,14 +409,13 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
        }
 
        iwucq->resize_enable = false;
-       info.cq_size = get_cq_size(attr_ex->cqe);
-       info.cq_size = zxdh_cq_round_up(info.cq_size);
        iwucq->comp_vector = attr_ex->comp_vector;
        list_head_init(&iwucq->resize_list);
        total_size = get_cq_total_bytes(info.cq_size);
        cq_pages = total_size >> ZXDH_HW_PAGE_SHIFT;
+       resize_supported = dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE;
 
-       if (!(dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE))
+       if (!resize_supported)
                total_size = (cq_pages << ZXDH_HW_PAGE_SHIFT) +
                             ZXDH_DB_SHADOW_AREA_SIZE;
 
@@ -436,7 +440,7 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
 
        iwucq->vmr.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
 
-       if (dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE) {
+       if (resize_supported) {
                info.shadow_area = zxdh_alloc_hw_buf(ZXDH_DB_SHADOW_AREA_SIZE);
                if (!info.shadow_area)
                        goto err_dereg_mr;
@@ -457,7 +461,6 @@ static struct ibv_cq_ex *ucreate_cq(struct ibv_context *context,
                }
 
                iwucq->vmr_shadow_area.ibv_mr.pd = &iwvctx->iwupd->ibv_pd;
-
        } else {
                info.shadow_area = (__le64 *)((__u8 *)info.cq_base +
                                              (cq_pages << ZXDH_HW_PAGE_SHIFT));
@@ -491,7 +494,9 @@ err_dereg_shadow:
        ibv_cmd_dereg_mr(&iwucq->vmr);
        if (iwucq->vmr_shadow_area.ibv_mr.handle) {
                ibv_cmd_dereg_mr(&iwucq->vmr_shadow_area);
-               zxdh_free_hw_buf(info.shadow_area, ZXDH_DB_SHADOW_AREA_SIZE);
+               if (resize_supported)
+                       zxdh_free_hw_buf(info.shadow_area,
+                                        ZXDH_DB_SHADOW_AREA_SIZE);
        }
 err_dereg_mr:
        zxdh_free_hw_buf(info.cq_base, total_size);
@@ -553,7 +558,7 @@ static int zxdh_process_resize_list(struct zxdh_ucq *iwucq,
        struct zxdh_cq_buf *cq_buf, *next;
        int cq_cnt = 0;
 
-       list_for_each_safe (&iwucq->resize_list, cq_buf, next, list) {
+       list_for_each_safe(&iwucq->resize_list, cq_buf, next, list) {
                if (cq_buf == lcqe_buf)
                        return cq_cnt;
 
@@ -774,7 +779,8 @@ static inline void zxdh_process_cqe(struct ibv_wc *entry,
 *
 * Returns the internal zxdh device error code or 0 on success
 */
-static int zxdh_poll_one(struct zxdh_cq *cq, struct zxdh_cq_poll_info *cur_cqe,
+static int zxdh_poll_one(struct zxdh_cq *cq,
+                        struct zxdh_cq_poll_info *cur_cqe,
                         struct ibv_wc *entry)
 {
        int ret = zxdh_cq_poll_cmpl(cq, cur_cqe);
@@ -811,7 +817,7 @@ static int __zxdh_upoll_resize_cq(struct zxdh_ucq *iwucq, int num_entries,
        int ret;
 
        /* go through the list of previously resized CQ buffers */
-       list_for_each_safe (&iwucq->resize_list, cq_buf, next, list) {
+       list_for_each_safe(&iwucq->resize_list, cq_buf, next, list) {
                while (npolled < num_entries) {
                        ret = zxdh_poll_one(&cq_buf->cq, cur_cqe,
                                            entry ? entry + npolled : NULL);
@@ -829,6 +835,7 @@ static int __zxdh_upoll_resize_cq(struct zxdh_ucq *iwucq, int num_entries,
                                cq_new_cqe = true;
                                continue;
                        }
+                       printf("__zrdma_upoll_cq resize goto error failed\n");
                        goto error;
                }
 
@@ -856,6 +863,7 @@ static int __zxdh_upoll_resize_cq(struct zxdh_ucq *iwucq, int num_entries,
                        cq_new_cqe = true;
                        continue;
                }
+               printf("__zrdma_upoll_cq goto error failed\n");
                goto error;
        }
        if (cq_new_cqe)
@@ -1038,7 +1046,7 @@ static uint64_t zxdh_wc_read_completion_wallclock_ns(struct ibv_cq_ex *ibvcq_ex)
                container_of(ibvcq_ex, struct zxdh_ucq, verbs_cq.cq_ex);
 
        /* RTT is in usec */
-       return iwucq->cur_cqe.tcp_seq_num_rtt * 1000;
+       return (uint64_t)iwucq->cur_cqe.tcp_seq_num_rtt * 1000;
 }
 
 static enum ibv_wc_opcode zxdh_wc_read_opcode(struct ibv_cq_ex *ibvcq_ex)
@@ -1682,6 +1690,37 @@ int zxdh_uquery_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask,
                                   sizeof(cmd));
 }
 
+/**
+ * zxdh_clean_cqes - clean cq entries for qp
+ * @qp: qp for which completions are cleaned
+ * @iwcq: cq to be cleaned
+ */
+static void zxdh_clean_cqes(struct zxdh_qp *qp, struct zxdh_ucq *iwucq)
+{
+       struct zxdh_cq *ukcq = &iwucq->cq;
+       int ret;
+
+       ret = pthread_spin_lock(&iwucq->lock);
+       if (ret)
+               return;
+
+       zxdh_clean_cq(qp, ukcq);
+       pthread_spin_unlock(&iwucq->lock);
+}
+
+static void zxdh_init_qp_indices(struct zxdh_qp *qp)
+{
+       __u32 sq_ring_size;
+       sq_ring_size = ZXDH_RING_SIZE(qp->sq_ring);
+       ZXDH_RING_INIT(qp->sq_ring, sq_ring_size);
+       ZXDH_RING_INIT(qp->initial_ring, sq_ring_size);
+       qp->swqe_polarity = 0;
+       qp->swqe_polarity_deferred = 1;
+       qp->rwqe_polarity = 0;
+       qp->rwqe_signature = 0;
+       ZXDH_RING_INIT(qp->rq_ring, qp->rq_size);
+}
+
 /**
  * zxdh_umodify_qp - send qp modify to driver
  * @qp: qp to modify
@@ -1705,6 +1744,18 @@ int zxdh_umodify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, int attr_mask)
        } else {
                ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof(cmd));
        }
+
+       if (!ret &&
+           (attr_mask & IBV_QP_STATE) &&
+           attr->qp_state == IBV_QPS_RESET) {
+               if (iwuqp->send_cq)
+                       zxdh_clean_cqes(&iwuqp->qp, iwuqp->send_cq);
+
+               if (iwuqp->recv_cq && iwuqp->recv_cq != iwuqp->send_cq)
+                       zxdh_clean_cqes(&iwuqp->qp, iwuqp->recv_cq);
+               zxdh_init_qp_indices(&iwuqp->qp);
+       }
+
        if (!ret && (attr_mask & IBV_QP_PATH_MTU) &&
            qp->qp_type == IBV_QPT_RC) {
                mtu = mtu_enum_to_int(attr->path_mtu);
@@ -1736,24 +1787,6 @@ static void zxdh_issue_flush(struct ibv_qp *qp, bool sq_flush, bool rq_flush)
                                sizeof(cmd_ex), &resp, sizeof(resp));
 }
 
-/**
- * zxdh_clean_cqes - clean cq entries for qp
- * @qp: qp for which completions are cleaned
- * @iwcq: cq to be cleaned
- */
-static void zxdh_clean_cqes(struct zxdh_qp *qp, struct zxdh_ucq *iwucq)
-{
-       struct zxdh_cq *cq = &iwucq->cq;
-       int ret;
-
-       ret = pthread_spin_lock(&iwucq->lock);
-       if (ret)
-               return;
-
-       zxdh_clean_cq(qp, cq);
-       pthread_spin_unlock(&iwucq->lock);
-}
-
 /**
  * zxdh_udestroy_qp - destroy qp
  * @qp: qp to destroy
@@ -1851,16 +1884,10 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
        struct zxdh_umr *umr = NULL;
        __u64 mr_va = 0, mw_va = 0, value_dffer = 0, mw_pa_pble_index = 0;
        __u16 mr_offset = 0;
-       iwvctx = container_of(ib_qp->context, struct zxdh_uvcontext,
-                             ibv_ctx.context);
-       if (ib_qp->state != IBV_QPS_RTS) {
-               *bad_wr = ib_wr;
-               verbs_err(&iwvctx->ibv_ctx, "zrdma: post send at state:%d\n",
-                         ib_qp->state);
-               return -EINVAL;
-       }
 
        iwuqp = container_of(ib_qp, struct zxdh_uqp, vqp.qp);
+       iwvctx = container_of(ib_qp->context, struct zxdh_uvcontext,
+                             ibv_ctx.context);
        dev_attrs = &iwvctx->dev_attrs;
 
        err = pthread_spin_lock(&iwuqp->lock);
@@ -1918,9 +1945,7 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
                                        iwuqp->inline_data, ib_wr,
                                        &info.op.inline_rdma_send.len);
                                if (ret) {
-                                       verbs_err(
-                                               &iwvctx->ibv_ctx,
-                                               "zrdma: get inline data fail\n");
+                                       printf("err:zxdh_get_inline_data fail\n");
                                        pthread_spin_unlock(&iwuqp->lock);
                                        return -EINVAL;
                                }
@@ -1937,11 +1962,11 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
                                                ib_wr->wr.ud.remote_qkey;
                                        info.op.inline_rdma_send.dest_qp =
                                                ib_wr->wr.ud.remote_qpn;
-                                       ret = zxdh_ud_inline_send(&iwuqp->qp,
-                                                                 &info, false);
+                                       ret = zxdh_ud_inline_send(
+                                               &iwuqp->qp, &info, false);
                                } else {
-                                       ret = zxdh_rc_inline_send(&iwuqp->qp,
-                                                                 &info, false);
+                                       ret = zxdh_rc_inline_send(
+                                               &iwuqp->qp, &info, false);
                                }
                        } else {
                                info.op.send.num_sges = ib_wr->num_sge;
@@ -1960,10 +1985,10 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
                                        info.op.inline_rdma_send.dest_qp =
                                                ib_wr->wr.ud.remote_qpn;
                                        ret = zxdh_ud_send(&iwuqp->qp, &info,
-                                                          false);
+                                               false);
                                } else {
                                        ret = zxdh_rc_send(&iwuqp->qp, &info,
-                                                          false);
+                                               false);
                                }
                        }
                        if (ret)
@@ -1995,9 +2020,7 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
                                        iwuqp->inline_data, ib_wr,
                                        &info.op.inline_rdma_write.len);
                                if (ret) {
-                                       verbs_err(
-                                               &iwvctx->ibv_ctx,
-                                               "zrdma: get inline data fail\n");
+                                       printf("err:zxdh_get_inline_data fail\n");
                                        pthread_spin_unlock(&iwuqp->lock);
                                        return -EINVAL;
                                }
@@ -2007,8 +2030,8 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
                                        ib_wr->wr.rdma.remote_addr;
                                info.op.inline_rdma_write.rem_addr.stag =
                                        ib_wr->wr.rdma.rkey;
-                               ret = zxdh_inline_rdma_write(&iwuqp->qp, &info,
-                                                            false);
+                               ret = zxdh_inline_rdma_write(&iwuqp->qp,
+                                                            &info, false);
                        } else {
                                info.op.rdma_write.lo_sg_list =
                                        (void *)ib_wr->sg_list;
@@ -2017,7 +2040,8 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
                                        ib_wr->wr.rdma.remote_addr;
                                info.op.rdma_write.rem_addr.stag =
                                        ib_wr->wr.rdma.rkey;
-                               ret = zxdh_rdma_write(&iwuqp->qp, &info, false);
+                               ret = zxdh_rdma_write(&iwuqp->qp, &info,
+                                                     false);
                        }
                        if (ret)
                                err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
@@ -2036,7 +2060,8 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
 
                info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
                info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
-               ret = zxdh_rdma_read(&iwuqp->qp, &info, false, false);
+               ret = zxdh_rdma_read(&iwuqp->qp, &info, false,
+                                    false);
                if (ret)
                        err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
                                      ENOMEM :
@@ -2383,20 +2408,17 @@ int zxdh_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
 
                ret = zxdh_mw_bind(&iwuqp->qp, &info, false);
                if (ret)
-                       err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
-                                     ENOMEM :
-                                     EINVAL;
+                       err = ENOMEM;
+
                break;
        case IBV_WR_LOCAL_INV:
                info.op_type = ZXDH_OP_TYPE_LOCAL_INV;
                info.op.inv_local_stag.target_stag =
                        ib_wr->invalidate_rkey;
                ret = zxdh_stag_local_invalidate(&iwuqp->qp, &info,
-                                                true);
+                                        true);
                if (ret)
-                       err = (ret == ZXDH_ERR_QP_TOOMANY_WRS_POSTED) ?
-                                     ENOMEM :
-                                     EINVAL;
+                       err = ENOMEM;
                break;
        default:
                /* error */
@@ -2441,6 +2463,7 @@ int zxdh_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
 
        if (unlikely(ib_qp->state == IBV_QPS_RESET || ib_qp->srq)) {
                *bad_wr = ib_wr;
+               printf("err:post recv at reset or using srq\n");
                return -EINVAL;
        }
 
@@ -2490,9 +2513,18 @@ error:
 struct ibv_ah *zxdh_ucreate_ah(struct ibv_pd *ibpd, struct ibv_ah_attr *attr)
 {
        struct zxdh_uah *ah;
+       union ibv_gid sgid;
        struct zxdh_ucreate_ah_resp resp;
        int err;
 
+       memset(&resp, 0, sizeof(resp));
+       err = ibv_query_gid(ibpd->context, attr->port_num, attr->grh.sgid_index,
+                           &sgid);
+       if (err) {
+               errno = err;
+               return NULL;
+       }
+
        ah = calloc(1, sizeof(*ah));
        if (!ah)
                return NULL;
@@ -2584,10 +2616,10 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
        if (!(dev_attrs->feature_flags & ZXDH_FEATURE_CQ_RESIZE))
                return -EOPNOTSUPP;
 
-       if (cqe > ZXDH_MAX_CQ_SIZE)
+       if (cqe > dev_attrs->max_hw_cq_size)
                return -EINVAL;
 
-       cqe_needed = zxdh_cq_round_up(cqe + 1);
+       cqe_needed = zxdh_cq_round_up(cqe);
 
        if (cqe_needed < ZXDH_U_MINCQ_SIZE)
                cqe_needed = ZXDH_U_MINCQ_SIZE;
@@ -2609,6 +2641,10 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
                goto err_buf;
        }
 
+       ret = pthread_spin_lock(&iwucq->lock);
+       if (ret)
+               goto err_lock;
+
        new_mr.ibv_mr.pd = iwucq->vmr.ibv_mr.pd;
        reg_mr_cmd.reg_type = ZXDH_MEMREG_TYPE_CQ;
        reg_mr_cmd.cq_pages = cq_pages;
@@ -2620,10 +2656,6 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
        if (ret)
                goto err_dereg_mr;
 
-       ret = pthread_spin_lock(&iwucq->lock);
-       if (ret)
-               goto err_lock;
-
        cmd.user_cq_buffer = (__u64)((uintptr_t)cq_base);
        ret = ibv_cmd_resize_cq(&iwucq->verbs_cq.cq, cqe_needed, &cmd.ibv_cmd,
                                sizeof(cmd), &resp, sizeof(resp));
@@ -2642,10 +2674,10 @@ int zxdh_uresize_cq(struct ibv_cq *cq, int cqe)
        return ret;
 
 err_resize:
-       pthread_spin_unlock(&iwucq->lock);
-err_lock:
        ibv_cmd_dereg_mr(&new_mr);
 err_dereg_mr:
+       pthread_spin_unlock(&iwucq->lock);
+err_lock:
        free(cq_buf);
 err_buf:
        zxdh_free_hw_buf(cq_base, cq_size);
@@ -2735,7 +2767,8 @@ static int zxdh_alloc_srq_buf(struct zxdh_usrq *iwusrq,
        return 0;
 }
 
-static int zxdh_reg_srq_mr(struct ibv_pd *pd, struct zxdh_srq_init_info *info,
+static int zxdh_reg_srq_mr(struct ibv_pd *pd,
+                          struct zxdh_srq_init_info *info,
                           size_t total_srq_size, uint16_t srq_pages,
                           uint16_t srq_list_pages, struct zxdh_usrq *iwusrq)
 {
@@ -3043,7 +3076,8 @@ int zxdh_uquery_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr)
 }
 
 static int zxdh_check_srq_valid(struct ibv_recv_wr *recv_wr,
-                               struct zxdh_usrq *iwusrq, struct zxdh_srq *srq)
+                               struct zxdh_usrq *iwusrq,
+                               struct zxdh_srq *srq)
 {
        if (unlikely(recv_wr->num_sge > iwusrq->max_sge))
                return -EINVAL;
@@ -3054,8 +3088,9 @@ static int zxdh_check_srq_valid(struct ibv_recv_wr *recv_wr,
        return 0;
 }
 
-static void zxdh_fill_srq_wqe(struct zxdh_usrq *iwusrq, struct zxdh_srq *srq,
-                             __le64 *wqe_64, struct ibv_recv_wr *recv_wr)
+static void zxdh_fill_srq_wqe(struct zxdh_usrq *iwusrq,
+                             struct zxdh_srq *srq, __le64 *wqe_64,
+                             struct ibv_recv_wr *recv_wr)
 {
        __u32 byte_off;
        int i;
@@ -3099,8 +3134,8 @@ static void zxdh_fill_srq_wqe(struct zxdh_usrq *iwusrq, struct zxdh_srq *srq,
        zxdh_dbg(ZXDH_DBG_SRQ, "%s wqe_64[5]:0x%llx\n", __func__, wqe_64[5]);
 }
 
-static void zxdh_get_wqe_index(struct zxdh_srq *srq, __le16 *wqe_16, __u16 *buf,
-                              __u16 nreq, __u16 *idx)
+static void zxdh_get_wqe_index(struct zxdh_srq *srq, __le16 *wqe_16,
+                              __u16 *buf, __u16 nreq, __u16 *idx)
 {
        int i;
 
@@ -3207,3 +3242,10 @@ void zxdh_set_debug_mask(void)
        if (env)
                zxdh_debug_mask = strtol(env, NULL, 0);
 }
+
+int zxdh_get_write_imm_split_switch(void)
+{
+       char *env;
+       env = getenv("ZXDH_WRITE_IMM_SPILT_ENABLE");
+       return (env != NULL) ? atoi(env) : 0;
+}
diff --git a/providers/zrdma/zxdh_verbs.h b/providers/zrdma/zxdh_verbs.h
index e3974c1..b72fa74 100644
--- a/providers/zrdma/zxdh_verbs.h
+++ b/providers/zrdma/zxdh_verbs.h
@@ -661,10 +661,10 @@ __le64 *zxdh_qp_get_next_recv_wqe(struct zxdh_qp *qp, __u32 *wqe_idx);
 void zxdh_clean_cq(void *q, struct zxdh_cq *cq);
 enum zxdh_status_code zxdh_nop(struct zxdh_qp *qp, __u64 wr_id, bool signaled,
                               bool post_sq);
-enum zxdh_status_code zxdh_fragcnt_to_quanta_sq(__u32 frag_cnt, __u16 *quanta);
 enum zxdh_status_code zxdh_fragcnt_to_wqesize_rq(__u32 frag_cnt,
                                                 __u16 *wqe_size);
 void zxdh_get_sq_wqe_shift(__u32 sge, __u32 inline_data, __u8 *shift);
+
 void zxdh_get_rq_wqe_shift(__u32 sge, __u8 *shift);
 enum zxdh_status_code zxdh_get_sqdepth(struct zxdh_dev_attrs *dev_attrs,
                                       __u32 sq_size, __u8 shift,
-- 
2.27.0