From 2ee2190cf494361a79fae68bd35a4c280026414e Mon Sep 17 00:00:00 2001
From: Wenkai Lin <linwenkai6@hisilicon.com>
Date: Sat, 12 Oct 2024 11:15:08 +0800
Subject: [PATCH 36/39] uadk/v1: replace wd_spinlock with pthread_spin_lock

Due to memory differences, using wd_spinlock may cause
synchronization problems; it is better to use the standard
pthread spin lock provided by glibc.
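
As an illustration only (not part of this patch), a minimal sketch of the
glibc spin lock lifecycle that the driver code now follows; "demo_queue"
and its fields are placeholders, not the real qm_queue_info layout:

    #include <pthread.h>
    #include <stdio.h>

    /* placeholder structure standing in for a queue info object */
    struct demo_queue {
        pthread_spinlock_t lock;    /* protects tail */
        unsigned int tail;
    };

    int main(void)
    {
        struct demo_queue q = { .tail = 0 };

        /* init once per queue; PTHREAD_PROCESS_PRIVATE suits a lock
         * that is not shared with other processes via shared memory */
        if (pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE))
            return 1;

        /* lock/unlock around the critical section, as qm_send() now does */
        pthread_spin_lock(&q.lock);
        q.tail++;
        pthread_spin_unlock(&q.lock);

        /* destroy before the owning structure is released,
         * as qm_uninit_queue() now does */
        pthread_spin_destroy(&q.lock);
        printf("tail = %u\n", q.tail);
        return 0;
    }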

Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com>
Signed-off-by: Qi Tao <taoqi10@huawei.com>
---
 v1/drv/hisi_qm_udrv.c  | 51 +++++++++++++++++++++++++++++-------------
 v1/drv/hisi_qm_udrv.h  |  4 ++--
 v1/drv/hisi_rng_udrv.c | 25 +++++++++++++++------
 v1/drv/hisi_rng_udrv.h |  2 +-
 4 files changed, 56 insertions(+), 26 deletions(-)

diff --git a/v1/drv/hisi_qm_udrv.c b/v1/drv/hisi_qm_udrv.c
index 175a5c4..1d4f1d8 100644
--- a/v1/drv/hisi_qm_udrv.c
+++ b/v1/drv/hisi_qm_udrv.c
@@ -20,6 +20,7 @@
 #include <sys/mman.h>
 #include <string.h>
 #include <stdint.h>
+#include <pthread.h>
 #include <sys/ioctl.h>
 #include <sys/epoll.h>
 #include <sys/eventfd.h>
@@ -458,6 +459,11 @@ static int qm_init_queue_info(struct wd_queue *q)
 	struct hisi_qp_ctx qp_ctx = {0};
 	int ret;
 
+	if (!info->sqe_size) {
+		WD_ERR("invalid: sqe size is 0!\n");
+		return -WD_EINVAL;
+	}
+
 	info->sq_tail_index = 0;
 	info->cq_head_index = 0;
 	info->cqc_phase = 1;
@@ -502,11 +508,6 @@ static int qm_set_queue_info(struct wd_queue *q)
 	ret = qm_set_queue_regions(q);
 	if (ret)
 		return -WD_EINVAL;
-	if (!info->sqe_size) {
-		WD_ERR("sqe size =%d err!\n", info->sqe_size);
-		ret = -WD_EINVAL;
-		goto err_with_regions;
-	}
 	info->cq_base = (void *)((uintptr_t)info->sq_base +
 		info->sqe_size * info->sq_depth);
 
@@ -534,8 +535,24 @@ static int qm_set_queue_info(struct wd_queue *q)
 		goto err_with_regions;
 	}
 
+	ret = pthread_spin_init(&info->sd_lock, PTHREAD_PROCESS_PRIVATE);
+	if (ret) {
+		WD_ERR("failed to init qinfo sd_lock!\n");
+		goto free_cache;
+	}
+
+	ret = pthread_spin_init(&info->rc_lock, PTHREAD_PROCESS_PRIVATE);
+	if (ret) {
+		WD_ERR("failed to init qinfo rc_lock!\n");
+		goto uninit_lock;
+	}
+
 	return 0;
 
+uninit_lock:
+	pthread_spin_destroy(&info->sd_lock);
+free_cache:
+	free(info->req_cache);
 err_with_regions:
 	qm_unset_queue_regions(q);
 	return ret;
@@ -576,8 +593,10 @@ void qm_uninit_queue(struct wd_queue *q)
 	struct q_info *qinfo = q->qinfo;
 	struct qm_queue_info *info = qinfo->priv;
 
-	qm_unset_queue_regions(q);
+	pthread_spin_destroy(&info->rc_lock);
+	pthread_spin_destroy(&info->sd_lock);
 	free(info->req_cache);
+	qm_unset_queue_regions(q);
 	free(qinfo->priv);
 	qinfo->priv = NULL;
 }
@@ -605,10 +624,10 @@ int qm_send(struct wd_queue *q, void **req, __u32 num)
 	int ret;
 	__u32 i;
 
-	wd_spinlock(&info->sd_lock);
+	pthread_spin_lock(&info->sd_lock);
 	if (unlikely((__u32)__atomic_load_n(&info->used, __ATOMIC_RELAXED) >
 	    info->sq_depth - num - 1)) {
-		wd_unspinlock(&info->sd_lock);
+		pthread_spin_unlock(&info->sd_lock);
 		WD_ERR("queue is full!\n");
 		return -WD_EBUSY;
 	}
@@ -617,7 +636,7 @@ int qm_send(struct wd_queue *q, void **req, __u32 num)
 		ret = info->sqe_fill[qinfo->atype](req[i], qinfo->priv,
 				info->sq_tail_index);
 		if (unlikely(ret != WD_SUCCESS)) {
-			wd_unspinlock(&info->sd_lock);
+			pthread_spin_unlock(&info->sd_lock);
 			WD_ERR("sqe fill error, ret %d!\n", ret);
 			return -WD_EINVAL;
 		}
@@ -629,7 +648,7 @@ int qm_send(struct wd_queue *q, void **req, __u32 num)
 	}
 
 	ret = qm_tx_update(info, num);
-	wd_unspinlock(&info->sd_lock);
+	pthread_spin_unlock(&info->sd_lock);
 
 	return ret;
 }
@@ -662,9 +681,9 @@ static int check_ds_rx_base(struct qm_queue_info *info,
 		return 0;
 
 	if (before) {
-		wd_spinlock(&info->rc_lock);
+		pthread_spin_lock(&info->rc_lock);
 		qm_rx_from_cache(info, resp, num);
-		wd_unspinlock(&info->rc_lock);
+		pthread_spin_unlock(&info->rc_lock);
 		WD_ERR("wd queue hw error happened before qm receive!\n");
 	} else {
 		WD_ERR("wd queue hw error happened after qm receive!\n");
@@ -705,7 +724,7 @@ int qm_recv(struct wd_queue *q, void **resp, __u32 num)
 	if (unlikely(ret))
 		return ret;
 
-	wd_spinlock(&info->rc_lock);
+	pthread_spin_lock(&info->rc_lock);
 	for (i = 0; i < num; i++) {
 		cqe = info->cq_base + info->cq_head_index * sizeof(struct cqe);
 		if (info->cqc_phase != CQE_PHASE(cqe))
@@ -714,7 +733,7 @@ int qm_recv(struct wd_queue *q, void **resp, __u32 num)
 		mb(); /* make sure the data is all in memory before read */
 		sq_head = CQE_SQ_HEAD_INDEX(cqe);
 		if (unlikely(sq_head >= info->sq_depth)) {
-			wd_unspinlock(&info->rc_lock);
+			pthread_spin_unlock(&info->rc_lock);
 			WD_ERR("CQE_SQ_HEAD_INDEX(%u) error\n", sq_head);
 			return -WD_EIO;
 		}
@@ -726,7 +745,7 @@ int qm_recv(struct wd_queue *q, void **resp, __u32 num)
 		if (!ret) {
 			break;
 		} else if (ret < 0) {
-			wd_unspinlock(&info->rc_lock);
+			pthread_spin_unlock(&info->rc_lock);
 			WD_ERR("recv sqe error %u\n", sq_head);
 			return ret;
 		}
@@ -747,7 +766,7 @@ int qm_recv(struct wd_queue *q, void **resp, __u32 num)
 		ret = i;
 	}
 
-	wd_unspinlock(&info->rc_lock);
+	pthread_spin_unlock(&info->rc_lock);
 
 	return ret;
 }
diff --git a/v1/drv/hisi_qm_udrv.h b/v1/drv/hisi_qm_udrv.h
index 4d54cf6..06ac66a 100644
--- a/v1/drv/hisi_qm_udrv.h
+++ b/v1/drv/hisi_qm_udrv.h
@@ -166,8 +166,8 @@ struct qm_queue_info {
 	qm_sqe_parse sqe_parse[WCRYPTO_MAX_ALG];
 	hisi_qm_sqe_fill_priv sqe_fill_priv;
 	hisi_qm_sqe_parse_priv sqe_parse_priv;
-	struct wd_lock sd_lock;
-	struct wd_lock rc_lock;
+	pthread_spinlock_t sd_lock;
+	pthread_spinlock_t rc_lock;
 	struct wd_queue *q;
 	int (*sgl_info)(struct hw_sgl_info *info);
 	int (*sgl_init)(void *pool, struct wd_sgl *sgl);
diff --git a/v1/drv/hisi_rng_udrv.c b/v1/drv/hisi_rng_udrv.c
index 86a20cb..605ef27 100644
--- a/v1/drv/hisi_rng_udrv.c
+++ b/v1/drv/hisi_rng_udrv.c
@@ -17,6 +17,7 @@
 #include <stdlib.h>
 #include <unistd.h>
 #include <stdio.h>
+#include <pthread.h>
 #include <sys/mman.h>
 #include <string.h>
 #include <stdint.h>
@@ -34,6 +35,7 @@ int rng_init_queue(struct wd_queue *q)
 {
 	struct q_info *qinfo = q->qinfo;
 	struct rng_queue_info *info;
+	int ret;
 
 	info = calloc(1, sizeof(*info));
 	if (!info) {
@@ -41,12 +43,20 @@ int rng_init_queue(struct wd_queue *q)
 		return -ENOMEM;
 	}
 
+	ret = pthread_spin_init(&info->lock, PTHREAD_PROCESS_PRIVATE);
+	if (ret) {
+		free(info);
+		WD_ERR("failed to init rng qinfo lock!\n");
+		return ret;
+	}
+
 	qinfo->priv = info;
 	info->mmio_base = wd_drv_mmap_qfr(q, WD_UACCE_QFRT_MMIO, 0);
 	if (info->mmio_base == MAP_FAILED) {
 		info->mmio_base = NULL;
-		free(qinfo->priv);
 		qinfo->priv = NULL;
+		pthread_spin_destroy(&info->lock);
+		free(info);
 		WD_ERR("mmap trng mmio fail\n");
 		return -ENOMEM;
 	}
@@ -63,6 +73,7 @@ void rng_uninit_queue(struct wd_queue *q)
 
 	free(qinfo->priv);
 	qinfo->priv = NULL;
+	pthread_spin_destroy(&info->lock);
 }
 
 int rng_send(struct wd_queue *q, void **req, __u32 num)
@@ -70,14 +81,14 @@ int rng_send(struct wd_queue *q, void **req, __u32 num)
 	struct q_info *qinfo = q->qinfo;
 	struct rng_queue_info *info = qinfo->priv;
 
-	wd_spinlock(&info->lock);
+	pthread_spin_lock(&info->lock);
 	if (!info->req_cache[info->send_idx]) {
 		info->req_cache[info->send_idx] = req[0];
 		info->send_idx++;
-		wd_unspinlock(&info->lock);
+		pthread_spin_unlock(&info->lock);
 		return 0;
 	}
-	wd_unspinlock(&info->lock);
+	pthread_spin_unlock(&info->lock);
 
 	WD_ERR("queue is full!\n");
 	return -WD_EBUSY;
@@ -128,16 +139,16 @@ int rng_recv(struct wd_queue *q, void **resp, __u32 num)
 	struct wcrypto_cb_tag *tag;
 	__u32 currsize = 0;
 
-	wd_spinlock(&info->lock);
+	pthread_spin_lock(&info->lock);
 	msg = info->req_cache[info->recv_idx];
 	if (!msg) {
-		wd_unspinlock(&info->lock);
+		pthread_spin_unlock(&info->lock);
 		return 0;
 	}
 
 	info->req_cache[info->recv_idx] = NULL;
 	info->recv_idx++;
-	wd_unspinlock(&info->lock);
+	pthread_spin_unlock(&info->lock);
 
 	tag = (void *)(uintptr_t)msg->usr_tag;
 	if (usr && tag->ctx_id != usr)
diff --git a/v1/drv/hisi_rng_udrv.h b/v1/drv/hisi_rng_udrv.h
index 56814a4..3efa10e 100644
--- a/v1/drv/hisi_rng_udrv.h
+++ b/v1/drv/hisi_rng_udrv.h
@@ -29,7 +29,7 @@ struct rng_queue_info {
 	void *req_cache[TRNG_Q_DEPTH];
 	__u8 send_idx;
 	__u8 recv_idx;
-	struct wd_lock lock;
+	pthread_spinlock_t lock;
 };
 
 int rng_init_queue(struct wd_queue *q);
--
2.25.1