From: Yuan Liu
Subject: [PATCH v4 5/8] migration/multifd: implement initialization of qpl compression
Date: Mon, 4 Mar 2024 22:00:25 +0800

The QPL initialization includes memory allocation for the compressed
data buffers and initialization of the QPL jobs.

The QPL initialization also checks whether the In-Memory Analytics
Accelerator (IAA) hardware is available; if the platform has no IAA
hardware, or the IAA hardware cannot be used, QPL compression
initialization fails.
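
For context, the availability check boils down to attempting a
hardware-path job initialization. A minimal standalone sketch of that
probe (illustrative only, not part of this patch; it uses the same
public QPL entry points the code below calls) could look like:

/*
 * Sketch: with qpl_path_hardware, qpl_init_job() only succeeds when an
 * IAA device is present and usable, so a failed init doubles as the
 * hardware-availability check.
 */
#include <stdio.h>
#include <stdlib.h>
#include <qpl/qpl.h>

static int iaa_available(void)
{
    uint32_t job_size = 0;
    qpl_job *job;
    qpl_status status;

    /* the size of the opaque job structure depends on the path */
    status = qpl_get_job_size(qpl_path_hardware, &job_size);
    if (status != QPL_STS_OK) {
        return 0;
    }
    job = malloc(job_size);
    if (job == NULL) {
        return 0;
    }
    /* fails when no IAA hardware can be opened */
    status = qpl_init_job(qpl_path_hardware, job);
    if (status == QPL_STS_OK) {
        qpl_fini_job(job);
    }
    free(job);
    return status == QPL_STS_OK;
}

int main(void)
{
    printf("IAA %savailable\n", iaa_available() ? "" : "not ");
    return 0;
}

If qpl_init_job() fails on the hardware path, the channel setup below
reports the error instead of silently falling back to the software path.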

Signed-off-by: Yuan Liu <yuan1.liu@intel.com>
Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
---
 migration/multifd-qpl.c | 128 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 122 insertions(+), 6 deletions(-)

diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c
index 6b94e732ac..f4db97ca01 100644
--- a/migration/multifd-qpl.c
+++ b/migration/multifd-qpl.c
@@ -33,6 +33,100 @@ struct qpl_data {
     uint32_t *zbuf_hdr;
 };
 
+static void free_zbuf(struct qpl_data *qpl)
+{
+    if (qpl->zbuf != NULL) {
+        munmap(qpl->zbuf, qpl->job_num * qpl->data_size);
+        qpl->zbuf = NULL;
+    }
+    if (qpl->zbuf_hdr != NULL) {
+        g_free(qpl->zbuf_hdr);
+        qpl->zbuf_hdr = NULL;
+    }
+}
+
+static int alloc_zbuf(struct qpl_data *qpl, uint8_t chan_id, Error **errp)
+{
+    int flags = MAP_PRIVATE | MAP_POPULATE | MAP_ANONYMOUS;
+    uint32_t size = qpl->job_num * qpl->data_size;
+    uint8_t *buf;
+
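+    /* MAP_POPULATE prefaults the mapping so the pages are backed up front */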
+    buf = (uint8_t *) mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
+    if (buf == MAP_FAILED) {
+        error_setg(errp, "multifd: %u: alloc_zbuf failed, job num %u, size %u",
+                   chan_id, qpl->job_num, qpl->data_size);
+        return -1;
+    }
+    qpl->zbuf = buf;
+    qpl->zbuf_hdr = g_new0(uint32_t, qpl->job_num);
+    return 0;
+}
+
+static void free_jobs(struct qpl_data *qpl)
+{
+    for (int i = 0; i < qpl->job_num; i++) {
+        /* job_array may be only partially filled if alloc_jobs() failed */
+        if (qpl->job_array[i] != NULL) {
+            qpl_fini_job(qpl->job_array[i]);
+            g_free(qpl->job_array[i]);
+            qpl->job_array[i] = NULL;
+        }
+    }
+    g_free(qpl->job_array);
+    qpl->job_array = NULL;
+}
+
+static int alloc_jobs(struct qpl_data *qpl, uint8_t chan_id, Error **errp)
+{
+    qpl_status status;
+    uint32_t job_size = 0;
+    qpl_job *job = NULL;
+    /* always use IAA hardware accelerator */
+    qpl_path_t path = qpl_path_hardware;
+
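+    /* the required job allocation size depends on the execution path */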
+    status = qpl_get_job_size(path, &job_size);
+    if (status != QPL_STS_OK) {
+        error_setg(errp, "multifd: %u: qpl_get_job_size failed with error %d",
+                   chan_id, status);
+        return -1;
+    }
+    qpl->job_array = g_new0(qpl_job *, qpl->job_num);
+    for (int i = 0; i < qpl->job_num; i++) {
+        job = g_malloc0(job_size);
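+        /* with qpl_path_hardware, init fails when no IAA device is usable */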
+        status = qpl_init_job(path, job);
+        if (status != QPL_STS_OK) {
+            error_setg(errp, "multifd: %u: qpl_init_job failed with error %d",
+                       chan_id, status);
+            g_free(job); /* not yet stored in job_array, free it here */
+            free_jobs(qpl);
+            return -1;
+        }
+        qpl->job_array[i] = job;
+    }
+    return 0;
+}
+
+static int init_qpl(struct qpl_data *qpl, uint32_t job_num, uint32_t data_size,
+                    uint8_t chan_id, Error **errp)
+{
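+    /* record the geometry; free_zbuf() needs it to size the munmap */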
+    qpl->job_num = job_num;
+    qpl->data_size = data_size;
+    if (alloc_zbuf(qpl, chan_id, errp) != 0) {
+        return -1;
+    }
+    if (alloc_jobs(qpl, chan_id, errp) != 0) {
+        free_zbuf(qpl);
+        return -1;
+    }
+    return 0;
+}
+
+static void deinit_qpl(struct qpl_data *qpl)
+{
+    if (qpl != NULL) {
+        free_jobs(qpl);
+        free_zbuf(qpl);
+        qpl->job_num = 0;
+        qpl->data_size = 0;
+    }
+}
+
 /**
  * qpl_send_setup: setup send side
  *
@@ -45,8 +139,15 @@ struct qpl_data {
  */
 static int qpl_send_setup(MultiFDSendParams *p, Error **errp)
 {
-    /* Implement in next patch */
-    return -1;
+    struct qpl_data *qpl;
+
+    qpl = g_new0(struct qpl_data, 1);
+    if (init_qpl(qpl, p->page_count, p->page_size, p->id, errp) != 0) {
+        g_free(qpl);
+        return -1;
+    }
+    p->data = qpl;
+    return 0;
 }
 
 /**
@@ -59,7 +160,11 @@ static int qpl_send_setup(MultiFDSendParams *p, Error **errp)
  */
 static void qpl_send_cleanup(MultiFDSendParams *p, Error **errp)
 {
-    /* Implement in next patch */
+    struct qpl_data *qpl = p->data;
+
+    deinit_qpl(qpl);
+    g_free(p->data);
+    p->data = NULL;
 }
 
 /**
@@ -91,8 +196,15 @@ static int qpl_send_prepare(MultiFDSendParams *p, Error **errp)
  */
 static int qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
 {
-    /* Implement in next patch */
-    return -1;
+    struct qpl_data *qpl;
+
+    qpl = g_new0(struct qpl_data, 1);
+    if (init_qpl(qpl, p->page_count, p->page_size, p->id, errp) != 0) {
+        g_free(qpl);
+        return -1;
+    }
+    p->data = qpl;
+    return 0;
 }
 
 /**
@@ -104,7 +216,11 @@ static int qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
  */
 static void qpl_recv_cleanup(MultiFDRecvParams *p)
 {
-    /* Implement in next patch */
+    struct qpl_data *qpl = p->data;
+
+    deinit_qpl(qpl);
+    g_free(p->data);
+    p->data = NULL;
 }
 
 /**
-- 
2.39.3



