qemu-devel

RE: [PATCH v7 03/11] hw/block/nvme: Add support for Namespace Types


From: Dmitry Fomichev
Subject: RE: [PATCH v7 03/11] hw/block/nvme: Add support for Namespace Types
Date: Wed, 21 Oct 2020 01:50:24 +0000

> -----Original Message-----
> From: Klaus Jensen <its@irrelevant.dk>
> Sent: Monday, October 19, 2020 4:54 PM
> To: Dmitry Fomichev <Dmitry.Fomichev@wdc.com>
> Cc: Keith Busch <kbusch@kernel.org>; Klaus Jensen
> <k.jensen@samsung.com>; Kevin Wolf <kwolf@redhat.com>; Philippe
> Mathieu-Daudé <philmd@redhat.com>; Maxim Levitsky
> <mlevitsk@redhat.com>; Fam Zheng <fam@euphon.net>; Niklas Cassel
> <Niklas.Cassel@wdc.com>; Damien Le Moal <Damien.LeMoal@wdc.com>;
> qemu-block@nongnu.org; qemu-devel@nongnu.org; Alistair Francis
> <Alistair.Francis@wdc.com>; Matias Bjorling <Matias.Bjorling@wdc.com>
> Subject: Re: [PATCH v7 03/11] hw/block/nvme: Add support for Namespace
> Types
> 
> On Oct 19 11:17, Dmitry Fomichev wrote:
> > From: Niklas Cassel <niklas.cassel@wdc.com>
> >
> > Define the structures and constants required to implement
> > Namespace Types support.
> >
> > Namespace Types introduce a new command set, "I/O Command Sets",
> > that allows the host to retrieve the command sets associated with
> > a namespace. Introduce support for the command set and enable
> > detection for the NVM Command Set.
> >
> > The new workflows for identify commands rely heavily on zero-filled
> > identify structs. E.g., certain CNS commands are defined to return
> > a zero-filled identify struct when an inactive namespace NSID
> > is supplied.
> >
> > Add a helper function in order to avoid code duplication when
> > reporting zero-filled identify structures.
> >
> > Signed-off-by: Niklas Cassel <niklas.cassel@wdc.com>
> > Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
> > ---
> >  hw/block/nvme-ns.c    |   2 +
> >  hw/block/nvme-ns.h    |   1 +
> >  hw/block/nvme.c       | 169 +++++++++++++++++++++++++++++++++++-------
> >  hw/block/trace-events |   7 ++
> >  include/block/nvme.h  |  65 ++++++++++++----
> >  5 files changed, 202 insertions(+), 42 deletions(-)
> >
> > diff --git a/hw/block/nvme-ns.c b/hw/block/nvme-ns.c
> > index de735eb9f3..c0362426cc 100644
> > --- a/hw/block/nvme-ns.c
> > +++ b/hw/block/nvme-ns.c
> > @@ -41,6 +41,8 @@ static void nvme_ns_init(NvmeNamespace *ns)
> >
> >      id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(ns));
> >
> > +    ns->csi = NVME_CSI_NVM;
> > +
> >      /* no thin provisioning */
> >      id_ns->ncap = id_ns->nsze;
> >      id_ns->nuse = id_ns->ncap;
> > diff --git a/hw/block/nvme-ns.h b/hw/block/nvme-ns.h
> > index a38071884a..d795e44bab 100644
> > --- a/hw/block/nvme-ns.h
> > +++ b/hw/block/nvme-ns.h
> > @@ -31,6 +31,7 @@ typedef struct NvmeNamespace {
> >      int64_t      size;
> >      NvmeIdNs     id_ns;
> >      const uint32_t *iocs;
> > +    uint8_t      csi;
> >
> >      NvmeNamespaceParams params;
> >  } NvmeNamespace;
> > diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> > index 29139d8a17..ca0d0abf5c 100644
> > --- a/hw/block/nvme.c
> > +++ b/hw/block/nvme.c
> > @@ -1503,6 +1503,13 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
> >      return NVME_SUCCESS;
> >  }
> >
> > +static uint16_t nvme_rpt_empty_id_struct(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = {};
> 
> [-pedantic] empty initializer list
> 
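For reference, the "{}" empty initializer list is a GNU extension
(standardized only in C23); "{ 0 }" is the strictly portable spelling,
and both zero the entire array:

    uint8_t id[NVME_IDENTIFY_DATA_SIZE] = { 0 };  /* portable zero-init */
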
> > +
> > +    return nvme_dma(n, id, sizeof(id), DMA_DIRECTION_FROM_DEVICE, req);
> > +}
> > +
> >  static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
> >  {
> >      trace_pci_nvme_identify_ctrl();
> > @@ -1511,11 +1518,23 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
> >                      DMA_DIRECTION_FROM_DEVICE, req);
> >  }
> >
> > +static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> > +
> > +    trace_pci_nvme_identify_ctrl_csi(c->csi);
> > +
> > +    if (c->csi == NVME_CSI_NVM) {
> > +        return nvme_rpt_empty_id_struct(n, req);
> > +    }
> > +
> > +    return NVME_INVALID_FIELD | NVME_DNR;
> > +}
> > +
> >  static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
> >  {
> >      NvmeNamespace *ns;
> >      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> > -    NvmeIdNs *id_ns, inactive = { 0 };
> >      uint32_t nsid = le32_to_cpu(c->nsid);
> >
> >      trace_pci_nvme_identify_ns(nsid);
> > @@ -1526,23 +1545,46 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
> >
> >      ns = nvme_ns(n, nsid);
> >      if (unlikely(!ns)) {
> > -        id_ns = &inactive;
> > -    } else {
> > -        id_ns = &ns->id_ns;
> > +        return nvme_rpt_empty_id_struct(n, req);
> >      }
> >
> > -    return nvme_dma(n, (uint8_t *)id_ns, sizeof(NvmeIdNs),
> > +    return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
> >                      DMA_DIRECTION_FROM_DEVICE, req);
> >  }
> >
> > +static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    NvmeNamespace *ns;
> > +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> > +    uint32_t nsid = le32_to_cpu(c->nsid);
> > +
> > +    trace_pci_nvme_identify_ns_csi(nsid, c->csi);
> > +
> > +    if (!nvme_nsid_valid(n, nsid) || nsid == NVME_NSID_BROADCAST) {
> > +        return NVME_INVALID_NSID | NVME_DNR;
> > +    }
> > +
> > +    ns = nvme_ns(n, nsid);
> > +    if (unlikely(!ns)) {
> > +        return nvme_rpt_empty_id_struct(n, req);
> > +    }
> > +
> > +    if (c->csi == NVME_CSI_NVM) {
> > +        return nvme_rpt_empty_id_struct(n, req);
> > +    }
> > +
> > +    return NVME_INVALID_FIELD | NVME_DNR;
> > +}
> > +
> >  static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
> >  {
> > +    NvmeNamespace *ns;
> >      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> > -    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
> >      uint32_t min_nsid = le32_to_cpu(c->nsid);
> > -    uint32_t *list;
> > -    uint16_t ret;
> > -    int j = 0;
> > +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
> 
> [-pedantic] empty initializer list
> 
> > +    static const int data_len = sizeof(list);
> > +    uint32_t *list_ptr = (uint32_t *)list;
> > +    int i, j = 0;
> >
> >      trace_pci_nvme_identify_nslist(min_nsid);
> >
> > @@ -1556,20 +1598,54 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
> >          return NVME_INVALID_NSID | NVME_DNR;
> >      }
> >
> > -    list = g_malloc0(data_len);
> > -    for (int i = 1; i <= n->num_namespaces; i++) {
> > -        if (i <= min_nsid || !nvme_ns(n, i)) {
> > +    for (i = 1; i <= n->num_namespaces; i++) {
> > +        ns = nvme_ns(n, i);
> > +        if (!ns) {
> >              continue;
> >          }
> > -        list[j++] = cpu_to_le32(i);
> > +        if (ns->params.nsid < min_nsid) {
> 
> Since i == ns->params.nsid, this should be '<=' like the code you
> removed. It really shouldn't be called min_nsid, but oh well.

Right, it needs to be '<='. We can rename min_nsid to start_nsid or
similar since we are touching this code anyway.
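A sketch of the fixed loop with that (hypothetical) start_nsid rename,
otherwise identical to the code in the patch:

    for (i = 1; i <= n->num_namespaces; i++) {
        ns = nvme_ns(n, i);
        if (!ns) {
            continue;
        }
        /* CNS 02h lists only NSIDs strictly greater than CDW1.NSID */
        if (ns->params.nsid <= start_nsid) {
            continue;
        }
        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }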
> 
> > +            continue;
> > +        }
> > +        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
> >          if (j == data_len / sizeof(uint32_t)) {
> >              break;
> >          }
> >      }
> > -    ret = nvme_dma(n, (uint8_t *)list, data_len, DMA_DIRECTION_FROM_DEVICE,
> > -                   req);
> > -    g_free(list);
> > -    return ret;
> > +
> > +    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
> > +}
> > +
> > +static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    NvmeNamespace *ns;
> > +    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> > +    uint32_t min_nsid = le32_to_cpu(c->nsid);
> > +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
> > +    static const int data_len = sizeof(list);
> > +    uint32_t *list_ptr = (uint32_t *)list;
> > +    int i, j = 0;
> > +
> > +    trace_pci_nvme_identify_nslist_csi(min_nsid, c->csi);
> > +
> > +    if (c->csi != NVME_CSI_NVM) {
> > +        return NVME_INVALID_FIELD | NVME_DNR;
> > +    }
> > +
> 
> This is missing the check for 0xffffffff and 0xfffffffe like above.

Will add a similar check here.
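Roughly the same guard as in nvme_identify_nslist() above, e.g.:

    /*
     * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid
     * values for the starting NSID.
     */
    if (min_nsid >= NVME_NSID_BROADCAST - 1) {
        return NVME_INVALID_NSID | NVME_DNR;
    }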

> 
> > +    for (i = 1; i <= n->num_namespaces; i++) {
> > +        ns = nvme_ns(n, i);
> > +        if (!ns) {
> > +            continue;
> > +        }
> > +        if (ns->params.nsid < min_nsid) {
> 
> Should be '<='.
> 
> > +            continue;
> > +        }
> > +        list_ptr[j++] = cpu_to_le32(ns->params.nsid);
> > +        if (j == data_len / sizeof(uint32_t)) {
> > +            break;
> > +        }
> > +    }
> > +
> > +    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
> >  }
> >
> >  static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
> > @@ -1577,13 +1653,17 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
> >      NvmeNamespace *ns;
> >      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> >      uint32_t nsid = le32_to_cpu(c->nsid);
> > -    uint8_t list[NVME_IDENTIFY_DATA_SIZE];
> > +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
> 
> [-pedantic] empty initializer list
> 
> >
> >      struct data {
> >          struct {
> >              NvmeIdNsDescr hdr;
> > -            uint8_t v[16];
> > +            uint8_t v[NVME_NIDL_UUID];
> >          } uuid;
> > +        struct {
> > +            NvmeIdNsDescr hdr;
> > +            uint8_t v;
> > +        } csi;
> >      };
> >
> >      struct data *ns_descrs = (struct data *)list;
> > @@ -1599,19 +1679,31 @@ static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
> >          return NVME_INVALID_FIELD | NVME_DNR;
> >      }
> >
> > -    memset(list, 0x0, sizeof(list));
> > -
> >      /*
> >       * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
> >       * structure, a Namespace UUID (nidt = 0x3) must be reported in the
> >       * Namespace Identification Descriptor. Add the namespace UUID here.
> >       */
> >      ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
> > -    ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
> > -    memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDT_UUID_LEN);
> > +    ns_descrs->uuid.hdr.nidl = NVME_NIDL_UUID;
> > +    memcpy(&ns_descrs->uuid.v, ns->params.uuid.data, NVME_NIDL_UUID);
> >
> > -    return nvme_dma(n, list, NVME_IDENTIFY_DATA_SIZE,
> > -                    DMA_DIRECTION_FROM_DEVICE, req);
> > +    ns_descrs->csi.hdr.nidt = NVME_NIDT_CSI;
> > +    ns_descrs->csi.hdr.nidl = NVME_NIDL_CSI;
> > +    ns_descrs->csi.v = ns->csi;
> > +
> > +    return nvme_dma(n, list, sizeof(list), DMA_DIRECTION_FROM_DEVICE, req);
> > +}
> > +
> > +static uint16_t nvme_identify_cmd_set(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    uint8_t list[NVME_IDENTIFY_DATA_SIZE] = {};
> 
> [-pedantic] empty initializer list
> 
> > +    static const int data_len = sizeof(list);
> > +
> > +    trace_pci_nvme_identify_cmd_set();
> > +
> > +    NVME_SET_CSI(*list, NVME_CSI_NVM);
> > +    return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
> >  }
> >
> 
> --
> One of us - No more doubt, silence or taboo about mental illness.
