Re: [RFC 3/4] hw/nvme: add NVMe Admin Security SPDM support


From: Wilfred Mallawa
Subject: Re: [RFC 3/4] hw/nvme: add NVMe Admin Security SPDM support
Date: Wed, 15 Jan 2025 23:54:32 +0000

On Fri, 2025-01-10 at 11:03 +0100, Klaus Jensen wrote:
> On Jan  7 15:29, Wilfred Mallawa wrote:
> > Adds NVMe Admin Security Send/Receive command support for DMTF's
> > SPDM. The transport binding for SPDM is defined in DMTF DSP0286.
> > 
> > Signed-off-by: Wilfred Mallawa <wilfred.mallawa@wdc.com>
> > ---
> >  hw/nvme/ctrl.c       | 207 ++++++++++++++++++++++++++++++++++++++++++-
> >  hw/nvme/nvme.h       |   5 ++
> >  include/block/nvme.h |  15 ++++
> >  3 files changed, 226 insertions(+), 1 deletion(-)
> > 
> > diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
> > index 68903d1d70..68341e735f 100644
> > --- a/hw/nvme/ctrl.c
> > +++ b/hw/nvme/ctrl.c
> > @@ -283,6 +283,8 @@ static const uint32_t nvme_cse_acs[256] = {
> >      [NVME_ADM_CMD_FORMAT_NVM]       = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
> >      [NVME_ADM_CMD_DIRECTIVE_RECV]   = NVME_CMD_EFF_CSUPP,
> >      [NVME_ADM_CMD_DIRECTIVE_SEND]   = NVME_CMD_EFF_CSUPP,
> > +    [NVME_ADM_CMD_SECURITY_SEND]   = NVME_CMD_EFF_CSUPP,
> > +    [NVME_ADM_CMD_SECURITY_RECV]   = NVME_CMD_EFF_CSUPP,
> >  };
> >  
> >  static const uint32_t nvme_cse_iocs_none[256];
> > @@ -7182,6 +7184,205 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
> >      return NVME_SUCCESS;
> >  }
> >  
> > +static uint16_t nvme_sec_prot_spdm_send(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    StorageSpdmTransportHeader hdr = {0};
> > +    uint8_t *sec_buf;
> > +    uint32_t transfer_len = le32_to_cpu(req->cmd.cdw11);
> > +    uint32_t transport_transfer_len = transfer_len;
> > +    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
> > +    uint32_t recvd;
> > +    uint16_t nvme_cmd_status;
> > +    uint16_t ret;
> > +    uint8_t secp = (dw10 >> 24) & 0xFF;
> > +    uint8_t spsp1 = (dw10 >> 16) & 0xFF;
> > +    uint8_t spsp0 = (dw10 >> 8) & 0xFF;
> > +    bool spdm_res;
> > +
> > +    transport_transfer_len += sizeof(hdr);
> > +    if (transport_transfer_len > SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE) {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    /* Generate the NVMe transport header */
> > +    hdr.security_protocol = secp;
> > +    hdr.security_protocol_specific = cpu_to_le16((spsp1 << 8) | spsp0);
> > +    hdr.inc_512 = false;
> > +    hdr.length = cpu_to_le32(transport_transfer_len);
> > +
> > +    sec_buf = g_malloc0(transport_transfer_len);
> > +    if (!sec_buf) {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    /* Attach the transport header */
> > +    memcpy(sec_buf, &hdr, sizeof(hdr));
> > +    ret = nvme_h2c(n, sec_buf + sizeof(hdr), transfer_len, req);
> > +    if (ret) {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    spdm_res = spdm_socket_send(n->spdm_socket, SPDM_SOCKET_STORAGE_CMD_IF_SEND,
> > +                                SPDM_SOCKET_TRANSPORT_TYPE_NVME, sec_buf,
> > +                                transport_transfer_len);
> > +    if (!spdm_res) {
> > +        g_free(sec_buf);
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    /* The responder shall ack with message status */
> > +    recvd = spdm_socket_receive(n->spdm_socket, SPDM_SOCKET_TRANSPORT_TYPE_NVME,
> > +                                (uint8_t *)&nvme_cmd_status,
> > +                                SPDM_SOCKET_MAX_MSG_STATUS_LEN);
> > +
> > +    nvme_cmd_status = cpu_to_be16(nvme_cmd_status);
> > +
> > +    if (recvd < SPDM_SOCKET_MAX_MSG_STATUS_LEN) {
> > +        g_free(sec_buf);
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    g_free(sec_buf);
> > +    return nvme_cmd_status;
> > +}
> > +
> > +/* From host to controller */
> > +static uint16_t nvme_security_send(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
> > +    uint8_t secp = (dw10 >> 24) & 0xff;
> > +
> > +    switch (secp) {
> > +    case NVME_SEC_PROT_DMTF_SPDM:
> > +        return nvme_sec_prot_spdm_send(n, req);
> > +    default:
> > +        /* Unsupported Security Protocol Type */
> > +        return NVME_INVALID_FIELD | NVME_DNR;
> > +    }
> > +
> > +    return NVME_INVALID_FIELD | NVME_DNR;
> > +}
> > +
> > +static uint16_t nvme_sec_prot_spdm_receive(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    StorageSpdmTransportHeader hdr = {0};
> > +    uint8_t *rsp_spdm_buf;
> > +    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
> > +    uint32_t alloc_len = le32_to_cpu(req->cmd.cdw11);
> > +    uint32_t recvd, spdm_res;
> > +    uint16_t nvme_cmd_status;
> > +    uint16_t ret;
> > +    uint8_t secp = (dw10 >> 24) & 0xFF;
> > +    uint8_t spsp1 = (dw10 >> 16) & 0xFF;
> > +    uint8_t spsp0 = (dw10 >> 8) & 0xFF;
> > +
> > +    if (!alloc_len) {
> > +        return NVME_INVALID_FIELD | NVME_DNR;
> > +    }
> > +
> > +    /* Generate the NVMe transport header */
> > +    hdr.security_protocol = secp;
> > +    hdr.security_protocol_specific = cpu_to_le16((spsp1 << 8) | spsp0);
> > +    hdr.inc_512 = false;
> > +    hdr.length = cpu_to_le32(alloc_len);
> > +
> > +    /* Forward if_recv to the SPDM Server with SPSP0 */
> > +    spdm_res = spdm_socket_send(n->spdm_socket, SPDM_SOCKET_STORAGE_CMD_IF_RECV,
> > +                                SPDM_SOCKET_TRANSPORT_TYPE_NVME,
> > +                                (uint8_t *)&hdr, sizeof(hdr));
> > +    if (!spdm_res) {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    /* The responder shall ack with message status */
> > +    recvd = spdm_socket_receive(n->spdm_socket, SPDM_SOCKET_TRANSPORT_TYPE_NVME,
> > +                                (uint8_t *)&nvme_cmd_status,
> > +                                SPDM_SOCKET_MAX_MSG_STATUS_LEN);
> > +
> > +    nvme_cmd_status = cpu_to_be16(nvme_cmd_status);
> > +
> > +
> > +    if (recvd < SPDM_SOCKET_MAX_MSG_STATUS_LEN) {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    /* An error here implies the prior if_recv from requester was spurious */
> > +    if (nvme_cmd_status != NVME_SUCCESS) {
> > +        return nvme_cmd_status;
> > +    }
> > +
> > +    /* Clear to start receiving data from the server */
> > +    rsp_spdm_buf = g_malloc0(alloc_len);
> 
> You might allocate quite a bit of memory here. Is that necessary? Would
> it be better to receive chunked? Is that even possible with the socket
> receive API?
Chunking is supported, but it's handled at a lower layer. For example, if
the SPDM server is using `libspdm`, the server can configure a maximum
transfer size; `spdm-utils`, for instance, uses 0x1200.
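
For reference, a rough sketch of how a libspdm-based responder might set
that cap (illustrative only; the exact call site in `spdm-utils` may
differ):

    /* Illustrative sketch: cap the responder's per-message transfer
     * size via libspdm's data API. */
    uint32_t max_xfer = 0x1200;
    libspdm_data_parameter_t param = {0};

    param.location = LIBSPDM_DATA_LOCATION_LOCAL;
    libspdm_set_data(spdm_context, LIBSPDM_DATA_CAPABILITY_DATA_TRANSFER_SIZE,
                     &param, &max_xfer, sizeof(max_xfer));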

We could always add a check here so we don't exceed a specified length,
but I figured that if memory was an issue, the `g_malloc0` failing would
be sufficient?
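
If we do add one, it could mirror the bound already used on the send
path. An untested sketch:

    /* Untested sketch: bound the receive allocation the same way the
     * send path bounds its transfer length. */
    if (alloc_len > SPDM_SOCKET_MAX_MESSAGE_BUFFER_SIZE) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }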

> 
> > +    if (!rsp_spdm_buf) {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    recvd = spdm_socket_receive(n->spdm_socket,
> > +                                SPDM_SOCKET_TRANSPORT_TYPE_NVME,
> > +                                rsp_spdm_buf, alloc_len);
> > +    if (!recvd) {
> > +        g_free(rsp_spdm_buf);
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    ret = nvme_c2h(n, rsp_spdm_buf, MIN(recvd, alloc_len), req);
> > +    g_free(rsp_spdm_buf);
> > +
> > +    if (alloc_len < recvd)  {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> 
> If this condition was true, wouldn't that mean you had more bytes
> written into rsp_spdm_buf than you requested? And you would have
> written
> out of bounds?
Good catch, this condition shouldn't ever actually trigger with the
current way the socket API works. The socket API checks that the
`bytes_to_receive` (`alloc_len` in this case) is bigger than the expected
incoming message size. I'll fix it up in V2.
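
Roughly, the helper guards it like this (paraphrased; the variable names
are not the actual ones in the socket code):

    /* Paraphrased shape of the guard in the socket helper */
    if (incoming_size > bytes_to_receive) {
        return 0; /* refuse rather than overflow the caller's buffer */
    }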
> 
> > +
> > +    if (ret) {
> > +        return NVME_NO_COMPLETE | NVME_DNR;
> > +    }
> > +
> > +    return NVME_SUCCESS;
> > +}
> > +
> > +static uint16_t nvme_get_sec_prot_info(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    uint32_t alloc_len = le32_to_cpu(req->cmd.cdw11);
> > +    uint8_t resp[12] = {0};
> > +
> > +    if (alloc_len < 12) {
> > +        return NVME_INVALID_FIELD | NVME_DNR;
> > +    }
> 
> NVMe specifies that "Allocation Length (AL): The value of this field is
> specific to the Security Protocol In command with the INC_512 field
> cleared to 0h as defined in SPC-5." I can't seem to find the dword
> alignment requirement anywhere in the referenced specifications.
> 
Ah good point, will update this check.
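
One possible V2 shape (just a sketch, not final): drop the hard 12-byte
floor and truncate the response to the allocation length instead, since
INC_512 is cleared here:

    /* Sketch: truncate the discovery response rather than rejecting
     * short allocation lengths outright. */
    return nvme_c2h(n, resp, MIN(alloc_len, sizeof(resp)), req);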
> > +
> > +    /* Supported Security Protocol List Length */
> > +    resp[6] = 0; /* MSB */
> > +    resp[7] = 2; /* LSB */
> > +    /* Supported Security Protocol List */
> > +    resp[8] = SFSC_SECURITY_PROT_INFO;
> > +    resp[9] = NVME_SEC_PROT_DMTF_SPDM;
> > +
> > +    return nvme_c2h(n, resp, sizeof(resp), req);
> > +}
> > +
> > +/* From controller to host */
> > +static uint16_t nvme_security_receive(NvmeCtrl *n, NvmeRequest *req)
> > +{
> > +    uint32_t dw10 = le32_to_cpu(req->cmd.cdw10);
> > +    uint16_t spsp = dw10 & 0xFFFF;
> 
> Should this be (dw10 >> 8) & 0xffff? The lower 8 bits are the NVMe
> Security Specific Field (NSSF).
Yep! Thank you! Will fixup.
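
For V2 then, roughly:

    /* SPSP occupies bits 23:08 of CDW10; bits 07:00 are the NSSF */
    uint16_t spsp = (dw10 >> 8) & 0xFFFF;
    uint8_t  nssf = dw10 & 0xFF;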
> 
> > +    uint8_t secp = (dw10 >> 24) & 0xff;
> > +
> > +    switch (secp) {
> > +    case SFSC_SECURITY_PROT_INFO:
> > +        switch (spsp) {
> > +        case 0:
> > +            /* Supported security protocol list */
> > +            return nvme_get_sec_prot_info(n, req);
> > +        case 1:
> > +            /* Certificate data */
> > +        default:
> > +            return NVME_INVALID_FIELD | NVME_DNR;
> > +        }
> > +    case NVME_SEC_PROT_DMTF_SPDM:
> > +        return nvme_sec_prot_spdm_receive(n, req);
> > +    default:
> > +        return NVME_INVALID_FIELD | NVME_DNR;
> > +    }
> > +}
> > +
> >  static uint16_t nvme_directive_send(NvmeCtrl *n, NvmeRequest *req)
> >  {
> >      return NVME_INVALID_FIELD | NVME_DNR;
> > @@ -7289,6 +7490,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
> >          return nvme_directive_send(n, req);
> >      case NVME_ADM_CMD_DIRECTIVE_RECV:
> >          return nvme_directive_receive(n, req);
> > +    case NVME_ADM_CMD_SECURITY_SEND:
> > +        return nvme_security_send(n, req);
> > +    case NVME_ADM_CMD_SECURITY_RECV:
> > +        return nvme_security_receive(n, req);
> >      default:
> >          g_assert_not_reached();
> >      }
> > @@ -8708,7 +8913,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
> >      id->ver = cpu_to_le32(NVME_SPEC_VER);
> >      id->oacs =
> >          cpu_to_le16(NVME_OACS_NS_MGMT | NVME_OACS_FORMAT | NVME_OACS_DBBUF |
> > -                    NVME_OACS_DIRECTIVES);
> > +                    NVME_OACS_DIRECTIVES | NVME_OACS_SECURITY);
> >      id->cntrltype = 0x1;
> >  
> >      /*
> > diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
> > index 7242206910..c8ad20ee34 100644
> > --- a/hw/nvme/nvme.h
> > +++ b/hw/nvme/nvme.h
> > @@ -459,6 +459,8 @@ static inline const char *nvme_adm_opc_str(uint8_t opc)
> >      case NVME_ADM_CMD_DIRECTIVE_RECV:   return "NVME_ADM_CMD_DIRECTIVE_RECV";
> >      case NVME_ADM_CMD_DBBUF_CONFIG:     return "NVME_ADM_CMD_DBBUF_CONFIG";
> >      case NVME_ADM_CMD_FORMAT_NVM:       return "NVME_ADM_CMD_FORMAT_NVM";
> > +    case NVME_ADM_CMD_SECURITY_SEND:    return "NVME_ADM_CMD_SECURITY_SEND";
> > +    case NVME_ADM_CMD_SECURITY_RECV:    return "NVME_ADM_CMD_SECURITY_RECV";
> >      default:                            return "NVME_ADM_CMD_UNKNOWN";
> >      }
> >  }
> > @@ -636,6 +638,9 @@ typedef struct NvmeCtrl {
> >      } next_pri_ctrl_cap;    /* These override pri_ctrl_cap after reset */
> >      uint32_t    dn; /* Disable Normal */
> >      NvmeAtomic  atomic;
> > +
> > +    /* Socket mapping to SPDM over NVMe Security In/Out commands */
> > +    int spdm_socket;
> >  } NvmeCtrl;
> >  
> >  typedef enum NvmeResetType {
> > diff --git a/include/block/nvme.h b/include/block/nvme.h
> > index f4d108841b..e2352cfb1e 100644
> > --- a/include/block/nvme.h
> > +++ b/include/block/nvme.h
> > @@ -1733,6 +1733,21 @@ enum NvmeDirectiveOperations {
> >      NVME_DIRECTIVE_RETURN_PARAMS = 0x1,
> >  };
> >  
> > +typedef enum SfscSecurityProtocol {
> > +    SFSC_SECURITY_PROT_INFO = 0x00,
> > +} SfscSecurityProtocol;
> > +
> > +typedef enum NvmeSecurityProtocols {
> > +    NVME_SEC_PROT_DMTF_SPDM    = 0xE8,
> > +} NvmeSecurityProtocols;
> > +
> > +typedef enum SpdmOperationCodes {
> > +    SPDM_STORAGE_DISCOVERY      = 0x1, /* Mandatory */
> > +    SPDM_STORAGE_PENDING_INFO   = 0x2, /* Optional */
> > +    SPDM_STORAGE_MSG            = 0x5, /* Mandatory */
> > +    SPDM_STORAGE_SEC_MSG        = 0x6, /* Optional */
> > +} SpdmOperationCodes;
> > +
> >  typedef struct QEMU_PACKED NvmeFdpConfsHdr {
> >      uint16_t num_confs;
> >      uint8_t  version;
> > -- 
> > 2.47.1
> > 
> > 

