From: Anthony Liguori
Subject: Re: [Qemu-stable] [Qemu-devel] [PATCH] net: reduce the unnecessary memory allocation of multiqueue
Date: Thu, 21 Feb 2013 16:32:38 -0600
User-agent: Notmuch/0.13.2+93~ged93d79 (http://notmuchmail.org) Emacs/23.3.1 (x86_64-pc-linux-gnu)
Jason Wang <address@hidden> writes:
> Edivaldo reports a problem that the array of NetClientState in NICState is
> too large - MAX_QUEUE_NUM (1024) entries - which wastes memory even if
> multiqueue is not used.
>
> Instead of using static arrays, solve this issue by allocating the queues
> on demand for both the NetClientState array in NICState and the
> VirtIONetQueue array in VirtIONet.
>
> Tested by myself with a single virtio-net-pci device. The memory
> allocation is almost the same as before multiqueue was merged.
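A minimal standalone sketch of the on-demand allocation described above,
using GLib as the patch does; the Device and Queue names are illustrative,
not QEMU's:

    #include <glib.h>

    typedef struct Queue { int id; } Queue;

    typedef struct Device {
        Queue *vqs;              /* was: Queue vqs[MAX_QUEUE_NUM]; */
        unsigned max_queues;
    } Device;

    static void device_init(Device *d, unsigned conf_queues)
    {
        /* clamp to at least one queue, then size the array from the
         * configured count rather than the 1024-entry maximum */
        d->max_queues = MAX(conf_queues, 1);
        d->vqs = g_malloc0(sizeof(*d->vqs) * d->max_queues);
    }

    static void device_exit(Device *d)
    {
        g_free(d->vqs);          /* now heap-allocated, so free it */
        d->vqs = NULL;
    }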
>
> Cc: Edivaldo de Araujo Pereira <address@hidden>
> Cc: address@hidden
> Signed-off-by: Jason Wang <address@hidden>
> ---
>  hw/virtio-net.c   |    6 ++++--
>  include/net/net.h |    2 +-
>  net/net.c         |   19 +++++++++----------
>  3 files changed, 14 insertions(+), 13 deletions(-)
>
> diff --git a/hw/virtio-net.c b/hw/virtio-net.c
> index 573c669..70ab641 100644
> --- a/hw/virtio-net.c
> +++ b/hw/virtio-net.c
> @@ -44,7 +44,7 @@ typedef struct VirtIONet
>      VirtIODevice vdev;
>      uint8_t mac[ETH_ALEN];
>      uint16_t status;
> -    VirtIONetQueue vqs[MAX_QUEUE_NUM];
> +    VirtIONetQueue *vqs;
>      VirtQueue *ctrl_vq;
>      NICState *nic;
>      uint32_t tx_timeout;
> @@ -1326,8 +1326,9 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
>      n->vdev.set_status = virtio_net_set_status;
>      n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
>      n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
> +    n->max_queues = MAX(conf->queues, 1);
I think you mean MIN here.
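For reference on the MAX-vs-MIN question, a tiny standalone example of the
two clamping behaviors (the queue counts here are hypothetical):

    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        /* MAX(q, 1) enforces a lower bound: at least one queue.
         * MIN(q, 1) enforces an upper bound: at most one queue. */
        int counts[] = { 0, 1, 4 };
        for (int i = 0; i < 3; i++) {
            int q = counts[i];
            printf("q=%d  MAX(q,1)=%d  MIN(q,1)=%d\n",
                   q, MAX(q, 1), MIN(q, 1));
        }
        return 0;
    }

With q=0 (queues left unconfigured), MAX yields 1 while MIN yields 0; with
q=4, MAX keeps 4 while MIN caps it at 1.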
> +    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
>      n->vqs[0].rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
> -    n->max_queues = conf->queues;
>      n->curr_queues = 1;
>      n->vqs[0].n = n;
>      n->tx_timeout = net->txtimer;
> @@ -1397,6 +1398,7 @@ void virtio_net_exit(VirtIODevice *vdev)
>
>      g_free(n->mac_table.macs);
>      g_free(n->vlans);
> +    g_free(n->vqs);
>
>      for (i = 0; i < n->max_queues; i++) {
>          VirtIONetQueue *q = &n->vqs[i];
> diff --git a/include/net/net.h b/include/net/net.h
> index 43a045e..cb049a1 100644
> --- a/include/net/net.h
> +++ b/include/net/net.h
> @@ -72,7 +72,7 @@ struct NetClientState {
>  };
>
>  typedef struct NICState {
> -    NetClientState ncs[MAX_QUEUE_NUM];
> +    NetClientState *ncs;
>      NICConf *conf;
>      void *opaque;
>      bool peer_deleted;
> diff --git a/net/net.c b/net/net.c
> index be03a8d..6262ed0 100644
> --- a/net/net.c
> +++ b/net/net.c
> @@ -235,23 +235,20 @@ NICState *qemu_new_nic(NetClientInfo *info,
>                         const char *name,
>                         void *opaque)
>  {
> -    NetClientState *nc;
>      NetClientState **peers = conf->peers.ncs;
>      NICState *nic;
> -    int i;
> +    int i, queues = MAX(1, conf->queues);
And here. This patch is what had created problems for me.
Regards,
Anthony Liguori
>
>      assert(info->type == NET_CLIENT_OPTIONS_KIND_NIC);
>      assert(info->size >= sizeof(NICState));
>
> -    nc = qemu_new_net_client(info, peers[0], model, name);
> -    nc->queue_index = 0;
> -
> -    nic = qemu_get_nic(nc);
> +    nic = g_malloc0(info->size + sizeof(NetClientState) * queues);
> +    nic->ncs = (void *)nic + info->size;
>      nic->conf = conf;
>      nic->opaque = opaque;
>
> -    for (i = 1; i < conf->queues; i++) {
> -        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, nc->name,
> +    for (i = 0; i < queues; i++) {
> +        qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name,
>                                NULL);
>          nic->ncs[i].queue_index = i;
>      }
> @@ -261,7 +258,7 @@ NICState *qemu_new_nic(NetClientInfo *info,
>
>  NetClientState *qemu_get_subqueue(NICState *nic, int queue_index)
>  {
> -    return &nic->ncs[queue_index];
> +    return nic->ncs + queue_index;
>  }
>
>  NetClientState *qemu_get_queue(NICState *nic)
> @@ -273,7 +270,7 @@ NICState *qemu_get_nic(NetClientState *nc)
>  {
>      NetClientState *nc0 = nc - nc->queue_index;
>
> -    return DO_UPCAST(NICState, ncs[0], nc0);
> +    return (NICState *)((void *)nc0 - nc->info->size);
>  }
>
>  void *qemu_get_nic_opaque(NetClientState *nc)
> @@ -368,6 +365,8 @@ void qemu_del_nic(NICState *nic)
>          qemu_cleanup_net_client(nc);
>          qemu_free_net_client(nc);
>      }
> +
> +    g_free(nic);
>  }
>
>  void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
> --
> 1.7.1
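To summarize the allocation scheme in qemu_new_nic() and qemu_get_nic()
above: the NIC state and its per-queue client array live in one g_malloc0()
block, and pointer arithmetic recovers the owner from any queue. Below is a
minimal standalone sketch with illustrative State/Client names; storing the
state size per client is an assumption of the sketch (QEMU takes it from
nc->info->size instead):

    #include <glib.h>
    #include <stdio.h>

    typedef struct Client {
        int queue_index;
        size_t state_size;       /* stands in for nc->info->size */
    } Client;

    typedef struct State {
        Client *clients;
    } State;

    static State *state_new(size_t state_size, int queues)
    {
        /* one block: [ State ... | Client[0] | Client[1] | ... ] */
        State *s = g_malloc0(state_size + sizeof(Client) * queues);
        s->clients = (void *)s + state_size; /* GCC void * arithmetic,
                                              * as in the patch */
        for (int i = 0; i < queues; i++) {
            s->clients[i].queue_index = i;
            s->clients[i].state_size = state_size;
        }
        return s;
    }

    /* inverse mapping, as in qemu_get_nic(): step back to queue 0,
     * then back over the state that precedes the array */
    static State *state_from_client(Client *c)
    {
        Client *c0 = c - c->queue_index;
        return (State *)((void *)c0 - c->state_size);
    }

    int main(void)
    {
        State *s = state_new(sizeof(State), 4);
        printf("owner recovered: %s\n",
               state_from_client(&s->clients[2]) == s ? "yes" : "no");
        g_free(s);               /* one free releases state and all queues */
        return 0;
    }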