From: Laurent Vivier <lvivier@redhat.com>
To: passt-dev@passt.top
Cc: Laurent Vivier <lvivier@redhat.com>
Subject: [PATCH 1/4] vhost-user: Enable multiqueue
Date: Fri, 7 Nov 2025 15:38:58 +0100
Message-ID: <20251107143901.89955-2-lvivier@redhat.com>
In-Reply-To: <20251107143901.89955-1-lvivier@redhat.com>
Add the --max-queues parameter to specify the maximum number of queue
pairs supported in vhost-user mode. This enables multi-queue support
by allowing configuration of up to 16 queue pairs (32 virtqueues).
For the moment, only the first RX queue is used; the TX queue is
selected by the guest kernel.
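For example, a possible invocation with four queue pairs (the socket
path, RAM size and guest interface name below are only placeholders):
  passt --vhost-user -s /tmp/passt.sock --max-queues 4
  kvm ... -chardev socket,id=chr0,path=/tmp/passt.sock \
    -netdev vhost-user,id=netdev0,chardev=chr0,queues=4 \
    -device virtio-net,netdev=netdev0,mq=true \
    -object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE \
    -numa node,memdev=memfd0
In the guest, the extra queue pairs can then be enabled with, e.g.:
  ethtool -L eth0 combined 4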
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
conf.c | 31 ++++++++++++++++++++++++++++++-
passt.h | 2 ++
tap.c | 15 +++++++++++++--
vhost_user.c | 38 +++++++++++++++++++++-----------------
virtio.h | 2 +-
5 files changed, 67 insertions(+), 21 deletions(-)
diff --git a/conf.c b/conf.c
index 66b9e63400ec..4738436e6245 100644
--- a/conf.c
+++ b/conf.c
@@ -862,7 +862,9 @@ static void usage(const char *name, FILE *f, int status)
" --vhost-user Enable vhost-user mode\n"
" UNIX domain socket is provided by -s option\n"
" --print-capabilities print back-end capabilities in JSON format,\n"
- " only meaningful for vhost-user mode\n");
+ " only meaningful for vhost-user mode\n"
+ " --max-queues Specify the maximum number of queue pairs\n"
+ );
FPRINTF(f,
" --repair-path PATH path for passt-repair(1)\n"
" default: append '.repair' to UNIX domain path\n");
@@ -1483,6 +1485,7 @@ void conf(struct ctx *c, int argc, char **argv)
{"migrate-exit", no_argument, NULL, 29 },
{"migrate-no-linger", no_argument, NULL, 30 },
{"stats", required_argument, NULL, 31 },
+ {"max-queues", required_argument, NULL, 32 },
{ 0 },
};
const char *optstring = "+dqfel:hs:F:I:p:P:m:a:n:M:g:i:o:D:S:H:461t:u:T:U:";
@@ -1514,6 +1517,7 @@ void conf(struct ctx *c, int argc, char **argv)
c->tcp.fwd_in.mode = c->tcp.fwd_out.mode = FWD_UNSET;
c->udp.fwd_in.mode = c->udp.fwd_out.mode = FWD_UNSET;
memcpy(c->our_tap_mac, MAC_OUR_LAA, ETH_ALEN);
+ c->max_virtqueues = 2;
optind = 0;
do {
@@ -1717,6 +1721,31 @@ void conf(struct ctx *c, int argc, char **argv)
die("Can't display statistics if not running in foreground");
c->stats = strtol(optarg, NULL, 0);
break;
+ case 32: {
+ unsigned long max_queues;
+ char *e;
+
+ if (c->mode != MODE_VU)
+ die("--max-queues is for vhost-user mode only");
+
+ errno = 0;
+ max_queues = strtoul(optarg, &e, 0);
+
+ if (errno || *e)
+ die("Invalid max-queues: %s", optarg);
+
+ if (max_queues < 1) {
+ die("max-queues %lu too small (min 1)",
+ max_queues);
+ }
+
+ if (max_queues > VHOST_USER_MAX_VQS / 2) {
+ die("max-queues %lu too big (max %u)",
+ max_queues, VHOST_USER_MAX_VQS / 2);
+ }
+ c->max_virtqueues = max_queues * 2;
+ break;
+ }
case 'd':
c->debug = 1;
c->quiet = 0;
diff --git a/passt.h b/passt.h
index 15801b44bfa8..e73f648009de 100644
--- a/passt.h
+++ b/passt.h
@@ -205,6 +205,7 @@ struct ip6_ctx {
* @low_wmem: Low probed net.core.wmem_max
* @low_rmem: Low probed net.core.rmem_max
* @vdev: vhost-user device
+ * @max_virtqueues: vhost-user maximum number of virtqueues
* @device_state_fd: Device state migration channel
* @device_state_result: Device state migration result
* @migrate_target: Are we the target, on the next migration request?
@@ -283,6 +284,7 @@ struct ctx {
int low_rmem;
struct vu_dev *vdev;
+ unsigned int max_virtqueues;
/* Migration */
int device_state_fd;
diff --git a/tap.c b/tap.c
index bb139d647bae..e18d693a665a 100644
--- a/tap.c
+++ b/tap.c
@@ -1318,8 +1318,19 @@ static void tap_backend_show_hints(struct ctx *c)
break;
case MODE_VU:
info("You can start qemu with:");
- info(" kvm ... -chardev socket,id=chr0,path=%s -netdev vhost-user,id=netdev0,chardev=chr0 -device virtio-net,netdev=netdev0 -object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE -numa node,memdev=memfd0\n",
- c->sock_path);
+ if (c->max_virtqueues > 2) {
+ info(" kvm ... -chardev socket,id=chr0,path=%s "
+ "-netdev vhost-user,id=netdev0,chardev=chr0,queues=%d "
+ "-device virtio-net,netdev=netdev0,mq=true "
+ "-object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE "
+ "-numa node,memdev=memfd0\n", c->sock_path, c->max_virtqueues / 2);
+ } else {
+ info(" kvm ... -chardev socket,id=chr0,path=%s "
+ "-netdev vhost-user,id=netdev0,chardev=chr0 "
+ "-device virtio-net,netdev=netdev0 "
+ "-object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE "
+ "-numa node,memdev=memfd0\n", c->sock_path);
+ }
break;
}
}
diff --git a/vhost_user.c b/vhost_user.c
index aa7c869d9e56..c668159e29b4 100644
--- a/vhost_user.c
+++ b/vhost_user.c
@@ -323,6 +323,7 @@ static bool vu_get_features_exec(struct vu_dev *vdev,
uint64_t features =
1ULL << VIRTIO_F_VERSION_1 |
1ULL << VIRTIO_NET_F_MRG_RXBUF |
+ 1ULL << VIRTIO_NET_F_MQ |
1ULL << VHOST_F_LOG_ALL |
1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
@@ -342,9 +343,9 @@ static bool vu_get_features_exec(struct vu_dev *vdev,
*/
static void vu_set_enable_all_rings(struct vu_dev *vdev, bool enable)
{
- uint16_t i;
+ unsigned int i;
- for (i = 0; i < VHOST_USER_MAX_VQS; i++)
+ for (i = 0; i < vdev->context->max_virtqueues; i++)
vdev->vq[i].enable = enable;
}
@@ -476,7 +477,7 @@ static bool vu_set_mem_table_exec(struct vu_dev *vdev,
close(vmsg->fds[i]);
}
- for (i = 0; i < VHOST_USER_MAX_VQS; i++) {
+ for (i = 0; i < vdev->context->max_virtqueues; i++) {
if (vdev->vq[i].vring.desc) {
if (map_ring(vdev, &vdev->vq[i]))
die("remapping queue %d during setmemtable", i);
@@ -759,15 +760,18 @@ static void vu_set_watch(const struct vu_dev *vdev, int idx)
/**
* vu_check_queue_msg_file() - Check if a message is valid,
* close fds if NOFD bit is set
+ * @vdev: vhost-user device
* @vmsg: vhost-user message
*/
-static void vu_check_queue_msg_file(struct vhost_user_msg *vmsg)
+static void vu_check_queue_msg_file(const struct vu_dev *vdev,
+ struct vhost_user_msg *vmsg)
{
bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
- int idx = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ unsigned int idx = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
- if (idx >= VHOST_USER_MAX_VQS)
- die("Invalid vhost-user queue index: %u", idx);
+ if (idx >= vdev->context->max_virtqueues)
+ die("Invalid vhost-user queue index: %u (max_virtqueues %u)", idx,
+ vdev->context->max_virtqueues);
if (nofd) {
vmsg_close_fds(vmsg);
@@ -794,7 +798,7 @@ static bool vu_set_vring_kick_exec(struct vu_dev *vdev,
debug("u64: 0x%016"PRIx64, vmsg->payload.u64);
- vu_check_queue_msg_file(vmsg);
+ vu_check_queue_msg_file(vdev, vmsg);
if (vdev->vq[idx].kick_fd != -1) {
epoll_del(vdev->context->epollfd, vdev->vq[idx].kick_fd);
@@ -834,7 +838,7 @@ static bool vu_set_vring_call_exec(struct vu_dev *vdev,
debug("u64: 0x%016"PRIx64, vmsg->payload.u64);
- vu_check_queue_msg_file(vmsg);
+ vu_check_queue_msg_file(vdev, vmsg);
if (vdev->vq[idx].call_fd != -1) {
close(vdev->vq[idx].call_fd);
@@ -869,7 +873,7 @@ static bool vu_set_vring_err_exec(struct vu_dev *vdev,
debug("u64: 0x%016"PRIx64, vmsg->payload.u64);
- vu_check_queue_msg_file(vmsg);
+ vu_check_queue_msg_file(vdev, vmsg);
if (vdev->vq[idx].err_fd != -1) {
close(vdev->vq[idx].err_fd);
@@ -896,7 +900,8 @@ static bool vu_get_protocol_features_exec(struct vu_dev *vdev,
uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1ULL << VHOST_USER_PROTOCOL_F_DEVICE_STATE |
- 1ULL << VHOST_USER_PROTOCOL_F_RARP;
+ 1ULL << VHOST_USER_PROTOCOL_F_RARP |
+ 1ULL << VHOST_USER_PROTOCOL_F_MQ;
(void)vdev;
vmsg_set_reply_u64(vmsg, features);
@@ -935,10 +940,9 @@ static bool vu_get_queue_num_exec(struct vu_dev *vdev,
{
(void)vdev;
- /* NOLINTNEXTLINE(misc-redundant-expression) */
- vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_VQS / 2);
+ vmsg_set_reply_u64(vmsg, vdev->context->max_virtqueues / 2);
- debug("VHOST_USER_MAX_VQS %u", VHOST_USER_MAX_VQS / 2);
+ debug("max_virtqueues %u", vdev->context->max_virtqueues / 2);
return true;
}
@@ -959,7 +963,7 @@ static bool vu_set_vring_enable_exec(struct vu_dev *vdev,
debug("State.index: %u", idx);
debug("State.enable: %u", enable);
- if (idx >= VHOST_USER_MAX_VQS)
+ if (idx >= vdev->context->max_virtqueues)
die("Invalid vring_enable index: %u", idx);
vdev->vq[idx].enable = enable;
@@ -1047,7 +1051,7 @@ static bool vu_check_device_state_exec(struct vu_dev *vdev,
*/
void vu_init(struct ctx *c)
{
- int i;
+ unsigned int i;
c->vdev = &vdev_storage;
c->vdev->context = c;
@@ -1074,7 +1078,7 @@ void vu_cleanup(struct vu_dev *vdev)
{
unsigned int i;
- for (i = 0; i < VHOST_USER_MAX_VQS; i++) {
+ for (i = 0; i < vdev->context->max_virtqueues; i++) {
struct vu_virtq *vq = &vdev->vq[i];
vq->started = false;
diff --git a/virtio.h b/virtio.h
index 12caaa0b6def..176c935cecc7 100644
--- a/virtio.h
+++ b/virtio.h
@@ -88,7 +88,7 @@ struct vu_dev_region {
uint64_t mmap_addr;
};
-#define VHOST_USER_MAX_VQS 2
+#define VHOST_USER_MAX_VQS 32
/*
* Set a reasonable maximum number of ram slots, which will be supported by
--
2.51.0