From: Laurent Vivier <lvivier@redhat.com>
To: passt-dev@passt.top
Cc: Laurent Vivier <lvivier@redhat.com>
Subject: [PATCH 3/3] vu_common: Move iovec management into vu_collect()
Date: Fri, 13 Mar 2026 08:21:36 +0100
Message-ID: <20260313072136.4075535-4-lvivier@redhat.com>
In-Reply-To: <20260313072136.4075535-1-lvivier@redhat.com>

Previously, callers had to pre-initialize virtqueue elements with iovec
entries using vu_set_element() or vu_init_elem() before calling
vu_collect().  This meant each element owned a fixed, pre-assigned iovec
slot.
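
For reference, the old calling convention (as removed from udp_vu.c
below) was:

	vu_init_elem(elem, iov_vu, VIRTQUEUE_MAX_SIZE);

	iov_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE,
			     IP_MAX_MTU + ETH_HLEN + VNET_HLEN, NULL);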

Move the iovec array into vu_collect() as explicit in_sg and max_in_sg
parameters, letting it pass the remaining iovec capacity directly to
vu_queue_pop().  A running current_iov counter tracks
consumed entries across elements, so multiple elements share a single
iovec pool.  The optional in_num output parameter reports how many iovec
entries were consumed, allowing callers to track usage across multiple
vu_collect() calls.
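
For illustration, the new calling convention (simplified from
vu_send_single() below) becomes:

	size_t in_num;

	elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem), in_sg,
			      ARRAY_SIZE(in_sg), &in_num, size, &total);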

This removes vu_set_element() and vu_init_elem(), which are no longer
needed, and is a prerequisite for multi-buffer support where a single
virtqueue element can use more than one iovec entry.  For now, callers
assert the current single-iovec-per-element invariant until they are
updated to handle multiple iovecs.
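
For example, udp_vu_sock_recv() below enforces the invariant with:

	ASSERT((size_t)elem_cnt == iov_cnt);	/* one iovec per element */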

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 tcp_vu.c    | 23 ++++++++++--------
 udp_vu.c    | 21 ++++++++++-------
 vu_common.c | 68 ++++++++++++++++++++++++-----------------------------
 vu_common.h | 22 +++--------------
 4 files changed, 59 insertions(+), 75 deletions(-)

diff --git a/tcp_vu.c b/tcp_vu.c
index fd734e857b3b..ff826e53355f 100644
--- a/tcp_vu.c
+++ b/tcp_vu.c
@@ -87,13 +87,13 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
 
 	hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
 
-	vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
-
 	elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
+			      &flags_iov[0], 1, NULL,
 			      MAX(hdrlen + sizeof(*opts), ETH_ZLEN + VNET_HLEN), NULL);
 	if (elem_cnt != 1)
 		return -1;
 
+	ASSERT(flags_elem[0].in_num == 1);
 	ASSERT(flags_elem[0].in_sg[0].iov_len >=
 	       MAX(hdrlen + sizeof(*opts), ETH_ZLEN + VNET_HLEN));
 
@@ -148,9 +148,8 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
 	nb_ack = 1;
 
 	if (flags & DUP_ACK) {
-		vu_set_element(&flags_elem[1], NULL, &flags_iov[1]);
-
 		elem_cnt = vu_collect(vdev, vq, &flags_elem[1], 1,
+				      &flags_iov[1], 1, NULL,
 				      flags_elem[0].in_sg[0].iov_len, NULL);
 		if (elem_cnt == 1 &&
 		    flags_elem[1].in_sg[0].iov_len >=
@@ -191,8 +190,8 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq,
 	const struct vu_dev *vdev = c->vdev;
 	struct msghdr mh_sock = { 0 };
 	uint16_t mss = MSS_GET(conn);
+	size_t hdrlen, iov_used;
 	int s = conn->sock;
-	size_t hdrlen;
 	int elem_cnt;
 	ssize_t ret;
 	int i;
@@ -201,22 +200,26 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq,
 
 	hdrlen = tcp_vu_hdrlen(v6);
 
-	vu_init_elem(elem, &iov_vu[DISCARD_IOV_NUM], VIRTQUEUE_MAX_SIZE);
-
+	iov_used = 0;
 	elem_cnt = 0;
 	*head_cnt = 0;
-	while (fillsize > 0 && elem_cnt < VIRTQUEUE_MAX_SIZE) {
+	while (fillsize > 0 && elem_cnt < ARRAY_SIZE(elem) &&
+	       iov_used < VIRTQUEUE_MAX_SIZE) {
+		size_t frame_size, dlen, in_num;
 		struct iovec *iov;
-		size_t frame_size, dlen;
 		int cnt;
 
 		cnt = vu_collect(vdev, vq, &elem[elem_cnt],
-				 VIRTQUEUE_MAX_SIZE - elem_cnt,
+				 ARRAY_SIZE(elem) - elem_cnt,
+				 &iov_vu[DISCARD_IOV_NUM + iov_used],
+				 VIRTQUEUE_MAX_SIZE - iov_used, &in_num,
 				 MAX(MIN(mss, fillsize) + hdrlen, ETH_ZLEN + VNET_HLEN),
 				 &frame_size);
 		if (cnt == 0)
 			break;
+		ASSERT((size_t)cnt == in_num);	/* one iovec per element */
 
+		iov_used += in_num;
 		dlen = frame_size - hdrlen;
 
 		/* reserve space for headers in iov */
diff --git a/udp_vu.c b/udp_vu.c
index 5effca777e0a..acc18d375a0f 100644
--- a/udp_vu.c
+++ b/udp_vu.c
@@ -71,9 +71,10 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s,
 			    bool v6, ssize_t *dlen)
 {
 	const struct vu_dev *vdev = c->vdev;
+	int elem_cnt, elem_used, iov_used;
 	struct msghdr msg  = { 0 };
-	int iov_cnt, iov_used;
 	size_t hdrlen, l2len;
+	size_t iov_cnt;
 
 	ASSERT(!c->no_udp);
 
@@ -89,13 +90,14 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s,
 	/* compute L2 header length */
 	hdrlen = udp_vu_hdrlen(v6);
 
-	vu_init_elem(elem, iov_vu, VIRTQUEUE_MAX_SIZE);
-
-	iov_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE,
-			     IP_MAX_MTU + ETH_HLEN + VNET_HLEN, NULL);
-	if (iov_cnt == 0)
+	elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem),
+			      iov_vu, ARRAY_SIZE(iov_vu), &iov_cnt,
+			      IP_MAX_MTU + ETH_HLEN + VNET_HLEN, NULL);
+	if (elem_cnt == 0)
 		return -1;
 
+	ASSERT((size_t)elem_cnt == iov_cnt);	/* one iovec per element */
+
 	/* reserve space for the headers */
 	ASSERT(iov_vu[0].iov_len >= MAX(hdrlen, ETH_ZLEN + VNET_HLEN));
 	iov_vu[0].iov_base = (char *)iov_vu[0].iov_base + hdrlen;
@@ -107,7 +109,7 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s,
 
 	*dlen = recvmsg(s, &msg, 0);
 	if (*dlen < 0) {
-		vu_queue_rewind(vq, iov_cnt);
+		vu_queue_rewind(vq, elem_cnt);
 		return -1;
 	}
 
@@ -116,15 +118,16 @@ static int udp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq, int s,
 	iov_vu[0].iov_len += hdrlen;
 
 	iov_used = iov_truncate(iov_vu, iov_cnt, *dlen + hdrlen);
+	elem_used = iov_used; /* one iovec per element */
 
 	/* pad frame to 60 bytes: first buffer is at least ETH_ZLEN long */
 	l2len = *dlen + hdrlen - VNET_HLEN;
 	vu_pad(&iov_vu[0], l2len);
 
-	vu_set_vnethdr(iov_vu[0].iov_base, iov_used);
+	vu_set_vnethdr(iov_vu[0].iov_base, elem_used);
 
 	/* release unused buffers */
-	vu_queue_rewind(vq, iov_cnt - iov_used);
+	vu_queue_rewind(vq, elem_cnt - elem_used);
 
 	return iov_used;
 }
diff --git a/vu_common.c b/vu_common.c
index ed0033d6bb11..d5fca9e52004 100644
--- a/vu_common.c
+++ b/vu_common.c
@@ -51,28 +51,15 @@ int vu_packet_check_range(struct vdev_memory *memory,
 	return -1;
 }
 
-/**
- * vu_init_elem() - initialize an array of virtqueue elements with 1 iov in each
- * @elem:	Array of virtqueue elements to initialize
- * @iov:	Array of iovec to assign to virtqueue element
- * @elem_cnt:	Number of virtqueue element
- */
-void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt)
-{
-	int i;
-
-	for (i = 0; i < elem_cnt; i++)
-		vu_set_element(&elem[i], NULL, &iov[i]);
-}
-
 /**
  * vu_collect() - collect virtio buffers from a given virtqueue
  * @vdev:		vhost-user device
  * @vq:			virtqueue to collect from
- * @elem:		Array of virtqueue element
- * 			each element must be initialized with one iovec entry
- * 			in the in_sg array.
+ * @elem:		Array of @max_elem virtqueue elements
  * @max_elem:		Number of virtqueue elements in the array
+ * @in_sg:		Incoming iovec array for device-writable descriptors
+ * @max_in_sg:		Maximum number of entries in @in_sg
+ * @in_num:		Number of @in_sg entries consumed (output, may be NULL)
  * @size:		Maximum size of the data in the frame
  * @collected:		Collected buffer length, up to @size, set on return
  *
@@ -80,20 +67,21 @@ void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt
  */
 int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq,
 	       struct vu_virtq_element *elem, int max_elem,
+	       struct iovec *in_sg, size_t max_in_sg, size_t *in_num,
 	       size_t size, size_t *collected)
 {
 	size_t current_size = 0;
+	size_t current_iov = 0;
 	int elem_cnt = 0;
 
-	while (current_size < size && elem_cnt < max_elem) {
-		struct iovec *iov;
+	while (current_size < size && elem_cnt < max_elem &&
+	       current_iov < max_in_sg) {
 		int ret;
 
 		ret = vu_queue_pop(vdev, vq, &elem[elem_cnt],
-				   elem[elem_cnt].in_sg,
-				   elem[elem_cnt].in_num,
-				   elem[elem_cnt].out_sg,
-				   elem[elem_cnt].out_num);
+				   &in_sg[current_iov],
+				   max_in_sg - current_iov,
+				   NULL, 0);
 		if (ret < 0)
 			break;
 
@@ -103,18 +91,22 @@ int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq,
 			break;
 		}
 
-		iov = &elem[elem_cnt].in_sg[0];
-
-		if (iov->iov_len > size - current_size)
-			iov->iov_len = size - current_size;
+		elem[elem_cnt].in_num = iov_truncate(elem[elem_cnt].in_sg,
+						     elem[elem_cnt].in_num,
+						     size - current_size);
 
-		current_size += iov->iov_len;
+		current_size += iov_size(elem[elem_cnt].in_sg,
+					 elem[elem_cnt].in_num);
+		current_iov += elem[elem_cnt].in_num;
 		elem_cnt++;
 
 		if (!vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
 			break;
 	}
 
+	if (in_num)
+		*in_num = current_iov;
+
 	if (collected)
 		*collected = current_size;
 
@@ -147,8 +139,11 @@ void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
 {
 	int i;
 
-	for (i = 0; i < elem_cnt; i++)
-		vu_queue_fill(vdev, vq, &elem[i], elem[i].in_sg[0].iov_len, i);
+	for (i = 0; i < elem_cnt; i++) {
+		size_t elem_size = iov_size(elem[i].in_sg, elem[i].in_num);
+
+		vu_queue_fill(vdev, vq, &elem[i], elem_size, i);
+	}
 
 	vu_queue_flush(vdev, vq, elem_cnt);
 	vu_queue_notify(vdev, vq);
@@ -246,7 +241,7 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size)
 	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
 	struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
 	struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
-	size_t total;
+	size_t total, in_num;
 	int elem_cnt;
 	int i;
 
@@ -257,11 +252,10 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size)
 		return -1;
 	}
 
-	vu_init_elem(elem, in_sg, VIRTQUEUE_MAX_SIZE);
-
 	size += VNET_HLEN;
-	elem_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE, size, &total);
-	if (total < size) {
+	elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem), in_sg,
+			      ARRAY_SIZE(in_sg), &in_num, size, &total);
+	if (elem_cnt == 0 || total < size) {
 		debug("vu_send_single: no space to send the data "
 		      "elem_cnt %d size %zd", elem_cnt, total);
 		goto err;
@@ -272,10 +266,10 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size)
 	total -= VNET_HLEN;
 
 	/* copy data from the buffer to the iovec */
-	iov_from_buf(in_sg, elem_cnt, VNET_HLEN, buf, total);
+	iov_from_buf(in_sg, in_num, VNET_HLEN, buf, total);
 
 	if (*c->pcap)
-		pcap_iov(in_sg, elem_cnt, VNET_HLEN);
+		pcap_iov(in_sg, in_num, VNET_HLEN);
 
 	vu_flush(vdev, vq, elem, elem_cnt);
 
diff --git a/vu_common.h b/vu_common.h
index 865d9771fa89..6c31630e8712 100644
--- a/vu_common.h
+++ b/vu_common.h
@@ -35,26 +35,10 @@ static inline void *vu_payloadv6(void *base)
 	return (struct ipv6hdr *)vu_ip(base) + 1;
 }
 
-/**
- * vu_set_element() - Initialize a vu_virtq_element
- * @elem:	Element to initialize
- * @out_sg:	One out iovec entry to set in elem
- * @in_sg:	One in iovec entry to set in elem
- */
-static inline void vu_set_element(struct vu_virtq_element *elem,
-				  struct iovec *out_sg, struct iovec *in_sg)
-{
-	elem->out_num = !!out_sg;
-	elem->out_sg = out_sg;
-	elem->in_num = !!in_sg;
-	elem->in_sg = in_sg;
-}
-
-void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov,
-		  int elem_cnt);
 int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq,
-	       struct vu_virtq_element *elem, int max_elem, size_t size,
-	       size_t *collected);
+	       struct vu_virtq_element *elem, int max_elem,
+	       struct iovec *in_sg, size_t max_in_sg, size_t *in_num,
+	       size_t size, size_t *collected);
 void vu_set_vnethdr(struct virtio_net_hdr_mrg_rxbuf *vnethdr, int num_buffers);
 void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
 	      struct vu_virtq_element *elem, int elem_cnt);
-- 
2.53.0

