public inbox for passt-dev@passt.top
 help / color / mirror / code / Atom feed
From: Laurent Vivier <lvivier@redhat.com>
To: passt-dev@passt.top
Cc: Laurent Vivier <lvivier@redhat.com>
Subject: [PATCH v3 6/8] udp_vu: Move virtqueue management from udp_vu_sock_recv() to its caller
Date: Mon, 16 Mar 2026 19:07:19 +0100	[thread overview]
Message-ID: <20260316180721.2230640-7-lvivier@redhat.com> (raw)
In-Reply-To: <20260316180721.2230640-1-lvivier@redhat.com>

udp_vu_sock_recv() currently mixes two concerns: receiving data from the
socket and managing virtqueue buffers (collecting, rewinding, releasing).
This makes the function harder to reason about and couples socket I/O
with virtqueue state.

Move all virtqueue operations — vu_collect(), vu_init_elem(),
vu_queue_rewind(), vu_set_vnethdr(), and the queue-readiness check — into
udp_vu_sock_to_tap(), which is the only caller.  This turns
udp_vu_sock_recv() into a pure socket receive function that simply reads
into the provided iov array and adjusts its length.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 udp_vu.c | 110 +++++++++++++++++++++++++------------------------------
 1 file changed, 49 insertions(+), 61 deletions(-)

diff --git a/udp_vu.c b/udp_vu.c
index 8b0de312949c..7f6561f83505 100644
--- a/udp_vu.c
+++ b/udp_vu.c
@@ -33,9 +33,6 @@
 #include "udp_vu.h"
 #include "vu_common.h"
 
-static struct iovec     iov_vu		[VIRTQUEUE_MAX_SIZE];
-static struct vu_virtq_element	elem		[VIRTQUEUE_MAX_SIZE];
-
 /**
  * udp_vu_hdrlen() - Sum size of all headers, from UDP to virtio-net
  * @v6:		Set for IPv6 packet
@@ -58,78 +55,35 @@ static size_t udp_vu_hdrlen(bool v6)
 
 /**
  * udp_vu_sock_recv() - Receive datagrams from socket into vhost-user buffers
- * @c:		Execution context
  * @iov:	IO vector for the frame (in/out)
  * @cnt:	Number of IO vector entries (in/out)
- * @vq:		virtqueue to use to receive data
  * @s:		Socket to receive from
  * @v6:		Set for IPv6 connections
  *
- * Return: size of received data, 0 if the datagram
- *         was discarded because the virtqueue is not ready, -1 on error
+ * Return: size of received data, -1 on error
  */
-static ssize_t udp_vu_sock_recv(const struct ctx *c, struct iovec *iov,
-				size_t *cnt, unsigned *elem_used,
-				struct vu_virtq *vq, int s, bool v6)
+static ssize_t udp_vu_sock_recv(struct iovec *iov, size_t *cnt, int s, bool v6)
 {
-	const struct vu_dev *vdev = c->vdev;
-	struct msghdr msg  = { 0 };
+	struct iovec msg_iov[*cnt];
+	struct msghdr msg = { 0 };
 	struct iov_tail payload;
-	size_t hdrlen, iov_used;
-	unsigned elem_cnt;
-	unsigned i, j;
+	size_t hdrlen;
 	ssize_t dlen;
 
-	ASSERT(!c->no_udp);
-
-	if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
-		debug("Got UDP packet, but RX virtqueue not usable yet");
-
-		if (recvmsg(s, &msg, MSG_DONTWAIT) < 0)
-			debug_perror("Failed to discard datagram");
-
-		*cnt = 0;
-		return 0;
-	}
-
 	/* compute L2 header length */
 	hdrlen = udp_vu_hdrlen(v6);
 
-	elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem),
-			      iov, *cnt, &iov_used,
-			      IP_MAX_MTU + ETH_HLEN + VNET_HLEN, NULL);
-	if (elem_cnt == 0)
-		return -1;
-
-	ASSERT((size_t)elem_cnt == iov_used);	/* one iovec per element */
-
-	payload = IOV_TAIL(iov, iov_used, hdrlen);
+	payload = IOV_TAIL(iov, *cnt, hdrlen);
 
-	struct iovec msg_iov[payload.cnt];
 	msg.msg_iov = msg_iov;
 	msg.msg_iovlen = iov_tail_clone(msg.msg_iov, payload.cnt, &payload);
 
 	/* read data from the socket */
 	dlen = recvmsg(s, &msg, 0);
-	if (dlen < 0) {
-		vu_queue_rewind(vq, elem_cnt);
+	if (dlen < 0)
 		return -1;
-	}
-
-	*cnt = vu_pad(iov, iov_used, 0, dlen + hdrlen);
-
-	*elem_used = 0;
-	for (i = 0, j = 0; j < *cnt && i < elem_cnt; i++) {
-		if (j + elem[i].in_num > *cnt)
-			elem[i].in_num = *cnt - j;
-		j += elem[i].in_num;
-		(*elem_used)++;
-	}
 
-	vu_set_vnethdr(iov[0].iov_base, *elem_used);
-
-	/* release unused buffers */
-	vu_queue_rewind(vq, elem_cnt - *elem_used);
+	*cnt = vu_pad(iov, *cnt, 0, dlen + hdrlen);
 
 	return dlen;
 }
@@ -217,26 +171,60 @@ static void udp_vu_csum(const struct flowside *toside,
  */
 void udp_vu_sock_to_tap(const struct ctx *c, int s, int n, flow_sidx_t tosidx)
 {
+	static struct iovec     iov_vu		[VIRTQUEUE_MAX_SIZE];
+	static struct vu_virtq_element	elem	[VIRTQUEUE_MAX_SIZE];
 	const struct flowside *toside = flowside_at_sidx(tosidx);
 	bool v6 = !(inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr));
 	struct vu_dev *vdev = c->vdev;
 	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
-	struct iov_tail data;
 	int i;
 
+	ASSERT(!c->no_udp);
+
+	if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
+		struct msghdr msg = { 0 };
+
+		debug("Got UDP packet, but RX virtqueue not usable yet");
+
+		for (i = 0; i < n; i++) {
+			if (recvmsg(s, &msg, MSG_DONTWAIT) < 0)
+				debug_perror("Failed to discard datagram");
+		}
+
+		return;
+	}
+
 	for (i = 0; i < n; i++) {
-		unsigned elem_used;
+		unsigned elem_used, elem_cnt, j, k;
 		size_t iov_cnt;
 		ssize_t dlen;
 
-		iov_cnt = ARRAY_SIZE(iov_vu);
-		dlen = udp_vu_sock_recv(c, iov_vu, &iov_cnt, &elem_used, vq,
-					s, v6);
-		if (dlen < 0)
+		elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem),
+				      iov_vu, ARRAY_SIZE(iov_vu), &iov_cnt,
+				      IP_MAX_MTU + ETH_HLEN + VNET_HLEN, NULL);
+		if (elem_cnt == 0)
+			break;
+
+		dlen = udp_vu_sock_recv(iov_vu, &iov_cnt, s, v6);
+		if (dlen < 0) {
+			vu_queue_rewind(vq, elem_cnt);
 			break;
+		}
+
+		elem_used = 0;
+		for (j = 0, k = 0; k < iov_cnt && j < elem_cnt; j++) {
+			if (k + elem[j].in_num > iov_cnt)
+				elem[j].in_num = iov_cnt - k;
+			k += elem[j].in_num;
+			elem_used++;
+		}
+
+		/* release unused buffers */
+		vu_queue_rewind(vq, elem_cnt - elem_used);
 
 		if (iov_cnt > 0) {
-			data = IOV_TAIL(iov_vu, iov_cnt, 0);
+			struct iov_tail data = IOV_TAIL(iov_vu, iov_cnt, 0);
+			vu_set_vnethdr(iov_vu[0].iov_base, elem_used);
 			udp_vu_prepare(c, &data, toside, dlen);
 			if (*c->pcap) {
 				udp_vu_csum(toside, &data);
-- 
2.53.0


  parent reply	other threads:[~2026-03-16 18:07 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-16 18:07 [PATCH v3 0/8] vhost-user,udp: Handle multiple iovec entries per virtqueue element Laurent Vivier
2026-03-16 18:07 ` [PATCH v3 1/8] virtio: Pass iovec arrays as separate parameters to vu_queue_pop() Laurent Vivier
2026-03-16 18:07 ` [PATCH v3 2/8] vu_handle_tx: Pass actual remaining out_sg capacity " Laurent Vivier
2026-03-16 18:07 ` [PATCH v3 3/8] vu_common: Move iovec management into vu_collect() Laurent Vivier
2026-03-16 18:07 ` [PATCH v3 4/8] vhost-user: Centralise Ethernet frame padding in vu_collect(), vu_pad() and vu_flush() Laurent Vivier
2026-03-16 18:07 ` [PATCH v3 5/8] udp_vu: Use iov_tail to manage virtqueue buffers Laurent Vivier
2026-03-16 18:07 ` Laurent Vivier [this message]
2026-03-16 18:07 ` [PATCH v3 7/8] iov: Add IOV_PUT_HEADER() and with_header() to write header data back to iov_tail Laurent Vivier
2026-03-16 18:07 ` [PATCH v3 8/8] udp: Pass iov_tail to udp_update_hdr4()/udp_update_hdr6() Laurent Vivier

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260316180721.2230640-7-lvivier@redhat.com \
    --to=lvivier@redhat.com \
    --cc=passt-dev@passt.top \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
Code repositories for project(s) associated with this public inbox

	https://passt.top/passt

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for IMAP folder(s).