From: Laurent Vivier <lvivier@redhat.com>
To: passt-dev@passt.top
Cc: Laurent Vivier <lvivier@redhat.com>
Subject: [PATCH 10/10] vhost-user: Centralise Ethernet frame padding in vu_collect() and vu_pad()
Date: Wed, 1 Apr 2026 21:18:26 +0200 [thread overview]
Message-ID: <20260401191826.1782394-11-lvivier@redhat.com> (raw)
In-Reply-To: <20260401191826.1782394-1-lvivier@redhat.com>
The previous per-protocol padding done by vu_pad() in tcp_vu.c and
udp_vu.c was only correct for single-buffer frames: it assumed the
padding area always fell within the first iov, writing past its end
with a plain memset().
It also required each caller to compute MAX(..., ETH_ZLEN + VNET_HLEN)
for vu_collect() and to call vu_pad() at the right point, duplicating
the minimum-size logic across protocols.
Move the Ethernet minimum-size enforcement into vu_collect() itself, so
that enough buffer space is always reserved for padding regardless of
the requested frame size.
Rewrite vu_pad() to take a full iovec array and use iov_memset(),
making it safe for multi-buffer (mergeable rx buffer) frames.
In tcp_vu_sock_recv(), replace iov_truncate() with iov_skip_bytes():
now that all consumers receive explicit data lengths, truncating the
iovecs is no longer needed. In tcp_vu_data_from_sock(), cap each
frame's data length against the remaining bytes actually received from
the socket, so that the last partial frame gets correct headers and
sequence-number advancement.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
iov.c | 1 -
tcp_vu.c | 29 ++++++++++++++---------------
udp_vu.c | 14 ++++++++------
vu_common.c | 32 +++++++++++++++-----------------
vu_common.h | 2 +-
5 files changed, 38 insertions(+), 40 deletions(-)
diff --git a/iov.c b/iov.c
index 83b683f3976a..2289b425529e 100644
--- a/iov.c
+++ b/iov.c
@@ -180,7 +180,6 @@ size_t iov_truncate(struct iovec *iov, size_t iov_cnt, size_t size)
* Will write less than @length bytes if it runs out of space in
* the iov
*/
-/* cppcheck-suppress unusedFunction */
void iov_memset(const struct iovec *iov, size_t iov_cnt, size_t offset, int c,
size_t length)
{
diff --git a/tcp_vu.c b/tcp_vu.c
index ae79a6d856b0..cae6926334b9 100644
--- a/tcp_vu.c
+++ b/tcp_vu.c
@@ -72,12 +72,12 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
struct vu_dev *vdev = c->vdev;
struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
struct vu_virtq_element flags_elem[2];
- size_t optlen, hdrlen, l2len;
struct ipv6hdr *ip6h = NULL;
struct iphdr *ip4h = NULL;
struct iovec flags_iov[2];
struct tcp_syn_opts *opts;
struct iov_tail payload;
+ size_t optlen, hdrlen;
struct tcphdr *th;
struct ethhdr *eh;
uint32_t seq;
@@ -88,7 +88,7 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
&flags_iov[0], 1, NULL,
- MAX(hdrlen + sizeof(*opts), ETH_ZLEN + VNET_HLEN), NULL);
+ hdrlen + sizeof(*opts), NULL);
if (elem_cnt != 1)
return -1;
@@ -128,7 +128,6 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
return ret;
}
- iov_truncate(&flags_iov[0], 1, hdrlen + optlen);
payload = IOV_TAIL(flags_elem[0].in_sg, 1, hdrlen);
if (flags & KEEPALIVE)
@@ -137,9 +136,7 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
tcp_fill_headers(c, conn, eh, ip4h, ip6h, th, &payload,
optlen, NULL, seq, !*c->pcap);
- l2len = optlen + hdrlen - VNET_HLEN;
- vu_pad(&flags_elem[0].in_sg[0], l2len);
-
+ vu_pad(flags_elem[0].in_sg, 1, hdrlen + optlen);
vu_flush(vdev, vq, flags_elem, 1, hdrlen + optlen);
if (*c->pcap)
@@ -149,7 +146,7 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
if (flags & DUP_ACK) {
elem_cnt = vu_collect(vdev, vq, &flags_elem[1], 1,
&flags_iov[1], 1, NULL,
- flags_elem[0].in_sg[0].iov_len, NULL);
+ hdrlen + optlen, NULL);
if (elem_cnt == 1 &&
flags_elem[1].in_sg[0].iov_len >=
flags_elem[0].in_sg[0].iov_len) {
@@ -213,7 +210,7 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq,
ARRAY_SIZE(elem) - elem_cnt,
&iov_vu[DISCARD_IOV_NUM + iov_used],
VIRTQUEUE_MAX_SIZE - iov_used, &in_total,
- MAX(MIN(mss, fillsize) + hdrlen, ETH_ZLEN + VNET_HLEN),
+ MIN(mss, fillsize) + hdrlen,
&frame_size);
if (cnt == 0)
break;
@@ -249,8 +246,11 @@ static ssize_t tcp_vu_sock_recv(const struct ctx *c, struct vu_virtq *vq,
if (!peek_offset_cap)
ret -= already_sent;
- /* adjust iov number and length of the last iov */
- i = iov_truncate(&iov_vu[DISCARD_IOV_NUM], iov_used, ret);
+ i = iov_skip_bytes(&iov_vu[DISCARD_IOV_NUM], iov_used,
+ MAX(hdrlen + ret, VNET_HLEN + ETH_ZLEN),
+ NULL);
+ if ((size_t)i < iov_used)
+ i++;
/* adjust head count */
while (*head_cnt > 0 && head[*head_cnt - 1] >= i)
@@ -447,11 +447,13 @@ int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
size_t frame_size = iov_size(iov, buf_cnt);
bool push = i == head_cnt - 1;
ssize_t dlen;
- size_t l2len;
assert(frame_size >= hdrlen);
dlen = frame_size - hdrlen;
+ if (dlen > len)
+ dlen = len;
+ len -= dlen;
/* The IPv4 header checksum varies only with dlen */
if (previous_dlen != dlen)
@@ -460,10 +462,7 @@ int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
tcp_vu_prepare(c, conn, iov, buf_cnt, dlen, &check, !*c->pcap, push);
- /* Pad first/single buffer only, it's at least ETH_ZLEN long */
- l2len = dlen + hdrlen - VNET_HLEN;
- vu_pad(iov, l2len);
-
+ vu_pad(elem[head[i]].in_sg, buf_cnt, dlen + hdrlen);
vu_flush(vdev, vq, &elem[head[i]], buf_cnt, dlen + hdrlen);
if (*c->pcap)
diff --git a/udp_vu.c b/udp_vu.c
index 4641f42eb5c4..30af64034516 100644
--- a/udp_vu.c
+++ b/udp_vu.c
@@ -65,7 +65,7 @@ static size_t udp_vu_hdrlen(bool v6)
static ssize_t udp_vu_sock_recv(struct iovec *iov, size_t *cnt, int s, bool v6)
{
struct msghdr msg = { 0 };
- size_t hdrlen, l2len;
+ size_t hdrlen, iov_used;
ssize_t dlen;
/* compute L2 header length */
@@ -88,11 +88,12 @@ static ssize_t udp_vu_sock_recv(struct iovec *iov, size_t *cnt, int s, bool v6)
iov[0].iov_base = (char *)iov[0].iov_base - hdrlen;
iov[0].iov_len += hdrlen;
- *cnt = iov_truncate(iov, *cnt, dlen + hdrlen);
-
- /* pad frame to 60 bytes: first buffer is at least ETH_ZLEN long */
- l2len = dlen + hdrlen - VNET_HLEN;
- vu_pad(&iov[0], l2len);
+ iov_used = iov_skip_bytes(iov, *cnt,
+ MAX(dlen + hdrlen, VNET_HLEN + ETH_ZLEN),
+ NULL);
+ if (iov_used < *cnt)
+ iov_used++;
+ *cnt = iov_used; /* one iovec per element */
return dlen;
}
@@ -234,6 +235,7 @@ void udp_vu_sock_to_tap(const struct ctx *c, int s, int n, flow_sidx_t tosidx)
pcap_iov(iov_vu, iov_cnt, VNET_HLEN,
hdrlen + dlen - VNET_HLEN);
}
+ vu_pad(iov_vu, iov_cnt, hdrlen + dlen);
vu_flush(vdev, vq, elem, elem_used, hdrlen + dlen);
vu_queue_notify(vdev, vq);
}
diff --git a/vu_common.c b/vu_common.c
index d371a59a1813..ca0aab369d3c 100644
--- a/vu_common.c
+++ b/vu_common.c
@@ -74,6 +74,7 @@ int vu_collect(const struct vu_dev *vdev, struct vu_virtq *vq,
size_t current_iov = 0;
int elem_cnt = 0;
+ size = MAX(size, ETH_ZLEN + VNET_HLEN); /* Ethernet minimum size */
while (current_size < size && elem_cnt < max_elem &&
current_iov < max_in_sg) {
int ret;
@@ -262,29 +263,27 @@ int vu_send_single(const struct ctx *c, const void *buf, size_t size)
return -1;
}
- size += VNET_HLEN;
elem_cnt = vu_collect(vdev, vq, elem, ARRAY_SIZE(elem), in_sg,
- ARRAY_SIZE(in_sg), &in_total, size, &total);
- if (elem_cnt == 0 || total < size) {
+ ARRAY_SIZE(in_sg), &in_total, VNET_HLEN + size, &total);
+ if (elem_cnt == 0 || total < VNET_HLEN + size) {
debug("vu_send_single: no space to send the data "
"elem_cnt %d size %zu", elem_cnt, total);
goto err;
}
- total -= VNET_HLEN;
-
/* copy data from the buffer to the iovec */
- iov_from_buf(in_sg, in_total, VNET_HLEN, buf, total);
+ iov_from_buf(in_sg, in_total, VNET_HLEN, buf, size);
if (*c->pcap)
pcap_iov(in_sg, in_total, VNET_HLEN, size);
+ vu_pad(in_sg, in_total, VNET_HLEN + size);
vu_flush(vdev, vq, elem, elem_cnt, VNET_HLEN + size);
vu_queue_notify(vdev, vq);
- trace("vhost-user sent %zu", total);
+ trace("vhost-user sent %zu", size);
- return total;
+ return size;
err:
for (i = 0; i < elem_cnt; i++)
vu_queue_detach_element(vq);
@@ -293,15 +292,14 @@ err:
}
/**
- * vu_pad() - Pad 802.3 frame to minimum length (60 bytes) if needed
- * @iov: Buffer in iovec array where end of 802.3 frame is stored
- * @l2len: Layer-2 length already filled in frame
+ * vu_pad() - Pad short frames to minimum Ethernet length and truncate iovec
+ * @iov: Pointer to iovec array
+ * @cnt: Number of entries in @iov
+ * @frame_len: Data length in @iov (including virtio-net header)
*/
-void vu_pad(struct iovec *iov, size_t l2len)
+void vu_pad(const struct iovec *iov, size_t cnt, size_t frame_len)
{
- if (l2len >= ETH_ZLEN)
- return;
-
- memset((char *)iov->iov_base + iov->iov_len, 0, ETH_ZLEN - l2len);
- iov->iov_len += ETH_ZLEN - l2len;
+ if (frame_len < ETH_ZLEN + VNET_HLEN)
+ iov_memset(iov, cnt, frame_len, 0,
+ ETH_ZLEN + VNET_HLEN - frame_len);
}
diff --git a/vu_common.h b/vu_common.h
index 77d1849e6115..51f70084a7cb 100644
--- a/vu_common.h
+++ b/vu_common.h
@@ -44,6 +44,6 @@ void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
const struct timespec *now);
int vu_send_single(const struct ctx *c, const void *buf, size_t size);
-void vu_pad(struct iovec *iov, size_t l2len);
+void vu_pad(const struct iovec *iov, size_t cnt, size_t frame_len);
#endif /* VU_COMMON_H */
--
2.53.0
prev parent reply other threads:[~2026-04-01 19:18 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-01 19:18 [PATCH 00/10] vhost-user: Preparatory series for multiple iovec entries per virtqueue element Laurent Vivier
2026-04-01 19:18 ` [PATCH 01/10] iov: Introduce iov_memset() Laurent Vivier
2026-04-01 19:18 ` [PATCH 02/10] iov: Add iov_memcopy() to copy data between iovec arrays Laurent Vivier
2026-04-01 19:18 ` [PATCH 03/10] vu_common: Move vnethdr setup into vu_flush() Laurent Vivier
2026-04-01 19:18 ` [PATCH 04/10] udp_vu: Move virtqueue management from udp_vu_sock_recv() to its caller Laurent Vivier
2026-04-01 19:18 ` [PATCH 05/10] udp_vu: Pass iov explicitly to helpers instead of using file-scoped array Laurent Vivier
2026-04-01 19:18 ` [PATCH 06/10] checksum: Pass explicit L4 length to checksum functions Laurent Vivier
2026-04-01 19:18 ` [PATCH 07/10] pcap: Pass explicit L2 length to pcap_iov() Laurent Vivier
2026-04-01 19:18 ` [PATCH 08/10] vu_common: Pass explicit frame length to vu_flush() Laurent Vivier
2026-04-01 19:18 ` [PATCH 09/10] tcp: Pass explicit data length to tcp_fill_headers() Laurent Vivier
2026-04-01 19:18 ` Laurent Vivier [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260401191826.1782394-11-lvivier@redhat.com \
--to=lvivier@redhat.com \
--cc=passt-dev@passt.top \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
Code repositories for project(s) associated with this public inbox
https://passt.top/passt
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for IMAP folder(s).