From: David Gibson <david@gibson.dropbear.id.au>
To: passt-dev@passt.top, Stefano Brivio <sbrivio@redhat.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Subject: [PATCH v9 14/20] tcp_vu: Share more header construction between IPv4 and IPv6 paths
Date: Mon, 4 Nov 2024 19:39:57 +1100
Message-ID: <20241104084004.3544294-15-david@gibson.dropbear.id.au>
In-Reply-To: <20241104084004.3544294-1-david@gibson.dropbear.id.au>
tcp_vu_send_flag() and tcp_vu_prepare() both need to do some different
things for IPv4 vs. IPv6. However, the two paths have a number of lines of
duplicated code. We can share those at the expense of an additional
conditional (which we might be able to simplify again later).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
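As a rough illustration of the shape of this change, here is a self-contained
sketch using placeholder names (the stub types and fill_v4()/fill_v6() below
are illustrative only, not the real tcp_vu.c helpers):

/* Hoist the shared TCP header setup out of the per-IP-version branches,
 * keeping two smaller conditionals for the version-specific parts.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct tcphdr_stub { unsigned doff:4, ack:1; };
struct payload_stub { struct tcphdr_stub th; char opts[40]; };

static void fill_v4(struct payload_stub *p) { (void)p; /* IPv4-only work */ }
static void fill_v6(struct payload_stub *p) { (void)p; /* IPv6-only work */ }

void prepare(struct payload_stub *p, bool v4)
{
	/* version-specific initialisation stays in one branch... */
	if (v4)
		memset(p->opts, 0, sizeof(p->opts));	/* stand-in for IPv4 setup */
	else
		memset(p->opts, 0xff, sizeof(p->opts));	/* stand-in for IPv6 setup */

	/* ...the previously duplicated lines are now written once... */
	memset(&p->th, 0, sizeof(p->th));
	p->th.doff = offsetof(struct payload_stub, opts) / 4;
	p->th.ack = 1;

	/* ...at the cost of a second branch for the version-specific tail */
	if (v4)
		fill_v4(p);
	else
		fill_v6(p);
}

The same pattern is applied to both tcp_vu_send_flag() and tcp_vu_prepare()
in the diff below.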
tcp_vu.c | 89 ++++++++++++++++++++++++--------------------------------
1 file changed, 38 insertions(+), 51 deletions(-)
diff --git a/tcp_vu.c b/tcp_vu.c
index c10a269..f779279 100644
--- a/tcp_vu.c
+++ b/tcp_vu.c
@@ -94,7 +95,11 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
const struct flowside *tapside = TAPFLOW(conn);
size_t l2len, l4len, optlen, hdrlen;
+ struct tcp_flags_t *payload;
+ struct ipv6hdr *ip6h = NULL;
+ struct iphdr *iph = NULL;
struct ethhdr *eh;
+ uint32_t seq;
int elem_cnt;
int nb_ack;
int ret;
@@ -117,60 +122,45 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
if (CONN_V4(conn)) {
- struct tcp_flags_t *payload;
- struct iphdr *iph;
- uint32_t seq;
-
eh->h_proto = htons(ETH_P_IP);
iph = vu_ip(iov_vu[0].iov_base);
*iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
payload = vu_payloadv4(iov_vu[0].iov_base);
- memset(&payload->th, 0, sizeof(payload->th));
- payload->th.doff = offsetof(struct tcp_flags_t, opts) / 4;
- payload->th.ack = 1;
-
- seq = conn->seq_to_tap;
- ret = tcp_prepare_flags(c, conn, flags, &payload->th,
- &payload->opts, &optlen);
- if (ret <= 0) {
- vu_queue_rewind(vq, 1);
- return ret;
- }
-
- l4len = tcp_fill_headers4(conn, NULL, iph,
- (struct tcp_payload_t *)payload, optlen,
- NULL, seq, true);
- l2len = sizeof(*iph);
} else {
- struct tcp_flags_t *payload;
- struct ipv6hdr *ip6h;
- uint32_t seq;
-
eh->h_proto = htons(ETH_P_IPV6);
ip6h = vu_ip(iov_vu[0].iov_base);
*ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
payload = vu_payloadv6(iov_vu[0].iov_base);
- memset(&payload->th, 0, sizeof(payload->th));
- payload->th.doff = offsetof(struct tcp_flags_t, opts) / 4;
- payload->th.ack = 1;
-
- seq = conn->seq_to_tap;
- ret = tcp_prepare_flags(c, conn, flags, &payload->th,
- &payload->opts, &optlen);
- if (ret <= 0) {
- vu_queue_rewind(vq, 1);
- return ret;
- }
+ }
+
+ memset(&payload->th, 0, sizeof(payload->th));
+ payload->th.doff = offsetof(struct tcp_flags_t, opts) / 4;
+ payload->th.ack = 1;
+
+ seq = conn->seq_to_tap;
+ ret = tcp_prepare_flags(c, conn, flags, &payload->th,
+ &payload->opts, &optlen);
+ if (ret <= 0) {
+ vu_queue_rewind(vq, 1);
+ return ret;
+ }
+ if (CONN_V4(conn)) {
+ l4len = tcp_fill_headers4(conn, NULL, iph,
+ (struct tcp_payload_t *)payload, optlen,
+ NULL, seq, true);
+ l2len = sizeof(*iph);
+ } else {
l4len = tcp_fill_headers6(conn, NULL, ip6h,
(struct tcp_payload_t *)payload, optlen,
seq, true);
l2len = sizeof(*ip6h);
}
+
l2len += l4len + sizeof(struct ethhdr);
elem[0].in_sg[0].iov_len = l2len +
@@ -284,7 +274,10 @@ static void tcp_vu_prepare(const struct ctx *c,
size_t dlen, const uint16_t **check)
{
const struct flowside *toside = TAPFLOW(conn);
+ struct tcp_payload_t *payload;
char *base = first->iov_base;
+ struct ipv6hdr *ip6h = NULL;
+ struct iphdr *iph = NULL;
struct ethhdr *eh;
/* we guess the first iovec provided by the guest can embed
@@ -297,10 +290,8 @@ static void tcp_vu_prepare(const struct ctx *c,
memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
/* initialize header */
- if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
- struct tcp_payload_t *payload;
- struct iphdr *iph;
+ if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
ASSERT(first[0].iov_len >= sizeof(struct virtio_net_hdr_mrg_rxbuf) +
sizeof(struct ethhdr) + sizeof(struct iphdr) +
sizeof(struct tcphdr));
@@ -310,17 +301,7 @@ static void tcp_vu_prepare(const struct ctx *c,
iph = vu_ip(base);
*iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
payload = vu_payloadv4(base);
- memset(&payload->th, 0, sizeof(payload->th));
- payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
- payload->th.ack = 1;
-
- tcp_fill_headers4(conn, NULL, iph, payload, dlen,
- *check, conn->seq_to_tap, true);
- *check = &iph->check;
} else {
- struct tcp_payload_t *payload;
- struct ipv6hdr *ip6h;
-
ASSERT(first[0].iov_len >= sizeof(struct virtio_net_hdr_mrg_rxbuf) +
sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
sizeof(struct tcphdr));
@@ -331,10 +312,17 @@ static void tcp_vu_prepare(const struct ctx *c,
*ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
payload = vu_payloadv6(base);
- memset(&payload->th, 0, sizeof(payload->th));
- payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
- payload->th.ack = 1;
+ }
+ memset(&payload->th, 0, sizeof(payload->th));
+ payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
+ payload->th.ack = 1;
+
+ if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
+ tcp_fill_headers4(conn, NULL, iph, payload, dlen,
+ *check, conn->seq_to_tap, true);
+ *check = &iph->check;
+ } else {
tcp_fill_headers6(conn, NULL, ip6h, payload, dlen,
conn->seq_to_tap, true);
}
--
2.47.0
Thread overview: 24+ messages
2024-11-04 8:39 [PATCH v9 00/20] (RFC) Add vhost-user support to passt. (part 3) David Gibson
2024-11-04 8:39 ` [PATCH v9 01/20] packet: replace struct desc by struct iovec David Gibson
2024-11-04 8:39 ` [PATCH v9 02/20] vhost-user: introduce virtio API David Gibson
2024-11-04 8:39 ` [PATCH v9 03/20] vhost-user: introduce vhost-user API David Gibson
2024-11-04 8:39 ` [PATCH v9 04/20] udp: Prepare udp.c to be shared with vhost-user David Gibson
2024-11-04 8:39 ` [PATCH v9 05/20] tcp: Export headers functions David Gibson
2024-11-04 8:39 ` [PATCH v9 06/20] passt: rename tap_sock_init() to tap_backend_init() David Gibson
2024-11-04 8:39 ` [PATCH v9 07/20] vhost-user: add vhost-user David Gibson
2024-11-04 8:39 ` [PATCH v9 08/20] test: Add tests for passt in vhost-user mode David Gibson
2024-11-04 8:39 ` [PATCH v9 09/20] vhost-user: Fix some trivial errors in comments David Gibson
2024-11-04 8:39 ` [PATCH v9 10/20] isolation: Abort if mode is not set David Gibson
2024-11-04 8:39 ` [PATCH v9 11/20] vhost-user: Consistent sense when encoding IP version as boolean David Gibson
2024-11-04 8:39 ` [PATCH v9 12/20] test: Fix trivial errors in two_guests_vu tests David Gibson
2024-11-04 8:39 ` [PATCH v9 13/20] tcp: Use only netinet/tcp.h instead of linux/tcp.h David Gibson
2024-11-04 8:39 ` David Gibson [this message]
2024-11-04 8:39 ` [PATCH v9 15/20] tcp: Move tcp_l2_buf_fill_headers() to tcp_buf.c David Gibson
2024-11-04 8:39 ` [PATCH v9 16/20] tcp: Adjust iov_len before filling headers David Gibson
2024-11-04 8:40 ` [PATCH v9 17/20] tcp: Pass TCP header and payload separately to tcp_update_check_tcp[46]() David Gibson
2024-11-04 8:40 ` [PATCH v9 18/20] tcp: Pass TCP header and payload separately to tcp_fill_headers[46]() David Gibson
2024-11-04 8:40 ` [PATCH v9 19/20] tcp: Merge tcp_update_check_tcp[46]() David Gibson
2024-11-04 8:40 ` [PATCH v9 20/20] tcp: Merge tcp_fill_headers[46]() with each other David Gibson
2024-11-04 9:18 ` [PATCH v9 00/20] (RFC) Add vhost-user support to passt. (part 3) Stefano Brivio
2024-11-04 9:59 ` David Gibson
2024-11-05 1:10 ` David Gibson