public inbox for passt-dev@passt.top
 help / color / mirror / code / Atom feed
From: Laurent Vivier <lvivier@redhat.com>
To: passt-dev@passt.top
Cc: Laurent Vivier <lvivier@redhat.com>
Subject: [PATCH 21/24] vhost-user: use guest buffer directly in vu_handle_tx()
Date: Fri,  2 Feb 2024 15:11:48 +0100	[thread overview]
Message-ID: <20240202141151.3762941-22-lvivier@redhat.com> (raw)
In-Reply-To: <20240202141151.3762941-1-lvivier@redhat.com>

Check that the buffer address is correctly within the mmap'ed memory.

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 packet.c     |  6 +++++
 packet.h     |  2 ++
 tap.c        | 39 +++++++++++++++++++++++++++----
 tap.h        |  1 +
 vhost_user.c | 66 ++++++++++++++++++++++++++++++++--------------------
 5 files changed, 84 insertions(+), 30 deletions(-)

diff --git a/packet.c b/packet.c
index af2a539a1794..3c5fc39df6d7 100644
--- a/packet.c
+++ b/packet.c
@@ -25,6 +25,12 @@
 static int packet_check_range(const struct pool *p, size_t offset, size_t len,
 			      const char *start, const char *func, int line)
 {
+	ASSERT(p->buf);
+
+	if (p->buf_size == 0)
+		return vu_packet_check_range((void *)p->buf, offset, len, start,
+					     func, line);
+
 	if (start < p->buf) {
 		if (func) {
 			trace("add packet start %p before buffer start %p, "
diff --git a/packet.h b/packet.h
index 8377dcf678bb..0aec6d9410aa 100644
--- a/packet.h
+++ b/packet.h
@@ -22,6 +22,8 @@ struct pool {
 	struct iovec pkt[1];
 };
 
+int vu_packet_check_range(void *buf, size_t offset, size_t len,
+			  const char *start, const char *func, int line);
 void packet_add_do(struct pool *p, size_t len, const char *start,
 		   const char *func, int line);
 void *packet_get_do(const struct pool *p, const size_t idx,
diff --git a/tap.c b/tap.c
index c2a917bc00ca..930e48689497 100644
--- a/tap.c
+++ b/tap.c
@@ -626,7 +626,7 @@ resume:
 		if (!eh)
 			continue;
 		if (ntohs(eh->h_proto) == ETH_P_ARP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);
 
 			packet_add(pkt, l2_len, (char *)eh);
 			arp(c, pkt);
@@ -656,7 +656,7 @@ resume:
 			continue;
 
 		if (iph->protocol == IPPROTO_ICMP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);
 
 			if (c->no_icmp)
 				continue;
@@ -675,7 +675,7 @@ resume:
 			continue;
 
 		if (iph->protocol == IPPROTO_UDP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);
 
 			packet_add(pkt, l2_len, (char *)eh);
 			if (dhcp(c, pkt))
@@ -815,7 +815,7 @@ resume:
 		}
 
 		if (proto == IPPROTO_ICMPV6) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);
 
 			if (c->no_icmp)
 				continue;
@@ -839,7 +839,7 @@ resume:
 		uh = (struct udphdr *)l4h;
 
 		if (proto == IPPROTO_UDP) {
-			PACKET_POOL_P(pkt, 1, in->buf, sizeof(pkt_buf));
+			PACKET_POOL_P(pkt, 1, in->buf, in->buf_size);
 
 			packet_add(pkt, l4_len, l4h);
 
@@ -1291,6 +1291,23 @@ static void tap_sock_tun_init(struct ctx *c)
 	epoll_ctl(c->epollfd, EPOLL_CTL_ADD, c->fd_tap, &ev);
 }
 
+void tap_sock_update_buf(void *base, size_t size)
+{
+	int i;
+
+	pool_tap4_storage.buf = base;
+	pool_tap4_storage.buf_size = size;
+	pool_tap6_storage.buf = base;
+	pool_tap6_storage.buf_size = size;
+
+	for (i = 0; i < TAP_SEQS; i++) {
+		tap4_l4[i].p.buf = base;
+		tap4_l4[i].p.buf_size = size;
+		tap6_l4[i].p.buf = base;
+		tap6_l4[i].p.buf_size = size;
+	}
+}
+
 /**
  * tap_sock_init() - Create and set up AF_UNIX socket or tuntap file descriptor
  * @c:		Execution context
@@ -1302,10 +1319,22 @@ void tap_sock_init(struct ctx *c)
 
 	pool_tap4_storage = PACKET_INIT(pool_tap4, TAP_MSGS, pkt_buf, sz);
 	pool_tap6_storage = PACKET_INIT(pool_tap6, TAP_MSGS, pkt_buf, sz);
+	if (c->mode == MODE_VU) {
+		pool_tap4_storage.buf = NULL;
+		pool_tap4_storage.buf_size = 0;
+		pool_tap6_storage.buf = NULL;
+		pool_tap6_storage.buf_size = 0;
+	}
 
 	for (i = 0; i < TAP_SEQS; i++) {
 		tap4_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, pkt_buf, sz);
 		tap6_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, pkt_buf, sz);
+		if (c->mode == MODE_VU) {
+			tap4_l4[i].p.buf = NULL;
+			tap4_l4[i].p.buf_size = 0;
+			tap6_l4[i].p.buf = NULL;
+			tap6_l4[i].p.buf_size = 0;
+		}
 	}
 
 	if (c->fd_tap != -1) { /* Passed as --fd */
diff --git a/tap.h b/tap.h
index ee839d4f09dc..6823c9b32313 100644
--- a/tap.h
+++ b/tap.h
@@ -82,6 +82,7 @@ void tap_handler_pasta(struct ctx *c, uint32_t events,
 void tap_handler_passt(struct ctx *c, uint32_t events,
 		       const struct timespec *now);
 void tap_sock_reset(struct ctx *c);
+void tap_sock_update_buf(void *base, size_t size);
 void tap_sock_init(struct ctx *c);
 void pool_flush_all(void);
 void tap_handler_all(struct ctx *c, const struct timespec *now);
diff --git a/vhost_user.c b/vhost_user.c
index 2acd72398e3a..9cc07c8312c0 100644
--- a/vhost_user.c
+++ b/vhost_user.c
@@ -334,6 +334,25 @@ static bool map_ring(VuDev *vdev, VuVirtq *vq)
 	return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
 }
 
+int vu_packet_check_range(void *buf, size_t offset, size_t len, const char *start,
+			  const char *func, int line)
+{
+	VuDevRegion *dev_region;
+
+	for (dev_region = buf; dev_region->mmap_addr; dev_region++) {
+		if ((char *)dev_region->mmap_addr <= start &&
+		    start + offset + len < (char *)dev_region->mmap_addr +
+					   dev_region->mmap_offset +
+					   dev_region->size)
+			return 0;
+	}
+	if (func) {
+		trace("cannot find region, %s:%i", func, line);
+	}
+
+	return -1;
+}
+
 /*
  * #syscalls:passt mmap munmap
  */
@@ -400,6 +419,12 @@ static bool vu_set_mem_table_exec(VuDev *vdev,
 		}
 	}
 
+	/* XXX */
+	ASSERT(vdev->nregions < VHOST_USER_MAX_RAM_SLOTS - 1);
+	vdev->regions[vdev->nregions].mmap_addr = 0; /* mark EOF for vu_packet_check_range() */
+
+	tap_sock_update_buf(vdev->regions, 0);
+
 	return false;
 }
 
@@ -650,8 +675,8 @@ static void vu_handle_tx(VuDev *vdev, int index)
 	VuVirtq *vq = &vdev->vq[index];
 	int hdrlen = vdev->hdrlen;
 	struct timespec now;
-	char *p;
-	size_t n;
+	unsigned int indexes[VIRTQUEUE_MAX_SIZE];
+	int count;
 
 	if (index % 2 != VHOST_USER_TX_QUEUE) {
 		debug("index %d is not an TX queue", index);
@@ -660,14 +685,11 @@ static void vu_handle_tx(VuDev *vdev, int index)
 
 	clock_gettime(CLOCK_MONOTONIC, &now);
 
-	p = pkt_buf;
-
 	pool_flush_all();
 
+	count = 0;
 	while (1) {
 		VuVirtqElement *elem;
-		unsigned int out_num;
-		struct iovec sg[VIRTQUEUE_MAX_SIZE], *out_sg;
 
 		ASSERT(index == VHOST_USER_TX_QUEUE);
 		elem = vu_queue_pop(vdev, vq, sizeof(VuVirtqElement), buffer[index]);
@@ -675,32 +697,26 @@ static void vu_handle_tx(VuDev *vdev, int index)
 			break;
 		}
 
-		out_num = elem->out_num;
-		out_sg = elem->out_sg;
-		if (out_num < 1) {
+		if (elem->out_num < 1) {
 			debug("virtio-net header not in first element");
 			break;
 		}
+		ASSERT(elem->out_num == 1);
 
-		if (hdrlen) {
-			unsigned sg_num;
-
-			sg_num = iov_copy(sg, ARRAY_SIZE(sg), out_sg, out_num,
-					  hdrlen, -1);
-			out_num = sg_num;
-			out_sg = sg;
-		}
-
-		n = iov_to_buf(out_sg, out_num, 0, p, TAP_BUF_FILL);
-
-		packet_add_all(c, n, p);
-
-		p += n;
+		packet_add_all(c, elem->out_sg[0].iov_len - hdrlen,
+			       (char *)elem->out_sg[0].iov_base + hdrlen);
+		indexes[count] = elem->index;
+		count++;
+	}
+	tap_handler_all(c, &now);
 
-		vu_queue_push(vdev, vq, elem, 0);
+	if (count) {
+		int i;
+		for (i = 0; i < count; i++)
+			vu_queue_fill_by_index(vdev, vq, indexes[i], 0, i);
+		vu_queue_flush(vdev, vq, count);
 		vu_queue_notify(vdev, vq);
 	}
-	tap_handler_all(c, &now);
 }
 
 void vu_kick_cb(struct ctx *c, union epoll_ref ref)
-- 
@@ -334,6 +334,25 @@ static bool map_ring(VuDev *vdev, VuVirtq *vq)
 	return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
 }
 
+int vu_packet_check_range(void *buf, size_t offset, size_t len, const char *start,
+			  const char *func, int line)
+{
+	VuDevRegion *dev_region;
+
+	for (dev_region = buf; dev_region->mmap_addr; dev_region++) {
+		if ((char *)dev_region->mmap_addr <= start &&
+		    start + offset + len < (char *)dev_region->mmap_addr +
+					   dev_region->mmap_offset +
+					   dev_region->size)
+			return 0;
+	}
+	if (func) {
+		trace("cannot find region, %s:%i", func, line);
+	}
+
+	return -1;
+}
+
 /*
  * #syscalls:passt mmap munmap
  */
@@ -400,6 +419,12 @@ static bool vu_set_mem_table_exec(VuDev *vdev,
 		}
 	}
 
+	/* XXX */
+	ASSERT(vdev->nregions < VHOST_USER_MAX_RAM_SLOTS - 1);
+	vdev->regions[vdev->nregions].mmap_addr = 0; /* mark EOF for vu_packet_check_range() */
+
+	tap_sock_update_buf(vdev->regions, 0);
+
 	return false;
 }
 
@@ -650,8 +675,8 @@ static void vu_handle_tx(VuDev *vdev, int index)
 	VuVirtq *vq = &vdev->vq[index];
 	int hdrlen = vdev->hdrlen;
 	struct timespec now;
-	char *p;
-	size_t n;
+	unsigned int indexes[VIRTQUEUE_MAX_SIZE];
+	int count;
 
 	if (index % 2 != VHOST_USER_TX_QUEUE) {
 		debug("index %d is not an TX queue", index);
@@ -660,14 +685,11 @@ static void vu_handle_tx(VuDev *vdev, int index)
 
 	clock_gettime(CLOCK_MONOTONIC, &now);
 
-	p = pkt_buf;
-
 	pool_flush_all();
 
+	count = 0;
 	while (1) {
 		VuVirtqElement *elem;
-		unsigned int out_num;
-		struct iovec sg[VIRTQUEUE_MAX_SIZE], *out_sg;
 
 		ASSERT(index == VHOST_USER_TX_QUEUE);
 		elem = vu_queue_pop(vdev, vq, sizeof(VuVirtqElement), buffer[index]);
@@ -675,32 +697,26 @@ static void vu_handle_tx(VuDev *vdev, int index)
 			break;
 		}
 
-		out_num = elem->out_num;
-		out_sg = elem->out_sg;
-		if (out_num < 1) {
+		if (elem->out_num < 1) {
 			debug("virtio-net header not in first element");
 			break;
 		}
+		ASSERT(elem->out_num == 1);
 
-		if (hdrlen) {
-			unsigned sg_num;
-
-			sg_num = iov_copy(sg, ARRAY_SIZE(sg), out_sg, out_num,
-					  hdrlen, -1);
-			out_num = sg_num;
-			out_sg = sg;
-		}
-
-		n = iov_to_buf(out_sg, out_num, 0, p, TAP_BUF_FILL);
-
-		packet_add_all(c, n, p);
-
-		p += n;
+		packet_add_all(c, elem->out_sg[0].iov_len - hdrlen,
+			       (char *)elem->out_sg[0].iov_base + hdrlen);
+		indexes[count] = elem->index;
+		count++;
+	}
+	tap_handler_all(c, &now);
 
-		vu_queue_push(vdev, vq, elem, 0);
+	if (count) {
+		int i;
+		for (i = 0; i < count; i++)
+			vu_queue_fill_by_index(vdev, vq, indexes[i], 0, i);
+		vu_queue_flush(vdev, vq, count);
 		vu_queue_notify(vdev, vq);
 	}
-	tap_handler_all(c, &now);
 }
 
 void vu_kick_cb(struct ctx *c, union epoll_ref ref)
-- 
2.42.0


  parent reply	other threads:[~2024-02-02 14:11 UTC|newest]

Thread overview: 83+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-02-02 14:11 [PATCH 00/24] Add vhost-user support to passt Laurent Vivier
2024-02-02 14:11 ` [PATCH 01/24] iov: add some functions to manage iovec Laurent Vivier
2024-02-05  5:57   ` David Gibson
2024-02-06 14:28     ` Laurent Vivier
2024-02-07  1:01       ` David Gibson
2024-02-07 10:00         ` Laurent Vivier
2024-02-06 16:10   ` Stefano Brivio
2024-02-07 14:02     ` Laurent Vivier
2024-02-07 14:57       ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 02/24] pcap: add pcap_iov() Laurent Vivier
2024-02-05  6:25   ` David Gibson
2024-02-06 16:10   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 03/24] checksum: align buffers Laurent Vivier
2024-02-05  6:02   ` David Gibson
2024-02-07  9:01     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 04/24] checksum: add csum_iov() Laurent Vivier
2024-02-05  6:07   ` David Gibson
2024-02-07  9:02   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 05/24] util: move IP stuff from util.[ch] to ip.[ch] Laurent Vivier
2024-02-05  6:13   ` David Gibson
2024-02-07  9:03     ` Stefano Brivio
2024-02-08  0:04       ` David Gibson
2024-02-02 14:11 ` [PATCH 06/24] ip: move duplicate IPv4 checksum function to ip.h Laurent Vivier
2024-02-05  6:16   ` David Gibson
2024-02-07 10:40   ` Stefano Brivio
2024-02-07 23:43     ` David Gibson
2024-02-02 14:11 ` [PATCH 07/24] ip: introduce functions to compute the header part checksum for TCP/UDP Laurent Vivier
2024-02-05  6:20   ` David Gibson
2024-02-07 10:41   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 08/24] tcp: extract buffer management from tcp_send_flag() Laurent Vivier
2024-02-06  0:24   ` David Gibson
2024-02-08 16:57   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 09/24] tcp: extract buffer management from tcp_conn_tap_mss() Laurent Vivier
2024-02-06  0:47   ` David Gibson
2024-02-08 16:59   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 10/24] tcp: rename functions that manage buffers Laurent Vivier
2024-02-06  1:48   ` David Gibson
2024-02-08 17:10     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 11/24] tcp: move buffers management functions to their own file Laurent Vivier
2024-02-02 14:11 ` [PATCH 12/24] tap: make tap_update_mac() generic Laurent Vivier
2024-02-06  1:49   ` David Gibson
2024-02-08 17:10     ` Stefano Brivio
2024-02-09  5:02       ` David Gibson
2024-02-02 14:11 ` [PATCH 13/24] tap: export pool_flush()/tapX_handler()/packet_add() Laurent Vivier
2024-02-02 14:29   ` Laurent Vivier
2024-02-06  1:52   ` David Gibson
2024-02-11 23:15   ` Stefano Brivio
2024-02-12  2:22     ` David Gibson
2024-02-02 14:11 ` [PATCH 14/24] udp: move udpX_l2_buf_t and udpX_l2_mh_sock out of udp_update_hdrX() Laurent Vivier
2024-02-06  1:59   ` David Gibson
2024-02-11 23:16   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 15/24] udp: rename udp_sock_handler() to udp_buf_sock_handler() Laurent Vivier
2024-02-06  2:14   ` David Gibson
2024-02-11 23:17     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 16/24] packet: replace struct desc by struct iovec Laurent Vivier
2024-02-06  2:25   ` David Gibson
2024-02-11 23:18     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 17/24] vhost-user: compare mode MODE_PASTA and not MODE_PASST Laurent Vivier
2024-02-06  2:29   ` David Gibson
2024-02-02 14:11 ` [PATCH 18/24] vhost-user: introduce virtio API Laurent Vivier
2024-02-06  3:51   ` David Gibson
2024-02-11 23:18     ` Stefano Brivio
2024-02-12  2:26       ` David Gibson
2024-02-02 14:11 ` [PATCH 19/24] vhost-user: introduce vhost-user API Laurent Vivier
2024-02-07  2:13   ` David Gibson
2024-02-02 14:11 ` [PATCH 20/24] vhost-user: add vhost-user Laurent Vivier
2024-02-07  2:40   ` David Gibson
2024-02-11 23:19     ` Stefano Brivio
2024-02-12  2:47       ` David Gibson
2024-02-13 15:22         ` Stefano Brivio
2024-02-14  2:05           ` David Gibson
2024-02-11 23:19   ` Stefano Brivio
2024-02-12  2:49     ` David Gibson
2024-02-12 10:02       ` Laurent Vivier
2024-02-12 16:56         ` Stefano Brivio
2024-02-02 14:11 ` Laurent Vivier [this message]
2024-02-09  4:26   ` [PATCH 21/24] vhost-user: use guest buffer directly in vu_handle_tx() David Gibson
2024-02-02 14:11 ` [PATCH 22/24] tcp: vhost-user RX nocopy Laurent Vivier
2024-02-09  4:57   ` David Gibson
2024-02-02 14:11 ` [PATCH 23/24] udp: " Laurent Vivier
2024-02-09  5:00   ` David Gibson
2024-02-02 14:11 ` [PATCH 24/24] vhost-user: remove tap_send_frames_vu() Laurent Vivier
2024-02-09  5:01   ` David Gibson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240202141151.3762941-22-lvivier@redhat.com \
    --to=lvivier@redhat.com \
    --cc=passt-dev@passt.top \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
Code repositories for project(s) associated with this public inbox

	https://passt.top/passt

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for IMAP folder(s).