public inbox for passt-dev@passt.top
 help / color / mirror / code / Atom feed
From: Stefano Brivio <sbrivio@redhat.com>
To: Laurent Vivier <lvivier@redhat.com>
Cc: passt-dev@passt.top
Subject: Re: [PATCH v6 7/7] vhost-user: add vhost-user
Date: Wed, 9 Oct 2024 10:23:53 +0200	[thread overview]
Message-ID: <20241009102353.59aa73b7@elisabeth> (raw)
In-Reply-To: <20241007144054.41868-8-lvivier@redhat.com>

[-- Attachment #1: Type: text/plain, Size: 775 bytes --]

On Mon,  7 Oct 2024 16:40:53 +0200
Laurent Vivier <lvivier@redhat.com> wrote:

> add virtio and vhost-user functions to connect with QEMU.
> 
>   $ ./passt --vhost-user
> 
> and
> 
>   # qemu-system-x86_64 ... -m 4G \
>         -object memory-backend-memfd,id=memfd0,share=on,size=4G \
>         -numa node,memdev=memfd0 \
>         -chardev socket,id=chr0,path=/tmp/passt_1.socket \
>         -netdev vhost-user,id=netdev0,chardev=chr0 \
>         -device virtio-net,mac=9a:2b:2c:2d:2e:2f,netdev=netdev0 \
>         ...
> 
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>

This has a trivial conflict with commit ff63ac922a40 ("conf: Add
--dns-host option to configure host side nameserver"), which I had
actually applied meanwhile. Rebased patch attached.

-- 
Stefano

[-- Attachment #2: 0001-vhost-user-add-vhost-user.patch --]
[-- Type: text/x-patch, Size: 56715 bytes --]

From 599bd9da7d8140f22404202e7ec1c044229b292e Mon Sep 17 00:00:00 2001
From: Laurent Vivier <lvivier@redhat.com>
Date: Mon, 7 Oct 2024 16:40:53 +0200
Subject: [PATCH] vhost-user: add vhost-user

add virtio and vhost-user functions to connect with QEMU.

  $ ./passt --vhost-user

and

  # qemu-system-x86_64 ... -m 4G \
        -object memory-backend-memfd,id=memfd0,share=on,size=4G \
        -numa node,memdev=memfd0 \
        -chardev socket,id=chr0,path=/tmp/passt_1.socket \
        -netdev vhost-user,id=netdev0,chardev=chr0 \
        -device virtio-net,mac=9a:2b:2c:2d:2e:2f,netdev=netdev0 \
        ...

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
 Makefile     |   6 +-
 conf.c       |  21 ++-
 epoll_type.h |   4 +
 iov.c        |   1 -
 isolation.c  |  15 +-
 packet.c     |  11 ++
 packet.h     |   8 +-
 passt.1      |  10 +-
 passt.c      |   9 +
 passt.h      |   6 +
 pcap.c       |   1 -
 tap.c        |  80 +++++++--
 tap.h        |   5 +-
 tcp.c        |   7 +
 tcp_vu.c     | 476 +++++++++++++++++++++++++++++++++++++++++++++++++++
 tcp_vu.h     |  12 ++
 udp.c        |  10 ++
 udp_vu.c     | 332 +++++++++++++++++++++++++++++++++++
 udp_vu.h     |  13 ++
 vhost_user.c |  38 ++--
 vhost_user.h |   4 +-
 virtio.c     |   5 -
 vu_common.c  | 327 +++++++++++++++++++++++++++++++++++
 vu_common.h  |  47 +++++
 24 files changed, 1392 insertions(+), 56 deletions(-)
 create mode 100644 tcp_vu.c
 create mode 100644 tcp_vu.h
 create mode 100644 udp_vu.c
 create mode 100644 udp_vu.h
 create mode 100644 vu_common.c
 create mode 100644 vu_common.h

diff --git a/Makefile b/Makefile
index 0e8ed60..1e8910d 100644
--- a/Makefile
+++ b/Makefile
@@ -54,7 +54,8 @@ FLAGS += -DDUAL_STACK_SOCKETS=$(DUAL_STACK_SOCKETS)
 PASST_SRCS = arch.c arp.c checksum.c conf.c dhcp.c dhcpv6.c flow.c fwd.c \
 	icmp.c igmp.c inany.c iov.c ip.c isolation.c lineread.c log.c mld.c \
 	ndp.c netlink.c packet.c passt.c pasta.c pcap.c pif.c tap.c tcp.c \
-	tcp_buf.c tcp_splice.c udp.c udp_flow.c util.c vhost_user.c virtio.c
+	tcp_buf.c tcp_splice.c tcp_vu.c udp.c udp_flow.c udp_vu.c util.c \
+	vhost_user.c virtio.c vu_common.c
 QRAP_SRCS = qrap.c
 SRCS = $(PASST_SRCS) $(QRAP_SRCS)
 
@@ -64,7 +65,8 @@ PASST_HEADERS = arch.h arp.h checksum.h conf.h dhcp.h dhcpv6.h flow.h fwd.h \
 	flow_table.h icmp.h icmp_flow.h inany.h iov.h ip.h isolation.h \
 	lineread.h log.h ndp.h netlink.h packet.h passt.h pasta.h pcap.h pif.h \
 	siphash.h tap.h tcp.h tcp_buf.h tcp_conn.h tcp_internal.h tcp_splice.h \
-	udp.h udp_flow.h util.h vhost_user.h virtio.h
+	tcp_vu.h udp.h udp_flow.h udp_internal.h udp_vu.h util.h vhost_user.h \
+	virtio.h vu_common.h
 HEADERS = $(PASST_HEADERS) seccomp.h
 
 C := \#include <linux/tcp.h>\nstruct tcp_info x = { .tcpi_snd_wnd = 0 };
diff --git a/conf.c b/conf.c
index c631019..29d6e41 100644
--- a/conf.c
+++ b/conf.c
@@ -45,6 +45,7 @@
 #include "lineread.h"
 #include "isolation.h"
 #include "log.h"
+#include "vhost_user.h"
 
 /**
  * next_chunk - Return the next piece of a string delimited by a character
@@ -762,9 +763,14 @@ static void usage(const char *name, FILE *f, int status)
 			"    default: same interface name as external one\n");
 	} else {
 		fprintf(f,
-			"  -s, --socket PATH	UNIX domain socket path\n"
+			"  -s, --socket, --socket-path PATH	UNIX domain socket path\n"
 			"    default: probe free path starting from "
 			UNIX_SOCK_PATH "\n", 1);
+		fprintf(f,
+			"  --vhost-user		Enable vhost-user mode\n"
+			"    UNIX domain socket is provided by -s option\n"
+			"  --print-capabilities	print back-end capabilities in JSON format,\n"
+			"    only meaningful for vhost-user mode\n");
 	}
 
 	fprintf(f,
@@ -1290,6 +1296,10 @@ void conf(struct ctx *c, int argc, char **argv)
 		{"map-host-loopback", required_argument, NULL,		21 },
 		{"map-guest-addr", required_argument,	NULL,		22 },
 		{"dns-host",	required_argument,	NULL,		24 },
+		{"vhost-user",	no_argument,		NULL,		25 },
+		/* vhost-user backend program convention */
+		{"print-capabilities", no_argument,	NULL,		26 },
+		{"socket-path",	required_argument,	NULL,		's' },
 		{ 0 },
 	};
 	const char *logname = (c->mode == MODE_PASTA) ? "pasta" : "passt";
@@ -1478,6 +1488,15 @@ void conf(struct ctx *c, int argc, char **argv)
 				break;
 
 			die("Invalid host nameserver address: %s", optarg);
+		case 25:
+			if (c->mode == MODE_PASTA) {
+				err("--vhost-user is for passt mode only");
+				usage(argv[0], stdout, EXIT_SUCCESS);
+			}
+			c->mode = MODE_VU;
+			break;
+		case 26:
+			vu_print_capabilities();
 			break;
 		case 'd':
 			c->debug = 1;
diff --git a/epoll_type.h b/epoll_type.h
index 0ad1efa..f3ef415 100644
--- a/epoll_type.h
+++ b/epoll_type.h
@@ -36,6 +36,10 @@ enum epoll_type {
 	EPOLL_TYPE_TAP_PASST,
 	/* socket listening for qemu socket connections */
 	EPOLL_TYPE_TAP_LISTEN,
+	/* vhost-user command socket */
+	EPOLL_TYPE_VHOST_CMD,
+	/* vhost-user kick event socket */
+	EPOLL_TYPE_VHOST_KICK,
 
 	EPOLL_NUM_TYPES,
 };
diff --git a/iov.c b/iov.c
index 3f9e229..3741db2 100644
--- a/iov.c
+++ b/iov.c
@@ -68,7 +68,6 @@ size_t iov_skip_bytes(const struct iovec *iov, size_t n,
  *
  * Returns:    The number of bytes successfully copied.
  */
-/* cppcheck-suppress unusedFunction */
 size_t iov_from_buf(const struct iovec *iov, size_t iov_cnt,
 		    size_t offset, const void *buf, size_t bytes)
 {
diff --git a/isolation.c b/isolation.c
index 45fba1e..c2a3c7b 100644
--- a/isolation.c
+++ b/isolation.c
@@ -379,12 +379,19 @@ void isolate_postfork(const struct ctx *c)
 
 	prctl(PR_SET_DUMPABLE, 0);
 
-	if (c->mode == MODE_PASTA) {
-		prog.len = (unsigned short)ARRAY_SIZE(filter_pasta);
-		prog.filter = filter_pasta;
-	} else {
+	switch (c->mode) {
+	case MODE_PASST:
 		prog.len = (unsigned short)ARRAY_SIZE(filter_passt);
 		prog.filter = filter_passt;
+		break;
+	case MODE_PASTA:
+		prog.len = (unsigned short)ARRAY_SIZE(filter_pasta);
+		prog.filter = filter_pasta;
+		break;
+	case MODE_VU:
+		prog.len = (unsigned short)ARRAY_SIZE(filter_vu);
+		prog.filter = filter_vu;
+		break;
 	}
 
 	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
diff --git a/packet.c b/packet.c
index 3748996..e5a78d0 100644
--- a/packet.c
+++ b/packet.c
@@ -36,6 +36,17 @@
 static int packet_check_range(const struct pool *p, size_t offset, size_t len,
 			      const char *start, const char *func, int line)
 {
+	if (p->buf_size == 0) {
+		int ret;
+
+		ret = vu_packet_check_range((void *)p->buf, offset, len, start);
+
+		if (ret == -1)
+			trace("cannot find region, %s:%i", func, line);
+
+		return ret;
+	}
+
 	if (start < p->buf) {
 		trace("packet start %p before buffer start %p, "
 		      "%s:%i", (void *)start, (void *)p->buf, func, line);
diff --git a/packet.h b/packet.h
index 8377dcf..3f70e94 100644
--- a/packet.h
+++ b/packet.h
@@ -8,8 +8,10 @@
 
 /**
  * struct pool - Generic pool of packets stored in a buffer
- * @buf:	Buffer storing packet descriptors
- * @buf_size:	Total size of buffer
+ * @buf:	Buffer storing packet descriptors,
+ * 		a struct vu_dev_region array for passt vhost-user mode
+ * @buf_size:	Total size of buffer,
+ * 		0 for passt vhost-user mode
  * @size:	Number of usable descriptors for the pool
  * @count:	Number of used descriptors for the pool
  * @pkt:	Descriptors: see macros below
@@ -22,6 +24,8 @@ struct pool {
 	struct iovec pkt[1];
 };
 
+int vu_packet_check_range(void *buf, size_t offset, size_t len,
+			  const char *start);
 void packet_add_do(struct pool *p, size_t len, const char *start,
 		   const char *func, int line);
 void *packet_get_do(const struct pool *p, const size_t idx,
diff --git a/passt.1 b/passt.1
index ef33267..96532dd 100644
--- a/passt.1
+++ b/passt.1
@@ -397,12 +397,20 @@ interface address are configured on a given host interface.
 .SS \fBpasst\fR-only options
 
 .TP
-.BR \-s ", " \-\-socket " " \fIpath
+.BR \-s ", " \-\-socket-path ", " \-\-socket " " \fIpath
 Path for UNIX domain socket used by \fBqemu\fR(1) or \fBqrap\fR(1) to connect to
 \fBpasst\fR.
 Default is to probe a free socket, not accepting connections, starting from
 \fI/tmp/passt_1.socket\fR to \fI/tmp/passt_64.socket\fR.
 
+.TP
+.BR \-\-vhost-user
+Enable vhost-user. The vhost-user command socket is provided by \fB\-\-socket\fR.
+
+.TP
+.BR \-\-print-capabilities
+Print back-end capabilities in JSON format, only meaningful for vhost-user mode.
+
 .TP
 .BR \-F ", " \-\-fd " " \fIFD
 Pass a pre-opened, connected socket to \fBpasst\fR. Usually the socket is opened
diff --git a/passt.c b/passt.c
index 79093ee..2d105e8 100644
--- a/passt.c
+++ b/passt.c
@@ -52,6 +52,7 @@
 #include "arch.h"
 #include "log.h"
 #include "tcp_splice.h"
+#include "vu_common.h"
 
 #define EPOLL_EVENTS		8
 
@@ -74,6 +75,8 @@ char *epoll_type_str[] = {
 	[EPOLL_TYPE_TAP_PASTA]		= "/dev/net/tun device",
 	[EPOLL_TYPE_TAP_PASST]		= "connected qemu socket",
 	[EPOLL_TYPE_TAP_LISTEN]		= "listening qemu socket",
+	[EPOLL_TYPE_VHOST_CMD]		= "vhost-user command socket",
+	[EPOLL_TYPE_VHOST_KICK]		= "vhost-user kick socket",
 };
 static_assert(ARRAY_SIZE(epoll_type_str) == EPOLL_NUM_TYPES,
 	      "epoll_type_str[] doesn't match enum epoll_type");
@@ -360,6 +363,12 @@ loop:
 		case EPOLL_TYPE_PING:
 			icmp_sock_handler(&c, ref);
 			break;
+		case EPOLL_TYPE_VHOST_CMD:
+			vu_control_handler(c.vdev, c.fd_tap, eventmask);
+			break;
+		case EPOLL_TYPE_VHOST_KICK:
+			vu_kick_cb(c.vdev, ref, &now);
+			break;
 		default:
 			/* Can't happen */
 			ASSERT(0);
diff --git a/passt.h b/passt.h
index 4908ed9..311482d 100644
--- a/passt.h
+++ b/passt.h
@@ -25,6 +25,8 @@ union epoll_ref;
 #include "fwd.h"
 #include "tcp.h"
 #include "udp.h"
+#include "udp_vu.h"
+#include "vhost_user.h"
 
 /* Default address for our end on the tap interface.  Bit 0 of byte 0 must be 0
  * (unicast) and bit 1 of byte 1 must be 1 (locally administered).  Otherwise
@@ -94,6 +96,7 @@ struct fqdn {
 enum passt_modes {
 	MODE_PASST,
 	MODE_PASTA,
+	MODE_VU,
 };
 
 /**
@@ -228,6 +231,7 @@ struct ip6_ctx {
  * @freebind:		Allow binding of non-local addresses for forwarding
  * @low_wmem:		Low probed net.core.wmem_max
  * @low_rmem:		Low probed net.core.rmem_max
+ * @vdev:		vhost-user device
  */
 struct ctx {
 	enum passt_modes mode;
@@ -289,6 +293,8 @@ struct ctx {
 
 	int low_wmem;
 	int low_rmem;
+
+	struct vu_dev *vdev;
 };
 
 void proto_update_l2_buf(const unsigned char *eth_d,
diff --git a/pcap.c b/pcap.c
index 6ee6cdf..718d6ad 100644
--- a/pcap.c
+++ b/pcap.c
@@ -140,7 +140,6 @@ void pcap_multiple(const struct iovec *iov, size_t frame_parts, unsigned int n,
  * @iovcnt:	Number of buffers (@iov entries)
  * @offset:	Offset of the L2 frame within the full data length
  */
-/* cppcheck-suppress unusedFunction */
 void pcap_iov(const struct iovec *iov, size_t iovcnt, size_t offset)
 {
 	struct timespec now;
diff --git a/tap.c b/tap.c
index 4b826fd..22d19f1 100644
--- a/tap.c
+++ b/tap.c
@@ -58,6 +58,8 @@
 #include "packet.h"
 #include "tap.h"
 #include "log.h"
+#include "vhost_user.h"
+#include "vu_common.h"
 
 /* IPv4 (plus ARP) and IPv6 message batches from tap/guest to IP handlers */
 static PACKET_POOL_NOINIT(pool_tap4, TAP_MSGS, pkt_buf);
@@ -78,16 +80,22 @@ void tap_send_single(const struct ctx *c, const void *data, size_t l2len)
 	struct iovec iov[2];
 	size_t iovcnt = 0;
 
-	if (c->mode == MODE_PASST) {
+	switch (c->mode) {
+	case MODE_PASST:
 		iov[iovcnt] = IOV_OF_LVALUE(vnet_len);
 		iovcnt++;
-	}
-
-	iov[iovcnt].iov_base = (void *)data;
-	iov[iovcnt].iov_len = l2len;
-	iovcnt++;
+		/* fall through */
+	case MODE_PASTA:
+		iov[iovcnt].iov_base = (void *)data;
+		iov[iovcnt].iov_len = l2len;
+		iovcnt++;
 
-	tap_send_frames(c, iov, iovcnt, 1);
+		tap_send_frames(c, iov, iovcnt, 1);
+		break;
+	case MODE_VU:
+		vu_send_single(c, data, l2len);
+		break;
+	}
 }
 
 /**
@@ -414,10 +422,18 @@ size_t tap_send_frames(const struct ctx *c, const struct iovec *iov,
 	if (!nframes)
 		return 0;
 
-	if (c->mode == MODE_PASTA)
+	switch (c->mode) {
+	case MODE_PASTA:
 		m = tap_send_frames_pasta(c, iov, bufs_per_frame, nframes);
-	else
+		break;
+	case MODE_PASST:
 		m = tap_send_frames_passt(c, iov, bufs_per_frame, nframes);
+		break;
+	case MODE_VU:
+		/* fall through */
+	default:
+		ASSERT(0);
+	}
 
 	if (m < nframes)
 		debug("tap: failed to send %zu frames of %zu",
@@ -976,7 +992,7 @@ void tap_add_packet(struct ctx *c, ssize_t l2len, char *p)
  * tap_sock_reset() - Handle closing or failure of connect AF_UNIX socket
  * @c:		Execution context
  */
-static void tap_sock_reset(struct ctx *c)
+void tap_sock_reset(struct ctx *c)
 {
 	info("Client connection closed%s", c->one_off ? ", exiting" : "");
 
@@ -987,6 +1003,8 @@ static void tap_sock_reset(struct ctx *c)
 	epoll_ctl(c->epollfd, EPOLL_CTL_DEL, c->fd_tap, NULL);
 	close(c->fd_tap);
 	c->fd_tap = -1;
+	if (c->mode == MODE_VU)
+		vu_cleanup(c->vdev);
 }
 
 /**
@@ -1205,6 +1223,11 @@ static void tap_backend_show_hints(struct ctx *c)
 		info("or qrap, for earlier qemu versions:");
 		info("    ./qrap 5 kvm ... -net socket,fd=5 -net nic,model=virtio");
 		break;
+	case MODE_VU:
+		info("You can start qemu with:");
+		info("    kvm ... -chardev socket,id=chr0,path=%s -netdev vhost-user,id=netdev0,chardev=chr0 -device virtio-net,netdev=netdev0 -object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE -numa node,memdev=memfd0\n",
+		     c->sock_path);
+		break;
 	}
 }
 
@@ -1232,8 +1255,8 @@ static void tap_sock_unix_init(const struct ctx *c)
  */
 void tap_listen_handler(struct ctx *c, uint32_t events)
 {
-	union epoll_ref ref = { .type = EPOLL_TYPE_TAP_PASST };
 	struct epoll_event ev = { 0 };
+	union epoll_ref ref = { 0 };
 	int v = INT_MAX / 2;
 	struct ucred ucred;
 	socklen_t len;
@@ -1273,6 +1296,10 @@ void tap_listen_handler(struct ctx *c, uint32_t events)
 		trace("tap: failed to set SO_SNDBUF to %i", v);
 
 	ref.fd = c->fd_tap;
+	if (c->mode == MODE_VU)
+		ref.type = EPOLL_TYPE_VHOST_CMD;
+	else
+		ref.type = EPOLL_TYPE_TAP_PASST;
 	ev.events = EPOLLIN | EPOLLRDHUP;
 	ev.data.u64 = ref.u64;
 	epoll_ctl(c->epollfd, EPOLL_CTL_ADD, c->fd_tap, &ev);
@@ -1339,7 +1366,7 @@ static void tap_sock_tun_init(struct ctx *c)
  * @base:	Buffer base
  * @size	Buffer size
  */
-static void tap_sock_update_pool(void *base, size_t size)
+void tap_sock_update_pool(void *base, size_t size)
 {
 	int i;
 
@@ -1353,13 +1380,15 @@ static void tap_sock_update_pool(void *base, size_t size)
 }
 
 /**
- * tap_backend_init() - Create and set up AF_UNIX socket or
- *			tuntap file descriptor
+ * tap_backend_init() - Create and set up AF_UNIX socket or tuntap file descriptor
  * @c:		Execution context
  */
 void tap_backend_init(struct ctx *c)
 {
-	tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));
+	if (c->mode == MODE_VU)
+		tap_sock_update_pool(NULL, 0);
+	else
+		tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));
 
 	if (c->fd_tap != -1) { /* Passed as --fd */
 		struct epoll_event ev = { 0 };
@@ -1367,10 +1396,17 @@ void tap_backend_init(struct ctx *c)
 
 		ASSERT(c->one_off);
 		ref.fd = c->fd_tap;
-		if (c->mode == MODE_PASST)
+		switch (c->mode) {
+		case MODE_PASST:
 			ref.type = EPOLL_TYPE_TAP_PASST;
-		else
+			break;
+		case MODE_PASTA:
 			ref.type = EPOLL_TYPE_TAP_PASTA;
+			break;
+		case MODE_VU:
+			ref.type = EPOLL_TYPE_VHOST_CMD;
+			break;
+		}
 
 		ev.events = EPOLLIN | EPOLLRDHUP;
 		ev.data.u64 = ref.u64;
@@ -1378,9 +1414,14 @@ void tap_backend_init(struct ctx *c)
 		return;
 	}
 
-	if (c->mode == MODE_PASTA) {
+	switch (c->mode) {
+	case MODE_PASTA:
 		tap_sock_tun_init(c);
-	} else {
+		break;
+	case MODE_VU:
+		vu_init(c);
+		/* fall through */
+	case MODE_PASST:
 		tap_sock_unix_init(c);
 
 		/* In passt mode, we don't know the guest's MAC address until it
@@ -1388,6 +1429,7 @@ void tap_backend_init(struct ctx *c)
 		 * first packets will reach it.
 		 */
 		memset(&c->guest_mac, 0xff, sizeof(c->guest_mac));
+		break;
 	}
 
 	tap_backend_show_hints(c);
diff --git a/tap.h b/tap.h
index 8728cc5..dfbd8b9 100644
--- a/tap.h
+++ b/tap.h
@@ -40,7 +40,8 @@ static inline struct iovec tap_hdr_iov(const struct ctx *c,
  */
 static inline void tap_hdr_update(struct tap_hdr *thdr, size_t l2len)
 {
-	thdr->vnet_len = htonl(l2len);
+	if (thdr)
+		thdr->vnet_len = htonl(l2len);
 }
 
 void tap_udp4_send(const struct ctx *c, struct in_addr src, in_port_t sport,
@@ -68,6 +69,8 @@ void tap_handler_pasta(struct ctx *c, uint32_t events,
 void tap_handler_passt(struct ctx *c, uint32_t events,
 		       const struct timespec *now);
 int tap_sock_unix_open(char *sock_path);
+void tap_sock_reset(struct ctx *c);
+void tap_sock_update_pool(void *base, size_t size);
 void tap_backend_init(struct ctx *c);
 void tap_flush_pools(void);
 void tap_handler(struct ctx *c, const struct timespec *now);
diff --git a/tcp.c b/tcp.c
index eae02b1..fd2def0 100644
--- a/tcp.c
+++ b/tcp.c
@@ -304,6 +304,7 @@
 #include "flow_table.h"
 #include "tcp_internal.h"
 #include "tcp_buf.h"
+#include "tcp_vu.h"
 
 /* MSS rounding: see SET_MSS() */
 #define MSS_DEFAULT			536
@@ -1328,6 +1329,9 @@ int tcp_prepare_flags(const struct ctx *c, struct tcp_tap_conn *conn,
 static int tcp_send_flag(const struct ctx *c, struct tcp_tap_conn *conn,
 			 int flags)
 {
+	if (c->mode == MODE_VU)
+		return tcp_vu_send_flag(c, conn, flags);
+
 	return tcp_buf_send_flag(c, conn, flags);
 }
 
@@ -1721,6 +1725,9 @@ static int tcp_sock_consume(const struct tcp_tap_conn *conn, uint32_t ack_seq)
  */
 static int tcp_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
 {
+	if (c->mode == MODE_VU)
+		return tcp_vu_data_from_sock(c, conn);
+
 	return tcp_buf_data_from_sock(c, conn);
 }
 
diff --git a/tcp_vu.c b/tcp_vu.c
new file mode 100644
index 0000000..b903a0c
--- /dev/null
+++ b/tcp_vu.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* tcp_vu.c - TCP L2 vhost-user management functions
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <netinet/ip.h>
+
+#include <sys/socket.h>
+
+#include <linux/tcp.h>
+#include <linux/virtio_net.h>
+
+#include "util.h"
+#include "ip.h"
+#include "passt.h"
+#include "siphash.h"
+#include "inany.h"
+#include "vhost_user.h"
+#include "tcp.h"
+#include "pcap.h"
+#include "flow.h"
+#include "tcp_conn.h"
+#include "flow_table.h"
+#include "tcp_vu.h"
+#include "tap.h"
+#include "tcp_internal.h"
+#include "checksum.h"
+#include "vu_common.h"
+
+static struct iovec iov_vu[VIRTQUEUE_MAX_SIZE + 1];
+static struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
+
+/**
+ * tcp_vu_l2_hdrlen() - return the size of the header in level 2 frame (TCP)
+ * @v6:		Set for IPv6 packet
+ *
+ * Return: Return the size of the header
+ */
+static size_t tcp_vu_l2_hdrlen(bool v6)
+{
+	size_t l2_hdrlen;
+
+	l2_hdrlen = sizeof(struct ethhdr) + sizeof(struct tcphdr);
+
+	if (v6)
+		l2_hdrlen += sizeof(struct ipv6hdr);
+	else
+		l2_hdrlen += sizeof(struct iphdr);
+
+	return l2_hdrlen;
+}
+
+/**
+ * tcp_vu_update_check() - Calculate TCP checksum
+ * @tapside:	Address information for one side of the flow
+ * @iov:	Pointer to the array of IO vectors
+ * @iov_used:	Length of the array
+ */
+static void tcp_vu_update_check(const struct flowside *tapside,
+			        struct iovec *iov, int iov_used)
+{
+	char *base = iov[0].iov_base;
+
+	if (inany_v4(&tapside->oaddr)) {
+		const struct iphdr *iph = vu_ip(base);
+
+		tcp_update_check_tcp4(iph, iov, iov_used,
+				      (char *)vu_payloadv4(base) - base);
+	} else {
+		const struct ipv6hdr *ip6h = vu_ip(base);
+
+		tcp_update_check_tcp6(ip6h, iov, iov_used,
+				      (char *)vu_payloadv6(base) - base);
+	}
+}
+
+/**
+ * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
+ * @c:		Execution context
+ * @conn:	Connection pointer
+ * @flags:	TCP flags: if not set, send segment only if ACK is due
+ *
+ * Return: negative error code on connection reset, 0 otherwise
+ */
+int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
+{
+	struct vu_dev *vdev = c->vdev;
+	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+	const struct flowside *tapside = TAPFLOW(conn);
+	size_t l2len, l4len, optlen, hdrlen;
+	struct ethhdr *eh;
+	int elem_cnt;
+	int nb_ack;
+	int ret;
+
+	hdrlen = tcp_vu_l2_hdrlen(CONN_V6(conn));
+
+	vu_init_elem(elem, iov_vu, 2);
+
+	elem_cnt = vu_collect_one_frame(vdev, vq, elem, 1,
+					hdrlen + OPT_MSS_LEN + OPT_WS_LEN + 1,
+					0);
+	if (elem_cnt < 1)
+		return 0;
+
+	vu_set_vnethdr(vdev, &iov_vu[0], 1, 0);
+
+	eh = vu_eth(iov_vu[0].iov_base);
+
+	memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
+	memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
+
+	if (CONN_V4(conn)) {
+		struct tcp_payload_t *payload;
+		struct iphdr *iph;
+		uint32_t seq;
+
+		eh->h_proto = htons(ETH_P_IP);
+
+		iph = vu_ip(iov_vu[0].iov_base);
+		*iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
+
+		payload = vu_payloadv4(iov_vu[0].iov_base);
+		memset(&payload->th, 0, sizeof(payload->th));
+		payload->th.doff = offsetof(struct tcp_flags_t, opts) / 4;
+		payload->th.ack = 1;
+
+		seq = conn->seq_to_tap;
+		ret = tcp_prepare_flags(c, conn, flags, &payload->th,
+					(char *)payload->data, &optlen);
+		if (ret <= 0) {
+			vu_queue_rewind(vq, 1);
+			return ret;
+		}
+
+		l4len = tcp_fill_headers4(conn, NULL, iph, payload, optlen,
+					  NULL, seq, true);
+		l2len = sizeof(*iph);
+	} else {
+		struct tcp_payload_t *payload;
+		struct ipv6hdr *ip6h;
+		uint32_t seq;
+
+		eh->h_proto = htons(ETH_P_IPV6);
+
+		ip6h = vu_ip(iov_vu[0].iov_base);
+		*ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
+
+		payload = vu_payloadv6(iov_vu[0].iov_base);
+		memset(&payload->th, 0, sizeof(payload->th));
+		payload->th.doff = offsetof(struct tcp_flags_t, opts) / 4;
+		payload->th.ack = 1;
+
+		seq = conn->seq_to_tap;
+		ret = tcp_prepare_flags(c, conn, flags, &payload->th,
+					(char *)payload->data, &optlen);
+		if (ret <= 0) {
+			vu_queue_rewind(vq, 1);
+			return ret;
+		}
+
+		l4len = tcp_fill_headers6(conn, NULL, ip6h, payload, optlen,
+					  seq, true);
+		l2len = sizeof(*ip6h);
+	}
+	l2len += l4len + sizeof(struct ethhdr);
+
+	elem[0].in_sg[0].iov_len = l2len +
+				   sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	if (*c->pcap) {
+		tcp_vu_update_check(tapside, &elem[0].in_sg[0], 1);
+		pcap_iov(&elem[0].in_sg[0], 1,
+			 sizeof(struct virtio_net_hdr_mrg_rxbuf));
+	}
+	nb_ack = 1;
+
+	if (flags & DUP_ACK) {
+		elem_cnt = vu_collect_one_frame(vdev, vq, &elem[1], 1, l2len,
+						0);
+		if (elem_cnt == 1) {
+			memcpy(elem[1].in_sg[0].iov_base,
+			       elem[0].in_sg[0].iov_base, l2len);
+			vu_set_vnethdr(vdev, &elem[1].in_sg[0], 1, 0);
+			nb_ack++;
+
+			if (*c->pcap)
+				pcap_iov(&elem[1].in_sg[0], 1, 0);
+		}
+	}
+
+	vu_flush(vdev, vq, elem, nb_ack);
+
+	return 0;
+}
+
+/** tcp_vu_sock_recv() - Receive datastream from socket into vhost-user buffers
+ * @c:		Execution context
+ * @conn:	Connection pointer
+ * @v4:		Set for IPv4 connections
+ * @fillsize:	Number of bytes we can receive
+ * @dlen:	Size of received data (output)
+ *
+ * Return: Number of iov entries used to store the data
+ */
+static ssize_t tcp_vu_sock_recv(const struct ctx *c,
+				struct tcp_tap_conn *conn, bool v4,
+				size_t fillsize, ssize_t *dlen)
+{
+	struct vu_dev *vdev = c->vdev;
+	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+	struct msghdr mh_sock = { 0 };
+	uint16_t mss = MSS_GET(conn);
+	int s = conn->sock;
+	size_t l2_hdrlen;
+	int elem_cnt;
+	ssize_t ret;
+
+	*dlen = 0;
+
+	l2_hdrlen = tcp_vu_l2_hdrlen(!v4);
+
+	vu_init_elem(elem, &iov_vu[1], VIRTQUEUE_MAX_SIZE);
+
+	elem_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE, mss,
+			      l2_hdrlen, fillsize);
+	if (elem_cnt < 0) {
+		tcp_rst(c, conn);
+		return -ENOMEM;
+	}
+
+	mh_sock.msg_iov = iov_vu;
+	mh_sock.msg_iovlen = elem_cnt + 1;
+
+	do
+		ret = recvmsg(s, &mh_sock, MSG_PEEK);
+	while (ret < 0 && errno == EINTR);
+
+	if (ret < 0) {
+		vu_queue_rewind(vq, elem_cnt);
+		if (errno != EAGAIN && errno != EWOULDBLOCK) {
+			ret = -errno;
+			tcp_rst(c, conn);
+		}
+		return ret;
+	}
+	if (!ret) {
+		vu_queue_rewind(vq, elem_cnt);
+
+		if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) == SOCK_FIN_RCVD) {
+			int retf = tcp_vu_send_flag(c, conn, FIN | ACK);
+			if (retf) {
+				tcp_rst(c, conn);
+				return retf;
+			}
+
+			conn_event(c, conn, TAP_FIN_SENT);
+		}
+		return 0;
+	}
+
+	*dlen = ret;
+
+	return elem_cnt;
+}
+
+/**
+ * tcp_vu_prepare() - Prepare the packet header
+ * @c:		Execution context
+ * @conn:	Connection pointer
+ * @first:	Pointer to the array of IO vectors
+ * @dlen:	Packet data length
+ * @check:	Checksum, if already known
+ */
+static void tcp_vu_prepare(const struct ctx *c,
+			   struct tcp_tap_conn *conn, struct iovec *first,
+			   size_t dlen, const uint16_t **check)
+{
+	const struct flowside *toside = TAPFLOW(conn);
+	char *base = first->iov_base;
+	struct ethhdr *eh;
+
+	/* we guess the first iovec provided by the guest can embed
+	 * all the headers needed by L2 frame
+	 */
+
+	eh = vu_eth(base);
+
+	memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
+	memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
+
+	/* initialize header */
+	if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
+		struct tcp_payload_t *payload;
+		struct iphdr *iph;
+
+		ASSERT(first[0].iov_len >= sizeof(struct virtio_net_hdr_mrg_rxbuf) +
+		       sizeof(struct ethhdr) + sizeof(struct iphdr) +
+		       sizeof(struct tcphdr));
+
+		eh->h_proto = htons(ETH_P_IP);
+
+		iph = vu_ip(base);
+		*iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
+		payload = vu_payloadv4(base);
+		memset(&payload->th, 0, sizeof(payload->th));
+		payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
+		payload->th.ack = 1;
+
+		tcp_fill_headers4(conn, NULL, iph, payload, dlen,
+				  *check, conn->seq_to_tap, true);
+		*check = &iph->check;
+	} else {
+		struct tcp_payload_t *payload;
+		struct ipv6hdr *ip6h;
+
+		ASSERT(first[0].iov_len >= sizeof(struct virtio_net_hdr_mrg_rxbuf) +
+		       sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
+		       sizeof(struct tcphdr));
+
+		eh->h_proto = htons(ETH_P_IPV6);
+
+		ip6h = vu_ip(base);
+		*ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
+
+		payload = vu_payloadv6(base);
+		memset(&payload->th, 0, sizeof(payload->th));
+		payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
+		payload->th.ack = 1;
+
+		tcp_fill_headers6(conn, NULL, ip6h, payload, dlen,
+				  conn->seq_to_tap, true);
+	}
+}
+
+/**
+ * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
+ *			     in window
+ * @c:		Execution context
+ * @conn:	Connection pointer
+ *
+ * Return: Negative on connection reset, 0 otherwise
+ */
+int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
+{
+	uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
+	struct vu_dev *vdev = c->vdev;
+	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+	const struct flowside *tapside = TAPFLOW(conn);
+	uint16_t mss = MSS_GET(conn);
+	size_t l2_hdrlen, fillsize;
+	int i, iov_cnt, iov_used;
+	int v4 = CONN_V4(conn);
+	uint32_t already_sent = 0;
+	const uint16_t *check;
+	struct iovec *first;
+	int frame_size;
+	int num_buffers;
+	ssize_t len;
+
+	if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
+		flow_err(conn,
+			 "Got packet, but RX virtqueue not usable yet");
+		return 0;
+	}
+
+	already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
+
+	if (SEQ_LT(already_sent, 0)) {
+		/* RFC 761, section 2.1. */
+		flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
+			   conn->seq_ack_from_tap, conn->seq_to_tap);
+		conn->seq_to_tap = conn->seq_ack_from_tap;
+		already_sent = 0;
+	}
+
+	if (!wnd_scaled || already_sent >= wnd_scaled) {
+		conn_flag(c, conn, STALLED);
+		conn_flag(c, conn, ACK_FROM_TAP_DUE);
+		return 0;
+	}
+
+	/* Set up buffer descriptors we'll fill completely and partially. */
+
+	fillsize = wnd_scaled;
+
+	if (peek_offset_cap)
+		already_sent = 0;
+
+	iov_vu[0].iov_base = tcp_buf_discard;
+	iov_vu[0].iov_len = already_sent;
+	fillsize -= already_sent;
+
+	/* collect the buffers from vhost-user and fill them with the
+	 * data from the socket
+	 */
+	iov_cnt = tcp_vu_sock_recv(c, conn, v4, fillsize, &len);
+	if (iov_cnt <= 0)
+		return iov_cnt;
+
+	len -= already_sent;
+	if (len <= 0) {
+		conn_flag(c, conn, STALLED);
+		vu_queue_rewind(vq, iov_cnt);
+		return 0;
+	}
+
+	conn_flag(c, conn, ~STALLED);
+
+	/* Likely, some new data was acked too. */
+	tcp_update_seqack_wnd(c, conn, 0, NULL);
+
+	/* initialize headers */
+	l2_hdrlen = tcp_vu_l2_hdrlen(!v4);
+	iov_used = 0;
+	num_buffers = 0;
+	check = NULL;
+	frame_size = 0;
+
+	/* iov_vu is an array of buffers and the buffer size can be
+	 * smaller than the frame size we want to use but with
+	 * num_buffer we can merge several virtio iov buffers in one packet
+	 * we need only to set the packet headers in the first iov and
+	 * num_buffer to the number of iov entries
+	 */
+	for (i = 0; i < iov_cnt && len; i++) {
+
+		if (frame_size == 0)
+			first = &iov_vu[i + 1];
+
+		if (iov_vu[i + 1].iov_len > (size_t)len)
+			iov_vu[i + 1].iov_len = len;
+
+		len -= iov_vu[i + 1].iov_len;
+		iov_used++;
+
+		frame_size += iov_vu[i + 1].iov_len;
+		num_buffers++;
+
+		if (frame_size >= mss || len == 0 ||
+		    i + 1 == iov_cnt || !vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
+			if (i + 1 == iov_cnt)
+				check = NULL;
+
+			/* restore first iovec base: point to vnet header */
+			vu_set_vnethdr(vdev, first, num_buffers, l2_hdrlen);
+
+			tcp_vu_prepare(c, conn, first, frame_size, &check);
+			if (*c->pcap)  {
+				tcp_vu_update_check(tapside, first, num_buffers);
+				pcap_iov(first, num_buffers,
+					 sizeof(struct virtio_net_hdr_mrg_rxbuf));
+			}
+
+			conn->seq_to_tap += frame_size;
+
+			frame_size = 0;
+			num_buffers = 0;
+		}
+	}
+
+	/* release unused buffers */
+	vu_queue_rewind(vq, iov_cnt - iov_used);
+
+	/* send packets */
+	vu_flush(vdev, vq, elem, iov_used);
+
+	conn_flag(c, conn, ACK_FROM_TAP_DUE);
+
+	return 0;
+}
diff --git a/tcp_vu.h b/tcp_vu.h
new file mode 100644
index 0000000..6ab6057
--- /dev/null
+++ b/tcp_vu.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#ifndef TCP_VU_H
+#define TCP_VU_H
+
+int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags);
+int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn);
+
+#endif /* TCP_VU_H */
diff --git a/udp.c b/udp.c
index 8fc5d80..1171d9d 100644
--- a/udp.c
+++ b/udp.c
@@ -628,6 +628,11 @@ void udp_listen_sock_handler(const struct ctx *c,
 			     union epoll_ref ref, uint32_t events,
 			     const struct timespec *now)
 {
+	if (c->mode == MODE_VU) {
+		udp_vu_listen_sock_handler(c, ref, events, now);
+		return;
+	}
+
 	udp_buf_listen_sock_handler(c, ref, events, now);
 }
 
@@ -697,6 +702,11 @@ static void udp_buf_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
 void udp_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
 			    uint32_t events, const struct timespec *now)
 {
+	if (c->mode == MODE_VU) {
+		udp_vu_reply_sock_handler(c, ref, events, now);
+		return;
+	}
+
 	udp_buf_reply_sock_handler(c, ref, events, now);
 }
 
diff --git a/udp_vu.c b/udp_vu.c
new file mode 100644
index 0000000..04fabf5
--- /dev/null
+++ b/udp_vu.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* udp_vu.c - UDP L2 vhost-user management functions
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#include <unistd.h>
+#include <assert.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/uio.h>
+#include <linux/virtio_net.h>
+
+#include "checksum.h"
+#include "util.h"
+#include "ip.h"
+#include "siphash.h"
+#include "inany.h"
+#include "passt.h"
+#include "pcap.h"
+#include "log.h"
+#include "vhost_user.h"
+#include "udp_internal.h"
+#include "flow.h"
+#include "flow_table.h"
+#include "udp_flow.h"
+#include "udp_vu.h"
+#include "vu_common.h"
+
+static struct iovec     iov_vu		[VIRTQUEUE_MAX_SIZE];
+static struct vu_virtq_element	elem		[VIRTQUEUE_MAX_SIZE];
+
+/**
+ * udp_vu_l2_hdrlen() - return the size of the header in level 2 frame (UDP)
+ * @v6:		Set for IPv6 packet
+ *
+ * Return: the size of the level-2 header
+ */
+static size_t udp_vu_l2_hdrlen(bool v6)
+{
+	size_t l2_hdrlen;
+
+	l2_hdrlen = sizeof(struct ethhdr) + sizeof(struct udphdr);
+
+	if (v6)
+		l2_hdrlen += sizeof(struct ipv6hdr);
+	else
+		l2_hdrlen += sizeof(struct iphdr);
+
+	return l2_hdrlen;
+}
+
+static int udp_vu_sock_init(int s, union sockaddr_inany *s_in)
+{
+	struct msghdr msg = {
+		.msg_name = s_in,
+		.msg_namelen = sizeof(union sockaddr_inany),
+	};
+
+	return recvmsg(s, &msg, MSG_PEEK | MSG_DONTWAIT);
+}
+
+/**
+ * udp_vu_sock_recv() - Receive datagrams from socket into vhost-user buffers
+ * @c:		Execution context
+ * @s:		Socket to receive from
+ * @events:	epoll events bitmap
+ * @v6:		Set for IPv6 connections
+ * @dlen:	Size of received data (output)
+ *
+ * Return: Number of iov entries used to store the datagram
+ */
+static int udp_vu_sock_recv(const struct ctx *c, int s, uint32_t events,
+			    bool v6, ssize_t *dlen)
+{
+	struct vu_dev *vdev = c->vdev;
+	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+	int max_elem, iov_cnt, idx, iov_used;
+	struct msghdr msg  = { 0 };
+	size_t off, l2_hdrlen;
+
+	ASSERT(!c->no_udp);
+
+	if (!(events & EPOLLIN))
+		return 0;
+
+	/* compute L2 header length */
+
+	if (vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+		max_elem = VIRTQUEUE_MAX_SIZE;
+	else
+		max_elem = 1;
+
+	l2_hdrlen = udp_vu_l2_hdrlen(v6);
+
+	vu_init_elem(elem, iov_vu, max_elem);
+
+	iov_cnt = vu_collect_one_frame(vdev, vq, elem, max_elem,
+			      ETH_MAX_MTU - l2_hdrlen,
+			      l2_hdrlen);
+	if (iov_cnt == 0)
+		return 0;
+
+	msg.msg_iov = iov_vu;
+	msg.msg_iovlen = iov_cnt;
+
+	*dlen = recvmsg(s, &msg, 0);
+	if (*dlen < 0) {
+		vu_queue_rewind(vq, iov_cnt);
+		return 0;
+	}
+
+	/* count the number of buffers filled by recvmsg() */
+	idx = iov_skip_bytes(iov_vu, iov_cnt, *dlen, &off);
+
+	/* adjust last iov length */
+	if (idx < iov_cnt)
+		iov_vu[idx].iov_len = off;
+	iov_used = idx + !!off;
+
+	/* release unused buffers */
+	vu_queue_rewind(vq, iov_cnt - iov_used);
+
+	vu_set_vnethdr(vdev, &iov_vu[0], iov_used, l2_hdrlen);
+
+	return iov_used;
+}
+
+/**
+ * udp_vu_prepare() - Prepare the packet header
+ * @c:		Execution context
+ * @toside:	Address information for one side of the flow
+ * @dlen:	Packet data length
+ *
+ * Return: Layer-4 length
+ */
+static size_t udp_vu_prepare(const struct ctx *c,
+			     const struct flowside *toside, ssize_t dlen)
+{
+	struct ethhdr *eh;
+	size_t l4len;
+
+	/* ethernet header */
+	eh = vu_eth(iov_vu[0].iov_base);
+
+	memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
+	memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
+
+	/* initialize header */
+	if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
+		struct iphdr *iph = vu_ip(iov_vu[0].iov_base);
+		struct udp_payload_t *bp = vu_payloadv4(iov_vu[0].iov_base);
+
+		eh->h_proto = htons(ETH_P_IP);
+
+		*iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_UDP);
+
+		l4len = udp_update_hdr4(iph, bp, toside, dlen, true);
+	} else {
+		struct ipv6hdr *ip6h = vu_ip(iov_vu[0].iov_base);
+		struct udp_payload_t *bp = vu_payloadv6(iov_vu[0].iov_base);
+
+		eh->h_proto = htons(ETH_P_IPV6);
+
+		*ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_UDP);
+
+		l4len = udp_update_hdr6(ip6h, bp, toside, dlen, true);
+	}
+
+	return l4len;
+}
+
+/**
+ * udp_vu_csum() - Calculate and set checksum for a UDP packet
+ * @toside:	Address information for one side of the flow
+ * @iov_used:	Length of the array
+ */
+static void udp_vu_csum(const struct flowside *toside, int iov_used)
+{
+	const struct in_addr *src4 = inany_v4(&toside->oaddr);
+	const struct in_addr *dst4 = inany_v4(&toside->eaddr);
+	char *base = iov_vu[0].iov_base;
+	struct udp_payload_t *bp;
+
+	if (src4 && dst4) {
+		bp = vu_payloadv4(base);
+		csum_udp4(&bp->uh, *src4, *dst4, iov_vu, iov_used,
+			  (char *)&bp->data - base);
+	} else {
+		bp = vu_payloadv6(base);
+		csum_udp6(&bp->uh, &toside->oaddr.a6, &toside->eaddr.a6,
+			  iov_vu, iov_used, (char *)&bp->data - base);
+	}
+}
+
+/**
+ * udp_vu_listen_sock_handler() - Handle new data from socket
+ * @c:		Execution context
+ * @ref:	epoll reference
+ * @events:	epoll events bitmap
+ * @now:	Current timestamp
+ */
+void udp_vu_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
+				uint32_t events, const struct timespec *now)
+{
+	struct vu_dev *vdev = c->vdev;
+	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+	int i;
+
+	if (udp_sock_errs(c, ref.fd, events) < 0) {
+		err("UDP: Unrecoverable error on listening socket:"
+		    " (%s port %hu)", pif_name(ref.udp.pif), ref.udp.port);
+		return;
+	}
+
+	for (i = 0; i < UDP_MAX_FRAMES; i++) {
+		const struct flowside *toside;
+		union sockaddr_inany s_in;
+		flow_sidx_t batchsidx;
+		uint8_t batchpif;
+		ssize_t dlen;
+		int iov_used;
+		bool v6;
+
+		if (udp_vu_sock_init(ref.fd, &s_in) < 0)
+			break;
+
+		batchsidx = udp_flow_from_sock(c, ref, &s_in, now);
+		batchpif = pif_at_sidx(batchsidx);
+
+		if (batchpif != PIF_TAP) {
+			if (flow_sidx_valid(batchsidx)) {
+				flow_sidx_t fromsidx = flow_sidx_opposite(batchsidx);
+				struct udp_flow *uflow = udp_at_sidx(batchsidx);
+
+				flow_err(uflow,
+					"No support for forwarding UDP from %s to %s",
+					pif_name(pif_at_sidx(fromsidx)),
+					pif_name(batchpif));
+			} else {
+				debug("Discarding 1 datagram without flow");
+			}
+
+			continue;
+		}
+
+		toside = flowside_at_sidx(batchsidx);
+
+		v6 = !(inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr));
+
+		iov_used = udp_vu_sock_recv(c, ref.fd, events, v6, &dlen);
+		if (iov_used <= 0)
+			break;
+
+		udp_vu_prepare(c, toside, dlen);
+		if (*c->pcap) {
+			udp_vu_csum(toside, iov_used);
+			pcap_iov(iov_vu, iov_used,
+				 sizeof(struct virtio_net_hdr_mrg_rxbuf));
+		}
+		vu_flush(vdev, vq, elem, iov_used);
+	}
+}
+
+/**
+ * udp_vu_reply_sock_handler() - Handle new data from flow specific socket
+ * @c:		Execution context
+ * @ref:	epoll reference
+ * @events:	epoll events bitmap
+ * @now:	Current timestamp
+ */
+void udp_vu_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
+			        uint32_t events, const struct timespec *now)
+{
+	flow_sidx_t tosidx = flow_sidx_opposite(ref.flowside);
+	const struct flowside *toside = flowside_at_sidx(tosidx);
+	struct udp_flow *uflow = udp_at_sidx(ref.flowside);
+	int from_s = uflow->s[ref.flowside.sidei];
+	struct vu_dev *vdev = c->vdev;
+	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+	int i;
+
+	ASSERT(!c->no_udp);
+
+	if (udp_sock_errs(c, from_s, events) < 0) {
+		flow_err(uflow, "Unrecoverable error on reply socket");
+		flow_err_details(uflow);
+		udp_flow_close(c, uflow);
+		return;
+	}
+
+	for (i = 0; i < UDP_MAX_FRAMES; i++) {
+		uint8_t topif = pif_at_sidx(tosidx);
+		ssize_t dlen;
+		int iov_used;
+		bool v6;
+
+		ASSERT(uflow);
+
+		if (topif != PIF_TAP) {
+			uint8_t frompif = pif_at_sidx(ref.flowside);
+
+			flow_err(uflow,
+				 "No support for forwarding UDP from %s to %s",
+				 pif_name(frompif), pif_name(topif));
+			continue;
+		}
+
+		v6 = !(inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr));
+
+		iov_used = udp_vu_sock_recv(c, from_s, events, v6, &dlen);
+		if (iov_used <= 0)
+			break;
+		flow_trace(uflow, "Received 1 datagram on reply socket");
+		uflow->ts = now->tv_sec;
+
+		udp_vu_prepare(c, toside, dlen);
+		if (*c->pcap) {
+			udp_vu_csum(toside, iov_used);
+			pcap_iov(iov_vu, iov_used,
+				 sizeof(struct virtio_net_hdr_mrg_rxbuf));
+		}
+		vu_flush(vdev, vq, elem, iov_used);
+	}
+}
diff --git a/udp_vu.h b/udp_vu.h
new file mode 100644
index 0000000..ba7018d
--- /dev/null
+++ b/udp_vu.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#ifndef UDP_VU_H
+#define UDP_VU_H
+
+void udp_vu_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
+				uint32_t events, const struct timespec *now);
+void udp_vu_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
+			       uint32_t events, const struct timespec *now);
+#endif /* UDP_VU_H */
diff --git a/vhost_user.c b/vhost_user.c
index 952e97e..13decd3 100644
--- a/vhost_user.c
+++ b/vhost_user.c
@@ -48,12 +48,13 @@
 /* vhost-user version we are compatible with */
 #define VHOST_USER_VERSION 1
 
+static struct vu_dev vdev_storage;
+
 /**
  * vu_print_capabilities() - print vhost-user capabilities
  * 			     this is part of the vhost-user backend
  * 			     convention.
  */
-/* cppcheck-suppress unusedFunction */
 void vu_print_capabilities(void)
 {
 	info("{");
@@ -163,9 +164,7 @@ static void vmsg_close_fds(const struct vhost_user_msg *vmsg)
  */
 static void vu_remove_watch(const struct vu_dev *vdev, int fd)
 {
-	/* Placeholder to add passt related code */
-	(void)vdev;
-	(void)fd;
+	epoll_ctl(vdev->context->epollfd, EPOLL_CTL_DEL, fd, NULL);
 }
 
 /**
@@ -426,7 +425,6 @@ static bool map_ring(struct vu_dev *vdev, struct vu_virtq *vq)
  *
  * Return: 0 if the zone is in a mapped memory region, -1 otherwise
  */
-/* cppcheck-suppress unusedFunction */
 int vu_packet_check_range(void *buf, size_t offset, size_t len,
 			  const char *start)
 {
@@ -516,6 +514,14 @@ static bool vu_set_mem_table_exec(struct vu_dev *vdev,
 		}
 	}
 
+	/* As vu_packet_check_range() has no access to the number of
+	 * memory regions, mark the end of the array with mmap_addr = 0
+	 */
+	ASSERT(vdev->nregions < VHOST_USER_MAX_RAM_SLOTS - 1);
+	vdev->regions[vdev->nregions].mmap_addr = 0;
+
+	tap_sock_update_pool(vdev->regions, 0);
+
 	return false;
 }
 
@@ -644,9 +650,12 @@ static bool vu_get_vring_base_exec(struct vu_dev *vdev,
  */
 static void vu_set_watch(const struct vu_dev *vdev, int fd)
 {
-	/* Placeholder to add passt related code */
-	(void)vdev;
-	(void)fd;
+	union epoll_ref ref = { .type = EPOLL_TYPE_VHOST_KICK, .fd = fd };
+	struct epoll_event ev = { 0 };
+
+	ev.data.u64 = ref.u64;
+	ev.events = EPOLLIN;
+	epoll_ctl(vdev->context->epollfd, EPOLL_CTL_ADD, fd, &ev);
 }
 
 /**
@@ -858,14 +867,14 @@ static bool vu_set_vring_enable_exec(struct vu_dev *vdev,
  * @c:		execution context
  * @vdev:	vhost-user device
  */
-/* cppcheck-suppress unusedFunction */
-void vu_init(struct ctx *c, struct vu_dev *vdev)
+void vu_init(struct ctx *c)
 {
 	int i;
 
-	vdev->context = c;
+	c->vdev = &vdev_storage;
+	c->vdev->context = c;
 	for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
-		vdev->vq[i] = (struct vu_virtq){
+		c->vdev->vq[i] = (struct vu_virtq){
 			.call_fd = -1,
 			.kick_fd = -1,
 			.err_fd = -1,
@@ -878,7 +887,6 @@ void vu_init(struct ctx *c, struct vu_dev *vdev)
  * vu_cleanup() - Reset vhost-user device
  * @vdev:	vhost-user device
  */
-/* cppcheck-suppress unusedFunction */
 void vu_cleanup(struct vu_dev *vdev)
 {
 	unsigned int i;
@@ -925,8 +933,7 @@ void vu_cleanup(struct vu_dev *vdev)
  */
 static void vu_sock_reset(struct vu_dev *vdev)
 {
-	/* Placeholder to add passt related code */
-	(void)vdev;
+	tap_sock_reset(vdev->context);
 }
 
 static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
@@ -954,7 +961,6 @@ static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
  * @fd:		vhost-user message socket
  * @events:	epoll events
  */
-/* cppcheck-suppress unusedFunction */
 void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events)
 {
 	struct vhost_user_msg msg = { 0 };
diff --git a/vhost_user.h b/vhost_user.h
index 5af349b..464ba21 100644
--- a/vhost_user.h
+++ b/vhost_user.h
@@ -183,7 +183,6 @@ struct vhost_user_msg {
  *
  * Return: true if the virqueue is enabled, false otherwise
  */
-/* cppcheck-suppress unusedFunction */
 static inline bool vu_queue_enabled(const struct vu_virtq *vq)
 {
 	return vq->enable;
@@ -195,14 +194,13 @@ static inline bool vu_queue_enabled(const struct vu_virtq *vq)
  *
  * Return: true if the virqueue is started, false otherwise
  */
-/* cppcheck-suppress unusedFunction */
 static inline bool vu_queue_started(const struct vu_virtq *vq)
 {
 	return vq->started;
 }
 
 void vu_print_capabilities(void);
-void vu_init(struct ctx *c, struct vu_dev *vdev);
+void vu_init(struct ctx *c);
 void vu_cleanup(struct vu_dev *vdev);
 void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events);
 #endif /* VHOST_USER_H */
diff --git a/virtio.c b/virtio.c
index 380590a..0598ff4 100644
--- a/virtio.c
+++ b/virtio.c
@@ -328,7 +328,6 @@ static bool vring_can_notify(const struct vu_dev *dev, struct vu_virtq *vq)
  * @dev:	Vhost-user device
  * @vq:		Virtqueue
  */
-/* cppcheck-suppress unusedFunction */
 void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq)
 {
 	if (!vq->vring.avail)
@@ -504,7 +503,6 @@ static int vu_queue_map_desc(struct vu_dev *dev, struct vu_virtq *vq, unsigned i
  *
  * Return: -1 if there is an error, 0 otherwise
  */
-/* cppcheck-suppress unusedFunction */
 int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq, struct vu_virtq_element *elem)
 {
 	unsigned int head;
@@ -565,7 +563,6 @@ void vu_queue_unpop(struct vu_virtq *vq)
  * @vq:		Virtqueue
  * @num:	Number of element to unpop
  */
-/* cppcheck-suppress unusedFunction */
 bool vu_queue_rewind(struct vu_virtq *vq, unsigned int num)
 {
 	if (num > vq->inuse)
@@ -621,7 +618,6 @@ void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
  * @len:	Size of the element
  * @idx:	Used ring entry index
  */
-/* cppcheck-suppress unusedFunction */
 void vu_queue_fill(struct vu_virtq *vq, const struct vu_virtq_element *elem,
 		   unsigned int len, unsigned int idx)
 {
@@ -645,7 +641,6 @@ static inline void vring_used_idx_set(struct vu_virtq *vq, uint16_t val)
  * @vq:		Virtqueue
  * @count:	Number of entry to flush
  */
-/* cppcheck-suppress unusedFunction */
 void vu_queue_flush(struct vu_virtq *vq, unsigned int count)
 {
 	uint16_t old, new;
diff --git a/vu_common.c b/vu_common.c
new file mode 100644
index 0000000..38d5d8e
--- /dev/null
+++ b/vu_common.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ *
+ * vu_common.c - vhost-user common UDP and TCP functions
+ */
+
+#include <unistd.h>
+#include <sys/uio.h>
+#include <sys/eventfd.h>
+#include <linux/virtio_net.h>
+
+#include "util.h"
+#include "passt.h"
+#include "tap.h"
+#include "vhost_user.h"
+#include "pcap.h"
+#include "vu_common.h"
+
+/**
+ * vu_init_elem() - initialize an array of virtqueue element with 1 iov in each
+ * @elem:	Array of virtqueue element to initialize
+ * @iov:	Array of iovec to assign to virtqueue element
+ * @elem_cnt:	Number of virtqueue element
+ */
+void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt)
+{
+	int i;
+
+	for (i = 0; i < elem_cnt; i++) {
+		elem[i].out_num = 0;
+		elem[i].out_sg = NULL;
+		elem[i].in_num = 1;
+		elem[i].in_sg = &iov[i];
+	}
+}
+
+/**
+ * vu_collect_one_frame() - collect virtio buffers from a given virtqueue for
+ *			    one frame
+ * @vdev:		vhost-user device
+ * @vq:			virtqueue to collect from
+ * @elem:		Array of virtqueue element
+ * 			each element must be initialized with one iovec entry
+ * 			in the in_sg array.
+ * @max_elem:		Number of virtqueue element in the array
+ * @size:		Maximum size of the data in the frame
+ * @hdrlen:		Size of the frame header
+ */
+int vu_collect_one_frame(struct vu_dev *vdev, struct vu_virtq *vq,
+			 struct vu_virtq_element *elem, int max_elem,
+			 size_t size, size_t hdrlen)
+{
+	return vu_collect(vdev, vq, elem, max_elem, size, hdrlen, size);
+}
+
+/**
+ * vu_collect() - collect virtio buffers from a given virtqueue
+ * @vdev:		vhost-user device
+ * @vq:			virtqueue to collect from
+ * @elem:		Array of virtqueue element
+ * 			each element must be initialized with one iovec entry
+ * 			in the in_sg array.
+ * @max_elem:		Number of virtqueue element in the array
+ * @max_frame_size:	Maximum size of the data in the frame
+ * @hdrlen:		Size of the frame header
+ * @size:		Total size of the buffers we need to collect
+ * 			(if size > max_frame_size, we collect several frame)
+ */
+int vu_collect(struct vu_dev *vdev, struct vu_virtq *vq,
+	       struct vu_virtq_element *elem, int max_elem,
+	       size_t max_frame_size, size_t hdrlen, size_t size)
+{
+	size_t frame_size = 0;
+	int elem_cnt = 0;
+
+	/* header is at least virtio_net_hdr_mrg_rxbuf */
+	hdrlen += sizeof(struct virtio_net_hdr_mrg_rxbuf);
+
+	while (size > 0 && elem_cnt < max_elem) {
+		struct iovec *iov;
+		int ret;
+
+		ret = vu_queue_pop(vdev, vq, &elem[elem_cnt]);
+		if (ret < 0)
+			break;
+
+		if (elem[elem_cnt].in_num < 1) {
+			warn("virtio-net receive queue contains no in buffers");
+			vu_queue_detach_element(vq);
+			break;
+		}
+
+		iov = &elem[elem_cnt].in_sg[0];
+
+		ASSERT(iov->iov_len >= hdrlen);
+
+		if (frame_size == 0) {
+			/* this is the iovec that will contain frame headers */
+			iov->iov_base = (char *)iov->iov_base + hdrlen;
+			iov->iov_len -= hdrlen;
+		}
+
+		if (iov->iov_len > size)
+			iov->iov_len = size;
+
+		frame_size += iov->iov_len;
+
+		if (frame_size >= max_frame_size) {
+			/* if the frame size is greater than maximum size
+			 * we need to start a new frame
+			 */
+			iov->iov_len -= frame_size - max_frame_size;
+			frame_size = 0;
+		} else if (!vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
+			/* if we don't have feature F_MRG_RXBUF,
+			 * the frame cannot be spread over several
+			 * virtqueue element
+			 */
+			frame_size = 0;
+		}
+
+		size -= iov->iov_len;
+		elem_cnt++;
+	}
+
+	return elem_cnt;
+}
+
+/**
+ * vu_set_vnethdr() - set virtio-net headers in a given iovec
+ * @vdev:		vhost-user device
+ * @iov:		One iovec to initialize
+ * @num_buffers:	Number of guest buffers of the frame
+ * @hdrlen:		Size of the frame header
+ */
+void vu_set_vnethdr(const struct vu_dev *vdev, struct iovec *iov,
+		    int num_buffers, size_t hdrlen)
+{
+	struct virtio_net_hdr_mrg_rxbuf *vnethdr;
+
+	/* header is at least virtio_net_hdr_mrg_rxbuf */
+	hdrlen += sizeof(struct virtio_net_hdr_mrg_rxbuf);
+
+	/* NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult) */
+	iov->iov_base = (char *)iov->iov_base - hdrlen;
+	iov->iov_len += hdrlen;
+
+	vnethdr = iov->iov_base;
+	vnethdr->hdr = VU_HEADER;
+	if (vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+		vnethdr->num_buffers = htole16(num_buffers);
+}
+
+/**
+ * vu_flush() - flush all the collected buffers to the vhost-user interface
+ * @vdev:	vhost-user device
+ * @vq:		vhost-user virtqueue
+ * @elem:	virtqueue element array to send back to the virtqueue
+ * @elem_cnt:	Number of elements in the array
+ */
+void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
+	      struct vu_virtq_element *elem, int elem_cnt)
+{
+	int i;
+
+	for (i = 0; i < elem_cnt; i++)
+		vu_queue_fill(vq, &elem[i], elem[i].in_sg[0].iov_len, i);
+
+	vu_queue_flush(vq, elem_cnt);
+	vu_queue_notify(vdev, vq);
+}
+
+/**
+ * vu_handle_tx() - Receive data from the TX virtqueue
+ * @vdev:	vhost-user device
+ * @index:	index of the virtqueue
+ * @now:	Current timestamp
+ */
+static void vu_handle_tx(struct vu_dev *vdev, int index,
+			 const struct timespec *now)
+{
+	struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
+	struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
+	struct vu_virtq *vq = &vdev->vq[index];
+	int hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+	int out_sg_count;
+	int count;
+
+	if (!VHOST_USER_IS_QUEUE_TX(index)) {
+		debug("vhost-user: index %d is not a TX queue", index);
+		return;
+	}
+
+	tap_flush_pools();
+
+	count = 0;
+	out_sg_count = 0;
+	while (count < VIRTQUEUE_MAX_SIZE) {
+		int ret;
+
+		elem[count].out_num = 1;
+		elem[count].out_sg = &out_sg[out_sg_count];
+		elem[count].in_num = 0;
+		elem[count].in_sg = NULL;
+		ret = vu_queue_pop(vdev, vq, &elem[count]);
+		if (ret < 0)
+			break;
+		out_sg_count += elem[count].out_num;
+
+		if (elem[count].out_num < 1) {
+			debug("virtio-net header not in first element");
+			break;
+		}
+		ASSERT(elem[count].out_num == 1);
+
+		tap_add_packet(vdev->context,
+			       elem[count].out_sg[0].iov_len - hdrlen,
+			       (char *)elem[count].out_sg[0].iov_base + hdrlen);
+		count++;
+	}
+	tap_handler(vdev->context, now);
+
+	if (count) {
+		int i;
+
+		for (i = 0; i < count; i++)
+			vu_queue_fill(vq, &elem[i], 0, i);
+		vu_queue_flush(vq, count);
+		vu_queue_notify(vdev, vq);
+	}
+}
+
+/**
+ * vu_kick_cb() - Called on a kick event to start to receive data
+ * @vdev:	vhost-user device
+ * @ref:	epoll reference information
+ * @now:	Current timestamp
+ */
+void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
+		const struct timespec *now)
+{
+	eventfd_t kick_data;
+	ssize_t rc;
+	int idx;
+
+	for (idx = 0; idx < VHOST_USER_MAX_QUEUES; idx++) {
+		if (vdev->vq[idx].kick_fd == ref.fd)
+			break;
+	}
+
+	if (idx == VHOST_USER_MAX_QUEUES)
+		return;
+
+	rc = eventfd_read(ref.fd, &kick_data);
+	if (rc == -1)
+		die_perror("vhost-user kick eventfd_read()");
+
+	debug("vhost-user: got kick_data: %016"PRIx64" idx:%d",
+	      kick_data, idx);
+	if (VHOST_USER_IS_QUEUE_TX(idx))
+		vu_handle_tx(vdev, idx, now);
+}
+
+/**
+ * vu_send_single() - Send a buffer to the front-end using the RX virtqueue
+ * @c:		execution context
+ * @buf:	address of the buffer
+ * @size:	size of the buffer
+ *
+ * Return: number of bytes sent, -1 if there is an error
+ */
+int vu_send_single(const struct ctx *c, const void *buf, size_t size)
+{
+	struct vu_dev *vdev = c->vdev;
+	struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+	struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
+	struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
+	size_t total;
+	int elem_cnt, max_elem;
+	int i;
+
+	debug("vu_send_single size %zu", size);
+
+	if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
+		err("Got packet, but no available descriptors on RX virtq.");
+		return 0;
+	}
+
+	if (vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+		max_elem = VIRTQUEUE_MAX_SIZE;
+	else
+		max_elem = 1;
+
+	vu_init_elem(elem, in_sg, max_elem);
+
+	elem_cnt = vu_collect_one_frame(vdev, vq, elem, max_elem, size,
+					0);
+	total = iov_size(in_sg, elem_cnt);
+	if (total < size) {
+		debug("vu_send_single: no space to send the data "
+		      "elem_cnt %d iov_size %zd", elem_cnt, total);
+		goto err;
+	}
+
+	vu_set_vnethdr(vdev, in_sg, elem_cnt, 0);
+
+	/* copy data from the buffer to the iovec */
+	iov_from_buf(in_sg, elem_cnt, sizeof(struct virtio_net_hdr_mrg_rxbuf),
+		     buf, size);
+
+	if (*c->pcap) {
+		pcap_iov(in_sg, elem_cnt,
+			 sizeof(struct virtio_net_hdr_mrg_rxbuf));
+	}
+
+	vu_flush(vdev, vq, elem, elem_cnt);
+
+	debug("vhost-user sent %zu", total);
+
+	return total;
+err:
+	for (i = 0; i < elem_cnt; i++)
+		vu_queue_detach_element(vq);
+
+	return 0;
+}
diff --git a/vu_common.h b/vu_common.h
new file mode 100644
index 0000000..a6f7fa9
--- /dev/null
+++ b/vu_common.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ *
+ * vhost-user common UDP and TCP functions
+ */
+
+#ifndef VU_COMMON_H
+#define VU_COMMON_H
+#include <linux/virtio_net.h>
+
+static inline void *vu_eth(void *base)
+{
+	return ((char *)base + sizeof(struct virtio_net_hdr_mrg_rxbuf));
+}
+
+static inline void *vu_ip(void *base)
+{
+	return (struct ethhdr *)vu_eth(base) + 1;
+}
+
+static inline void *vu_payloadv4(void *base)
+{
+	return (struct iphdr *)vu_ip(base) + 1;
+}
+
+static inline void *vu_payloadv6(void *base)
+{
+	return (struct ipv6hdr *)vu_ip(base) + 1;
+}
+
+void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov,
+		  int elem_cnt);
+int vu_collect_one_frame(struct vu_dev *vdev, struct vu_virtq *vq,
+			 struct vu_virtq_element *elem, int max_elem,
+			 size_t size, size_t hdrlen);
+int vu_collect(struct vu_dev *vdev, struct vu_virtq *vq,
+	       struct vu_virtq_element *elem, int max_elem,
+	       size_t max_frame_size, size_t hdrlen, size_t size);
+void vu_set_vnethdr(const struct vu_dev *vdev, struct iovec *iov,
+                    int num_buffers, size_t hdrlen);
+void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
+	      struct vu_virtq_element *elem, int elem_cnt);
+void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
+		const struct timespec *now);
+int vu_send_single(const struct ctx *c, const void *buf, size_t size);
+#endif /* VU_COMMON_H */
-- 
2.43.0


      reply	other threads:[~2024-10-09  8:24 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-07 14:40 [PATCH v6 0/7] Add vhost-user support to passt. (part 3) Laurent Vivier
2024-10-07 14:40 ` [PATCH v6 1/7] packet: replace struct desc by struct iovec Laurent Vivier
2024-10-07 14:40 ` [PATCH v6 2/7] vhost-user: introduce virtio API Laurent Vivier
2024-10-07 14:40 ` [PATCH v6 3/7] vhost-user: introduce vhost-user API Laurent Vivier
2024-10-07 14:40 ` [PATCH v6 4/7] udp: Prepare udp.c to be shared with vhost-user Laurent Vivier
2024-10-07 14:40 ` [PATCH v6 5/7] tcp: Export headers functions Laurent Vivier
2024-10-07 14:40 ` [PATCH v6 6/7] passt: rename tap_sock_init() to tap_backend_init() Laurent Vivier
2024-10-07 14:40 ` [PATCH v6 7/7] vhost-user: add vhost-user Laurent Vivier
2024-10-09  8:23   ` Stefano Brivio [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241009102353.59aa73b7@elisabeth \
    --to=sbrivio@redhat.com \
    --cc=lvivier@redhat.com \
    --cc=passt-dev@passt.top \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
Code repositories for project(s) associated with this public inbox

	https://passt.top/passt

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for IMAP folder(s).