* [PATCH v14 1/9] packet: replace struct desc by struct iovec
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 2/9] vhost-user: introduce virtio API Laurent Vivier
` (8 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Laurent Vivier, David Gibson
To be able to manage buffers inside shared memory provided
by a VM via the vhost-user interface, we can no longer rely on
buffers being located in a pre-defined memory area, addressed
with a base address and a 32-bit offset.
We need a full 64-bit address, so replace struct desc by struct
iovec and update the range checking accordingly.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
---
packet.c | 80 ++++++++++++++++++++++++++++++--------------------------
packet.h | 14 ++--------
2 files changed, 45 insertions(+), 49 deletions(-)
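
[Editor's aside, not part of the patch: a minimal sketch of what the
addressing change means in practice. The helper names below are
hypothetical; only struct desc and struct iovec come from the code.]

#include <stdint.h>
#include <sys/uio.h>

/* Old scheme: a descriptor is base + 32-bit offset with a 16-bit
 * length, so it can only name data inside one pre-defined buffer. */
struct desc { uint32_t offset; uint16_t len; };

static char *desc_addr(char *buf, const struct desc *d)
{
	return buf + d->offset;	/* confined to buf .. buf + UINT32_MAX */
}

/* New scheme: struct iovec carries a full 64-bit pointer, so a packet
 * can live anywhere in our address space, e.g. in a guest memory
 * region mapped for vhost-user; packet_check_range() now validates
 * the bounds instead of the offset arithmetic. */
static char *iovec_addr(const struct iovec *iov)
{
	return (char *)iov->iov_base;
}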
diff --git a/packet.c b/packet.c
index ccfc84607709..37489961a37e 100644
--- a/packet.c
+++ b/packet.c
@@ -22,6 +22,35 @@
#include "util.h"
#include "log.h"
+/**
+ * packet_check_range() - Check if a packet memory range is valid
+ * @p: Packet pool
+ * @offset: Offset of data range in packet descriptor
+ * @len: Length of desired data range
+ * @start: Start of the packet descriptor
+ * @func: For tracing: name of calling function
+ * @line: For tracing: caller line of function call
+ *
+ * Return: 0 if the range is valid, -1 otherwise
+ */
+static int packet_check_range(const struct pool *p, size_t offset, size_t len,
+ const char *start, const char *func, int line)
+{
+ if (start < p->buf) {
+ trace("packet start %p before buffer start %p, "
+ "%s:%i", (void *)start, (void *)p->buf, func, line);
+ return -1;
+ }
+
+ if (start + len + offset > p->buf + p->buf_size) {
+ trace("packet offset plus length %lu from size %lu, "
+ "%s:%i", start - p->buf + len + offset,
+ p->buf_size, func, line);
+ return -1;
+ }
+
+ return 0;
+}
/**
* packet_add_do() - Add data as packet descriptor to given pool
* @p: Existing pool
@@ -41,34 +70,16 @@ void packet_add_do(struct pool *p, size_t len, const char *start,
return;
}
- if (start < p->buf) {
- trace("add packet start %p before buffer start %p, %s:%i",
- (void *)start, (void *)p->buf, func, line);
+ if (packet_check_range(p, 0, len, start, func, line))
return;
- }
-
- if (start + len > p->buf + p->buf_size) {
- trace("add packet start %p, length: %zu, buffer end %p, %s:%i",
- (void *)start, len, (void *)(p->buf + p->buf_size),
- func, line);
- return;
- }
if (len > UINT16_MAX) {
trace("add packet length %zu, %s:%i", len, func, line);
return;
}
-#if UINTPTR_MAX == UINT64_MAX
- if ((uintptr_t)start - (uintptr_t)p->buf > UINT32_MAX) {
- trace("add packet start %p, buffer start %p, %s:%i",
- (void *)start, (void *)p->buf, func, line);
- return;
- }
-#endif
-
- p->pkt[idx].offset = start - p->buf;
- p->pkt[idx].len = len;
+ p->pkt[idx].iov_base = (void *)start;
+ p->pkt[idx].iov_len = len;
p->count++;
}
@@ -96,36 +107,31 @@ void *packet_get_do(const struct pool *p, size_t idx, size_t offset,
return NULL;
}
- if (len > UINT16_MAX || len + offset > UINT32_MAX) {
+ if (len > UINT16_MAX) {
if (func) {
- trace("packet data length %zu, offset %zu, %s:%i",
- len, offset, func, line);
+ trace("packet data length %zu, %s:%i",
+ len, func, line);
}
return NULL;
}
- if (p->pkt[idx].offset + len + offset > p->buf_size) {
+ if (len + offset > p->pkt[idx].iov_len) {
if (func) {
- trace("packet offset plus length %zu from size %zu, "
- "%s:%i", p->pkt[idx].offset + len + offset,
- p->buf_size, func, line);
+ trace("data length %zu, offset %zu from length %zu, "
+ "%s:%i", len, offset, p->pkt[idx].iov_len,
+ func, line);
}
return NULL;
}
- if (len + offset > p->pkt[idx].len) {
- if (func) {
- trace("data length %zu, offset %zu from length %u, "
- "%s:%i", len, offset, p->pkt[idx].len,
- func, line);
- }
+ if (packet_check_range(p, offset, len, p->pkt[idx].iov_base,
+ func, line))
return NULL;
- }
if (left)
- *left = p->pkt[idx].len - offset - len;
+ *left = p->pkt[idx].iov_len - offset - len;
- return p->buf + p->pkt[idx].offset + offset;
+ return (char *)p->pkt[idx].iov_base + offset;
}
/**
diff --git a/packet.h b/packet.h
index a784b07bbed5..8377dcf678bb 100644
--- a/packet.h
+++ b/packet.h
@@ -6,16 +6,6 @@
#ifndef PACKET_H
#define PACKET_H
-/**
- * struct desc - Generic offset-based descriptor within buffer
- * @offset: Offset of descriptor relative to buffer start, 32-bit limit
- * @len: Length of descriptor, host order, 16-bit limit
- */
-struct desc {
- uint32_t offset;
- uint16_t len;
-};
-
/**
* struct pool - Generic pool of packets stored in a buffer
* @buf: Buffer storing packet descriptors
@@ -29,7 +19,7 @@ struct pool {
size_t buf_size;
size_t size;
size_t count;
- struct desc pkt[1];
+ struct iovec pkt[1];
};
void packet_add_do(struct pool *p, size_t len, const char *start,
@@ -54,7 +44,7 @@ struct _name ## _t { \
size_t buf_size; \
size_t size; \
size_t count; \
- struct desc pkt[_size]; \
+ struct iovec pkt[_size]; \
}
#define PACKET_POOL_INIT_NOCAST(_size, _buf, _buf_size) \
--
2.47.0
* [PATCH v14 2/9] vhost-user: introduce virtio API
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 1/9] packet: replace struct desc by struct iovec Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 3/9] vhost-user: introduce vhost-user API Laurent Vivier
` (7 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Laurent Vivier
Add virtio.c and virtio.h that define the functions needed
to manage virtqueues.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
Makefile | 4 +-
util.h | 9 +
virtio.c | 650 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
virtio.h | 183 ++++++++++++++++
4 files changed, 844 insertions(+), 2 deletions(-)
create mode 100644 virtio.c
create mode 100644 virtio.h
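
[Editor's note on the barrier helpers added to util.h below: they
implement the usual release/acquire pairing between a producer that
publishes data and then an index, and a consumer that reads the index
and then the data; this is exactly how the vring code uses smp_wmb()
in vu_queue_flush() and smp_rmb() in vu_queue_pop(). A self-contained
sketch with an illustrative ring, not taken from the patch:]

#include <stdint.h>
#include <string.h>

#define smp_mb_release()	__atomic_thread_fence(__ATOMIC_RELEASE)
#define smp_mb_acquire()	__atomic_thread_fence(__ATOMIC_ACQUIRE)
#define smp_wmb()		smp_mb_release()
#define smp_rmb()		smp_mb_acquire()

static struct { volatile uint16_t idx; char data[8][32]; } ring;

/* Producer: write the payload first, then publish the new index */
static void produce(uint16_t i, const char *msg)
{
	strncpy(ring.data[i % 8], msg, sizeof(ring.data[0]) - 1);
	smp_wmb();	/* make payload visible before the index update */
	ring.idx = i + 1;
}

/* Consumer: read the index first, then the payload is safe to read */
static const char *consume(uint16_t i)
{
	if (ring.idx == i)
		return NULL;	/* nothing new */
	smp_rmb();	/* don't read the payload ahead of the index */
	return ring.data[i % 8];
}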
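[And a hedged sketch of how a caller might drain a queue with the API
declared in virtio.h. Assumptions not found in this patch: an
initialized struct vu_dev *vdev, queue 1 as the TX queue, and nothing
written back to the guest (hence length 0 in vu_queue_fill()). Note
that in_num/out_num are capacities on input to vu_queue_pop() and
filled-in counts on output, so they are reset on each iteration:]

struct iovec out_sg[VIRTQUEUE_MAX_SIZE], in_sg[VIRTQUEUE_MAX_SIZE];
struct vu_virtq *vq = &vdev->vq[1];	/* assumed TX queue index */
struct vu_virtq_element elem;
unsigned int done = 0;

for (;;) {
	elem.out_sg = out_sg;
	elem.out_num = VIRTQUEUE_MAX_SIZE;	/* capacity in, count out */
	elem.in_sg = in_sg;
	elem.in_num = VIRTQUEUE_MAX_SIZE;

	if (vu_queue_pop(vdev, vq, &elem))
		break;

	/* ...process elem.out_num guest buffers from elem.out_sg... */
	vu_queue_fill(vq, &elem, 0, done++);	/* queue a used entry */
}

if (done) {
	vu_queue_flush(vq, done);	/* publish used->idx (smp_wmb()) */
	vu_queue_notify(vdev, vq);	/* eventfd kick, unless suppressed */
}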
diff --git a/Makefile b/Makefile
index 258d298d4787..9b61a47e50fc 100644
--- a/Makefile
+++ b/Makefile
@@ -37,7 +37,7 @@ FLAGS += -DDUAL_STACK_SOCKETS=$(DUAL_STACK_SOCKETS)
PASST_SRCS = arch.c arp.c checksum.c conf.c dhcp.c dhcpv6.c flow.c fwd.c \
icmp.c igmp.c inany.c iov.c ip.c isolation.c lineread.c log.c mld.c \
ndp.c netlink.c packet.c passt.c pasta.c pcap.c pif.c tap.c tcp.c \
- tcp_buf.c tcp_splice.c udp.c udp_flow.c util.c
+ tcp_buf.c tcp_splice.c udp.c udp_flow.c util.c virtio.c
QRAP_SRCS = qrap.c
SRCS = $(PASST_SRCS) $(QRAP_SRCS)
@@ -47,7 +47,7 @@ PASST_HEADERS = arch.h arp.h checksum.h conf.h dhcp.h dhcpv6.h flow.h fwd.h \
flow_table.h icmp.h icmp_flow.h inany.h iov.h ip.h isolation.h \
lineread.h log.h ndp.h netlink.h packet.h passt.h pasta.h pcap.h pif.h \
siphash.h tap.h tcp.h tcp_buf.h tcp_conn.h tcp_internal.h tcp_splice.h \
- udp.h udp_flow.h util.h
+ udp.h udp_flow.h util.h virtio.h
HEADERS = $(PASST_HEADERS) seccomp.h
C := \#include <sys/random.h>\nint main(){int a=getrandom(0, 0, 0);}
diff --git a/util.h b/util.h
index 90428c42a21b..41bbd6044ec2 100644
--- a/util.h
+++ b/util.h
@@ -144,7 +144,16 @@ static inline uint32_t ntohl_unaligned(const void *p)
return ntohl(val);
}
+static inline void barrier(void) { __asm__ __volatile__("" ::: "memory"); }
+#define smp_mb() do { barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); } while (0)
+#define smp_mb_release() do { barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); } while (0)
+#define smp_mb_acquire() do { barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); } while (0)
+
+#define smp_wmb() smp_mb_release()
+#define smp_rmb() smp_mb_acquire()
+
#define NS_FN_STACK_SIZE (1024 * 1024) /* 1MiB */
+
int do_clone(int (*fn)(void *), char *stack_area, size_t stack_size, int flags,
void *arg);
#define NS_CALL(fn, arg) \
diff --git a/virtio.c b/virtio.c
new file mode 100644
index 000000000000..b23a68c4917f
--- /dev/null
+++ b/virtio.c
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0-or-later AND BSD-3-Clause
+/*
+ * virtio API, vring and virtqueue functions definition
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+/* Some parts copied from QEMU subprojects/libvhost-user/libvhost-user.c
+ * originally licensed under the following terms:
+ *
+ * --
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Marc-André Lureau <mlureau@redhat.com>
+ * Victor Kaplansky <victork@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ *
+ * Some parts copied from QEMU hw/virtio/virtio.c
+ * licensed under the following terms:
+ *
+ * Copyright IBM, Corp. 2007
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * --
+ *
+ * virtq_used_event() and virtq_avail_event() from
+ * https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-712000A
+ * licensed under the following terms:
+ *
+ * --
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers.
+ *
+ * Copyright 2007, 2009, IBM Corporation
+ * Copyright 2011, Red Hat, Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <endian.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/eventfd.h>
+#include <sys/socket.h>
+
+#include "util.h"
+#include "virtio.h"
+
+#define VIRTQUEUE_MAX_SIZE 1024
+
+/**
+ * vu_gpa_to_va() - Translate guest physical address to our virtual address.
+ * @dev: Vhost-user device
+ * @plen: Physical length to map (input), capped to region (output)
+ * @guest_addr: Guest physical address
+ *
+ * Return: virtual address in our address space of the guest physical address
+ */
+static void *vu_gpa_to_va(struct vu_dev *dev, uint64_t *plen, uint64_t guest_addr)
+{
+ unsigned int i;
+
+ if (*plen == 0)
+ return NULL;
+
+ /* Find matching memory region. */
+ for (i = 0; i < dev->nregions; i++) {
+ const struct vu_dev_region *r = &dev->regions[i];
+
+ if ((guest_addr >= r->gpa) &&
+ (guest_addr < (r->gpa + r->size))) {
+ if ((guest_addr + *plen) > (r->gpa + r->size))
+ *plen = r->gpa + r->size - guest_addr;
+ /* NOLINTNEXTLINE(performance-no-int-to-ptr) */
+ return (void *)(guest_addr - r->gpa + r->mmap_addr +
+ r->mmap_offset);
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * vring_avail_flags() - Read the available ring flags
+ * @vq: Virtqueue
+ *
+ * Return: the available ring descriptor flags of the given virtqueue
+ */
+static inline uint16_t vring_avail_flags(const struct vu_virtq *vq)
+{
+ return le16toh(vq->vring.avail->flags);
+}
+
+/**
+ * vring_avail_idx() - Read the available ring index
+ * @vq: Virtqueue
+ *
+ * Return: the available ring index of the given virtqueue
+ */
+static inline uint16_t vring_avail_idx(struct vu_virtq *vq)
+{
+ vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
+
+ return vq->shadow_avail_idx;
+}
+
+/**
+ * vring_avail_ring() - Read an available ring entry
+ * @vq: Virtqueue
+ * @i: Index of the entry to read
+ *
+ * Return: the ring entry content (head of the descriptor chain)
+ */
+static inline uint16_t vring_avail_ring(const struct vu_virtq *vq, int i)
+{
+ return le16toh(vq->vring.avail->ring[i]);
+}
+
+/**
+ * virtq_used_event() - Get location of used event indices
+ * (only with VIRTIO_F_EVENT_IDX)
+ * @vq: Virtqueue
+ *
+ * Return: the location of the used event index
+ */
+static inline uint16_t *virtq_used_event(const struct vu_virtq *vq)
+{
+ /* For backwards compat, used event index is at *end* of avail ring. */
+ return &vq->vring.avail->ring[vq->vring.num];
+}
+
+/**
+ * vring_get_used_event() - Get the used event from the available ring
+ * @vq: Virtqueue
+ *
+ * Return: the used event (available only if VIRTIO_RING_F_EVENT_IDX is set)
+ * used_event is a performant alternative where the driver
+ * specifies how far the device can progress before a notification
+ * is required.
+ */
+static inline uint16_t vring_get_used_event(const struct vu_virtq *vq)
+{
+ return le16toh(*virtq_used_event(vq));
+}
+
+/**
+ * virtqueue_get_head() - Get the head of the descriptor chain for a given
+ * index
+ * @vq: Virtqueue
+ * @idx: Available ring entry index
+ * @head: Head of the descriptor chain
+ */
+static void virtqueue_get_head(const struct vu_virtq *vq,
+ unsigned int idx, unsigned int *head)
+{
+ /* Grab the next descriptor number they're advertising, and increment
+ * the index we've seen.
+ */
+ *head = vring_avail_ring(vq, idx % vq->vring.num);
+
+ /* If their number is silly, that's a fatal mistake. */
+ if (*head >= vq->vring.num)
+ die("vhost-user: Guest says index %u is available", *head);
+}
+
+/**
+ * virtqueue_read_indirect_desc() - Copy virtio ring descriptors from guest
+ * memory
+ * @dev: Vhost-user device
+ * @desc: Destination address to copy the descriptors to
+ * @addr: Guest memory address to copy from
+ * @len: Length of memory to copy
+ *
+ * Return: -1 if there is an error, 0 otherwise
+ */
+static int virtqueue_read_indirect_desc(struct vu_dev *dev, struct vring_desc *desc,
+ uint64_t addr, size_t len)
+{
+ uint64_t read_len;
+
+ if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc)))
+ return -1;
+
+ if (len == 0)
+ return -1;
+
+ while (len) {
+ const struct vring_desc *orig_desc;
+
+ read_len = len;
+ orig_desc = vu_gpa_to_va(dev, &read_len, addr);
+ if (!orig_desc)
+ return -1;
+
+ memcpy(desc, orig_desc, read_len);
+ len -= read_len;
+ addr += read_len;
+ desc += read_len / sizeof(struct vring_desc);
+ }
+
+ return 0;
+}
+
+/**
+ * enum virtqueue_read_desc_state - State in the descriptor chain
+ * @VIRTQUEUE_READ_DESC_ERROR: Found an invalid descriptor
+ * @VIRTQUEUE_READ_DESC_DONE: No more descriptors in the chain
+ * @VIRTQUEUE_READ_DESC_MORE: More descriptors are available in the chain
+ */
+enum virtqueue_read_desc_state {
+ VIRTQUEUE_READ_DESC_ERROR = -1,
+ VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
+ VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
+};
+
+/**
+ * virtqueue_read_next_desc() - Read the next descriptor in the chain
+ * @desc: Virtio ring descriptors
+ * @i: Index of the current descriptor
+ * @max: Maximum value of the descriptor index
+ * @next: Index of the next descriptor in the chain (output value)
+ *
+ * Return: current chain descriptor state (error, next, done)
+ */
+static int virtqueue_read_next_desc(const struct vring_desc *desc,
+ int i, unsigned int max, unsigned int *next)
+{
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT))
+ return VIRTQUEUE_READ_DESC_DONE;
+
+ /* Check they're not leading us off end of descriptors. */
+ *next = le16toh(desc[i].next);
+ /* Make sure compiler knows to grab that: we don't want it changing! */
+ smp_wmb();
+
+ if (*next >= max)
+ return VIRTQUEUE_READ_DESC_ERROR;
+
+ return VIRTQUEUE_READ_DESC_MORE;
+}
+
+/**
+ * vu_queue_empty() - Check if virtqueue is empty
+ * @vq: Virtqueue
+ *
+ * Return: true if the virtqueue is empty, false otherwise
+ */
+bool vu_queue_empty(struct vu_virtq *vq)
+{
+ if (vq->shadow_avail_idx != vq->last_avail_idx)
+ return false;
+
+ return vring_avail_idx(vq) == vq->last_avail_idx;
+}
+
+/**
+ * vring_can_notify() - Check if a notification can be sent
+ * @dev: Vhost-user device
+ * @vq: Virtqueue
+ *
+ * Return: true if notification can be sent
+ */
+static bool vring_can_notify(const struct vu_dev *dev, struct vu_virtq *vq)
+{
+ uint16_t old, new;
+ bool v;
+
+ /* We need to expose used array entries before checking used event. */
+ smp_mb();
+
+ /* Always notify when queue is empty (if the feature was acknowledged) */
+ if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
+ !vq->inuse && vu_queue_empty(vq))
+ return true;
+
+ if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX))
+ return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
+
+ v = vq->signalled_used_valid;
+ vq->signalled_used_valid = true;
+ old = vq->signalled_used;
+ new = vq->signalled_used = vq->used_idx;
+ return !v || vring_need_event(vring_get_used_event(vq), new, old);
+}
+
+/**
+ * vu_queue_notify() - Send a notification to the given virtqueue
+ * @dev: Vhost-user device
+ * @vq: Virtqueue
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq)
+{
+ if (!vring_can_notify(dev, vq)) {
+ debug("vhost-user: virtqueue can skip notify...");
+ return;
+ }
+
+ if (eventfd_write(vq->call_fd, 1) < 0)
+ die_perror("Error writing vhost-user queue eventfd");
+}
+
+/**
+ * virtq_avail_event() - Get location of available event indices
+ * (only with VIRTIO_F_EVENT_IDX)
+ * @vq: Virtqueue
+ *
+ * Return: the location of the available event index
+ */
+static inline uint16_t *virtq_avail_event(const struct vu_virtq *vq)
+{
+ /* For backwards compat, avail event index is at *end* of used ring. */
+ return (uint16_t *)&vq->vring.used->ring[vq->vring.num];
+}
+
+/**
+ * vring_set_avail_event() - Set avail_event
+ * @vq: Virtqueue
+ * @val: Value to set to avail_event
+ * avail_event is used in the same way as used_event is in the
+ * avail ring.
+ * avail_event is used to advise the driver that notifications
+ * are unnecessary until the driver writes entry with an index
+ * specified by avail_event into the available ring.
+ */
+static inline void vring_set_avail_event(const struct vu_virtq *vq,
+ uint16_t val)
+{
+ uint16_t val_le = htole16(val);
+
+ if (!vq->notification)
+ return;
+
+ memcpy(virtq_avail_event(vq), &val_le, sizeof(val_le));
+}
+
+/**
+ * virtqueue_map_desc() - Translate descriptor ring physical address into our
+ * virtual address space
+ * @dev: Vhost-user device
+ * @p_num_sg: First iov entry to use (input),
+ * first iov entry not used (output)
+ * @iov: Iov array to use to store buffer virtual addresses
+ * @max_num_sg: Maximum number of iov entries
+ * @pa: Guest physical address of the buffer to map into our virtual
+ * address
+ * @sz: Size of the buffer
+ *
+ * Return: false on error, true otherwise
+ */
+static bool virtqueue_map_desc(struct vu_dev *dev,
+ unsigned int *p_num_sg, struct iovec *iov,
+ unsigned int max_num_sg,
+ uint64_t pa, size_t sz)
+{
+ unsigned int num_sg = *p_num_sg;
+
+ ASSERT(num_sg < max_num_sg);
+ ASSERT(sz);
+
+ while (sz) {
+ uint64_t len = sz;
+
+ iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
+ if (iov[num_sg].iov_base == NULL)
+ die("vhost-user: invalid address for buffers");
+ iov[num_sg].iov_len = len;
+ num_sg++;
+ sz -= len;
+ pa += len;
+ }
+
+ *p_num_sg = num_sg;
+ return true;
+}
+
+/**
+ * vu_queue_map_desc() - Map the virtqueue descriptor ring into our
+ * virtual address space
+ * @dev: Vhost-user device
+ * @vq: Virtqueue
+ * @idx: First descriptor ring entry to map
+ * @elem: Virtqueue element to store descriptor ring iov
+ *
+ * Return: -1 if there is an error, 0 otherwise
+ */
+static int vu_queue_map_desc(struct vu_dev *dev, struct vu_virtq *vq, unsigned int idx,
+ struct vu_virtq_element *elem)
+{
+ const struct vring_desc *desc = vq->vring.desc;
+ struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
+ unsigned int out_num = 0, in_num = 0;
+ unsigned int max = vq->vring.num;
+ unsigned int i = idx;
+ uint64_t read_len;
+ int rc;
+
+ if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
+ unsigned int desc_len;
+ uint64_t desc_addr;
+
+ if (le32toh(desc[i].len) % sizeof(struct vring_desc))
+ die("vhost-user: Invalid size for indirect buffer table");
+
+ /* loop over the indirect descriptor table */
+ desc_addr = le64toh(desc[i].addr);
+ desc_len = le32toh(desc[i].len);
+ max = desc_len / sizeof(struct vring_desc);
+ read_len = desc_len;
+ desc = vu_gpa_to_va(dev, &read_len, desc_addr);
+ if (desc && read_len != desc_len) {
+ /* Failed to use zero copy */
+ desc = NULL;
+ if (!virtqueue_read_indirect_desc(dev, desc_buf, desc_addr, desc_len))
+ desc = desc_buf;
+ }
+ if (!desc)
+ die("vhost-user: Invalid indirect buffer table");
+ i = 0;
+ }
+
+ /* Collect all the descriptors */
+ do {
+ if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
+ if (!virtqueue_map_desc(dev, &in_num, elem->in_sg,
+ elem->in_num,
+ le64toh(desc[i].addr),
+ le32toh(desc[i].len)))
+ return -1;
+ } else {
+ if (in_num)
+ die("Incorrect order for descriptors");
+ if (!virtqueue_map_desc(dev, &out_num, elem->out_sg,
+ elem->out_num,
+ le64toh(desc[i].addr),
+ le32toh(desc[i].len))) {
+ return -1;
+ }
+ }
+
+ /* If we've got too many, that implies a descriptor loop. */
+ if ((in_num + out_num) > max)
+ die("vhost-user: Loop in queue descriptor list");
+ rc = virtqueue_read_next_desc(desc, i, max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+
+ if (rc == VIRTQUEUE_READ_DESC_ERROR)
+ die("vhost-user: Failed to read descriptor list");
+
+ elem->index = idx;
+ elem->in_num = in_num;
+ elem->out_num = out_num;
+
+ return 0;
+}
+
+/**
+ * vu_queue_pop() - Pop an entry from the virtqueue
+ * @dev: Vhost-user device
+ * @vq: Virtqueue
+ * @elem: Virtqueue element to fill with the entry information
+ *
+ * Return: -1 if there is an error, 0 otherwise
+ */
+/* cppcheck-suppress unusedFunction */
+int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq, struct vu_virtq_element *elem)
+{
+ unsigned int head;
+ int ret;
+
+ if (vu_queue_empty(vq))
+ return -1;
+
+ /* Needed after vu_queue_empty(), see comment in
+ * virtqueue_num_heads().
+ */
+ smp_rmb();
+
+ if (vq->inuse >= vq->vring.num)
+ die("vhost-user queue size exceeded");
+
+ virtqueue_get_head(vq, vq->last_avail_idx++, &head);
+
+ if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX))
+ vring_set_avail_event(vq, vq->last_avail_idx);
+
+ ret = vu_queue_map_desc(dev, vq, head, elem);
+
+ if (ret < 0)
+ return ret;
+
+ vq->inuse++;
+
+ return 0;
+}
+
+/**
+ * vu_queue_detach_element() - Detach an element from the virtqueue
+ * @vq: Virtqueue
+ */
+void vu_queue_detach_element(struct vu_virtq *vq)
+{
+ vq->inuse--;
+ /* unmap, when DMA support is added */
+}
+
+/**
+ * vu_queue_unpop() - Push back the previously popped element from the virtqueue
+ * @vq: Virtqueue
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_queue_unpop(struct vu_virtq *vq)
+{
+ vq->last_avail_idx--;
+ vu_queue_detach_element(vq);
+}
+
+/**
+ * vu_queue_rewind() - Push back a given number of popped elements
+ * @vq: Virtqueue
+ * @num: Number of elements to unpop
+ *
+ * Return: True on success, false if @num is greater than the number of
+ * in-use elements
+ */
+/* cppcheck-suppress unusedFunction */
+bool vu_queue_rewind(struct vu_virtq *vq, unsigned int num)
+{
+ if (num > vq->inuse)
+ return false;
+
+ vq->last_avail_idx -= num;
+ vq->inuse -= num;
+ return true;
+}
+
+/**
+ * vring_used_write() - Write an entry in the used ring
+ * @vq: Virtqueue
+ * @uelem: Entry to write
+ * @i: Index of the entry in the used ring
+ */
+static inline void vring_used_write(struct vu_virtq *vq,
+ const struct vring_used_elem *uelem, int i)
+{
+ struct vring_used *used = vq->vring.used;
+
+ used->ring[i] = *uelem;
+}
+
+/**
+ * vu_queue_fill_by_index() - Update information of a descriptor ring entry
+ * in the used ring
+ * @vq: Virtqueue
+ * @index: Descriptor ring index
+ * @len: Size of the element
+ * @idx: Used ring entry index
+ */
+void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
+ unsigned int len, unsigned int idx)
+{
+ struct vring_used_elem uelem;
+
+ idx = (idx + vq->used_idx) % vq->vring.num;
+
+ uelem.id = htole32(index);
+ uelem.len = htole32(len);
+ vring_used_write(vq, &uelem, idx);
+}
+
+/**
+ * vu_queue_fill() - Update information of a given element in the used ring
+ * @vq: Virtqueue
+ * @elem: Element information to fill
+ * @len: Size of the element
+ * @idx: Used ring entry index
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_queue_fill(struct vu_virtq *vq, const struct vu_virtq_element *elem,
+ unsigned int len, unsigned int idx)
+{
+ vu_queue_fill_by_index(vq, elem->index, len, idx);
+}
+
+/**
+ * vring_used_idx_set() - Set the descriptor ring current index
+ * @vq: Virtqueue
+ * @val: Value to set in the index
+ */
+static inline void vring_used_idx_set(struct vu_virtq *vq, uint16_t val)
+{
+ vq->vring.used->idx = htole16(val);
+
+ vq->used_idx = val;
+}
+
+/**
+ * vu_queue_flush() - Flush the virtqueue
+ * @vq: Virtqueue
+ * @count: Number of entries to flush
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_queue_flush(struct vu_virtq *vq, unsigned int count)
+{
+ uint16_t old, new;
+
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+
+ old = vq->used_idx;
+ new = old + count;
+ vring_used_idx_set(vq, new);
+ vq->inuse -= count;
+ if ((uint16_t)(new - vq->signalled_used) < (uint16_t)(new - old))
+ vq->signalled_used_valid = false;
+}
diff --git a/virtio.h b/virtio.h
new file mode 100644
index 000000000000..94efeb049fbc
--- /dev/null
+++ b/virtio.h
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * virtio API, vring and virtqueue functions definition
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#ifndef VIRTIO_H
+#define VIRTIO_H
+
+#include <stdbool.h>
+#include <linux/vhost_types.h>
+
+/* Maximum size of a virtqueue */
+#define VIRTQUEUE_MAX_SIZE 1024
+
+/**
+ * struct vu_ring - Virtqueue rings
+ * @num: Size of the queue
+ * @desc: Descriptor ring
+ * @avail: Available ring
+ * @used: Used ring
+ * @log_guest_addr: Guest address for logging
+ * @flags: Vring flags
+ * VHOST_VRING_F_LOG is set if log address is valid
+ */
+struct vu_ring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ uint64_t log_guest_addr;
+ uint32_t flags;
+};
+
+/**
+ * struct vu_virtq - Virtqueue definition
+ * @vring: Virtqueue rings
+ * @last_avail_idx: Next head to pop
+ * @shadow_avail_idx: Last avail_idx read from VQ.
+ * @used_idx: Descriptor ring current index
+ * @signalled_used: Last used index value we have signalled on
+ * @signalled_used_valid: True if signalled_used is valid
+ * @notification: True if the queues notify (via event
+ * index or interrupt)
+ * @inuse: Number of entries in use
+ * @call_fd: The event file descriptor to signal when
+ * buffers are used.
+ * @kick_fd: The event file descriptor for adding
+ * buffers to the vring
+ * @err_fd: The event file descriptor to signal when
+ * error occurs
+ * @enable: True if the virtqueue is enabled
+ * @started: True if the virtqueue is started
+ * @vra: QEMU address of our rings
+ */
+struct vu_virtq {
+ struct vu_ring vring;
+ uint16_t last_avail_idx;
+ uint16_t shadow_avail_idx;
+ uint16_t used_idx;
+ uint16_t signalled_used;
+ bool signalled_used_valid;
+ bool notification;
+ unsigned int inuse;
+ int call_fd;
+ int kick_fd;
+ int err_fd;
+ unsigned int enable;
+ bool started;
+ struct vhost_vring_addr vra;
+};
+
+/**
+ * struct vu_dev_region - guest shared memory region
+ * @gpa: Guest physical address of the region
+ * @size: Memory size in bytes
+ * @qva: QEMU virtual address
+ * @mmap_offset: Offset where the region starts in the mapped memory
+ * @mmap_addr: Address of the mapped memory
+ */
+struct vu_dev_region {
+ uint64_t gpa;
+ uint64_t size;
+ uint64_t qva;
+ uint64_t mmap_offset;
+ uint64_t mmap_addr;
+};
+
+#define VHOST_USER_MAX_QUEUES 2
+
+/*
+ * Set a reasonable maximum number of RAM slots, which will be supported by
+ * any architecture.
+ */
+#define VHOST_USER_MAX_RAM_SLOTS 32
+
+/**
+ * struct vu_dev - vhost-user device information
+ * @nregions: Number of shared memory regions
+ * @regions: Guest shared memory regions
+ * @vq: Virtqueues of the device
+ * @features: Vhost-user features
+ * @protocol_features: Vhost-user protocol features
+ */
+struct vu_dev {
+ uint32_t nregions;
+ struct vu_dev_region regions[VHOST_USER_MAX_RAM_SLOTS];
+ struct vu_virtq vq[VHOST_USER_MAX_QUEUES];
+ uint64_t features;
+ uint64_t protocol_features;
+};
+
+/**
+ * struct vu_virtq_element - virtqueue element
+ * @index: Descriptor ring index
+ * @out_num: Number of outgoing iovec buffers
+ * @in_num: Number of incoming iovec buffers
+ * @in_sg: Incoming iovec buffers
+ * @out_sg: Outgoing iovec buffers
+ */
+struct vu_virtq_element {
+ unsigned int index;
+ unsigned int out_num;
+ unsigned int in_num;
+ struct iovec *in_sg;
+ struct iovec *out_sg;
+};
+
+/**
+ * has_feature() - Check a feature bit in a features set
+ * @features: Features set
+ * @fbit: Feature bit to check
+ *
+ * Return: True if the feature bit is set
+ */
+static inline bool has_feature(uint64_t features, unsigned int fbit)
+{
+ return !!(features & (1ULL << fbit));
+}
+
+/**
+ * vu_has_feature() - Check if a virtio-net feature is available
+ * @vdev: Vhost-user device
+ * @fbit: Feature to check
+ *
+ * Return: True if the feature is available
+ */
+static inline bool vu_has_feature(const struct vu_dev *vdev,
+ unsigned int fbit)
+{
+ return has_feature(vdev->features, fbit);
+}
+
+/**
+ * vu_has_protocol_feature() - Check if a vhost-user feature is available
+ * @vdev: Vhost-user device
+ * @fbit: Feature to check
+ *
+ * Return: True if the feature is available
+ */
+/* cppcheck-suppress unusedFunction */
+static inline bool vu_has_protocol_feature(const struct vu_dev *vdev,
+ unsigned int fbit)
+{
+ return has_feature(vdev->protocol_features, fbit);
+}
+
+bool vu_queue_empty(struct vu_virtq *vq);
+void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq);
+int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq,
+ struct vu_virtq_element *elem);
+void vu_queue_detach_element(struct vu_virtq *vq);
+void vu_queue_unpop(struct vu_virtq *vq);
+bool vu_queue_rewind(struct vu_virtq *vq, unsigned int num);
+void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
+ unsigned int len, unsigned int idx);
+void vu_queue_fill(struct vu_virtq *vq,
+ const struct vu_virtq_element *elem, unsigned int len,
+ unsigned int idx);
+void vu_queue_flush(struct vu_virtq *vq, unsigned int count);
+#endif /* VIRTIO_H */
--
2.47.0
* [PATCH v14 3/9] vhost-user: introduce vhost-user API
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 1/9] packet: replace struct desc by struct iovec Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 2/9] vhost-user: introduce virtio API Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 4/9] udp: Prepare udp.c to be shared with vhost-user Laurent Vivier
` (6 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Laurent Vivier
Add vhost_user.c and vhost_user.h that define the functions needed
to implement a vhost-user backend.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
Makefile | 4 +-
vhost_user.c | 970 +++++++++++++++++++++++++++++++++++++++++++++++++++
vhost_user.h | 208 +++++++++++
virtio.h | 1 +
4 files changed, 1181 insertions(+), 2 deletions(-)
create mode 100644 vhost_user.c
create mode 100644 vhost_user.h
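
[Editor's sketch, under stated assumptions, of the dispatch-loop shape
these handlers imply: vu_message_read_default() returns 1 when a
message arrived, each vu_*_exec() handler returns true when the
front-end expects a reply, and vu_send_reply() sends it. The handler
table below is illustrative and deliberately incomplete:]

static bool (*const handler[VHOST_USER_MAX])(struct vu_dev *,
					     struct vhost_user_msg *) = {
	[VHOST_USER_GET_FEATURES] = vu_get_features_exec,
	[VHOST_USER_SET_FEATURES] = vu_set_features_exec,
	/* ...one entry per request type the back-end implements... */
};

static void vu_dispatch(struct vu_dev *vdev, int conn_fd)
{
	struct vhost_user_msg msg = { 0 };

	while (vu_message_read_default(conn_fd, &msg) == 1) {
		debug("vhost-user: %s",
		      vu_request_to_string(msg.hdr.request));

		if (msg.hdr.request < VHOST_USER_MAX &&
		    handler[msg.hdr.request] &&
		    handler[msg.hdr.request](vdev, &msg))
			vu_send_reply(conn_fd, &msg);
	}
}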
diff --git a/Makefile b/Makefile
index 9b61a47e50fc..bcb084e66e4d 100644
--- a/Makefile
+++ b/Makefile
@@ -37,7 +37,7 @@ FLAGS += -DDUAL_STACK_SOCKETS=$(DUAL_STACK_SOCKETS)
PASST_SRCS = arch.c arp.c checksum.c conf.c dhcp.c dhcpv6.c flow.c fwd.c \
icmp.c igmp.c inany.c iov.c ip.c isolation.c lineread.c log.c mld.c \
ndp.c netlink.c packet.c passt.c pasta.c pcap.c pif.c tap.c tcp.c \
- tcp_buf.c tcp_splice.c udp.c udp_flow.c util.c virtio.c
+ tcp_buf.c tcp_splice.c udp.c udp_flow.c util.c vhost_user.c virtio.c
QRAP_SRCS = qrap.c
SRCS = $(PASST_SRCS) $(QRAP_SRCS)
@@ -47,7 +47,7 @@ PASST_HEADERS = arch.h arp.h checksum.h conf.h dhcp.h dhcpv6.h flow.h fwd.h \
flow_table.h icmp.h icmp_flow.h inany.h iov.h ip.h isolation.h \
lineread.h log.h ndp.h netlink.h packet.h passt.h pasta.h pcap.h pif.h \
siphash.h tap.h tcp.h tcp_buf.h tcp_conn.h tcp_internal.h tcp_splice.h \
- udp.h udp_flow.h util.h virtio.h
+ udp.h udp_flow.h util.h vhost_user.h virtio.h
HEADERS = $(PASST_HEADERS) seccomp.h
C := \#include <sys/random.h>\nint main(){int a=getrandom(0, 0, 0);}
diff --git a/vhost_user.c b/vhost_user.c
new file mode 100644
index 000000000000..89627a227ff1
--- /dev/null
+++ b/vhost_user.c
@@ -0,0 +1,970 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * vhost-user API, command management and virtio interface
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ *
+ * Some parts from QEMU subprojects/libvhost-user/libvhost-user.c
+ * licensed under the following terms:
+ *
+ * Copyright IBM, Corp. 2007
+ * Copyright (c) 2016 Red Hat, Inc.
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Marc-André Lureau <mlureau@redhat.com>
+ * Victor Kaplansky <victork@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <time.h>
+#include <net/ethernet.h>
+#include <netinet/in.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/mman.h>
+#include <linux/vhost_types.h>
+#include <linux/virtio_net.h>
+
+#include "util.h"
+#include "passt.h"
+#include "tap.h"
+#include "vhost_user.h"
+#include "pcap.h"
+
+/* vhost-user version we are compatible with */
+#define VHOST_USER_VERSION 1
+
+/**
+ * vu_print_capabilities() - Print vhost-user capabilities and exit;
+ * this is part of the vhost-user backend
+ * convention
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_print_capabilities(void)
+{
+ info("{");
+ info(" \"type\": \"net\"");
+ info("}");
+ exit(EXIT_SUCCESS);
+}
+
+/**
+ * vu_request_to_string() - convert a vhost-user request number to its name
+ * @req: request number
+ *
+ * Return: the name of the request number
+ */
+static const char *vu_request_to_string(unsigned int req)
+{
+ if (req < VHOST_USER_MAX) {
+#define REQ(req) [req] = #req
+ static const char * const vu_request_str[VHOST_USER_MAX] = {
+ REQ(VHOST_USER_NONE),
+ REQ(VHOST_USER_GET_FEATURES),
+ REQ(VHOST_USER_SET_FEATURES),
+ REQ(VHOST_USER_SET_OWNER),
+ REQ(VHOST_USER_RESET_OWNER),
+ REQ(VHOST_USER_SET_MEM_TABLE),
+ REQ(VHOST_USER_SET_LOG_BASE),
+ REQ(VHOST_USER_SET_LOG_FD),
+ REQ(VHOST_USER_SET_VRING_NUM),
+ REQ(VHOST_USER_SET_VRING_ADDR),
+ REQ(VHOST_USER_SET_VRING_BASE),
+ REQ(VHOST_USER_GET_VRING_BASE),
+ REQ(VHOST_USER_SET_VRING_KICK),
+ REQ(VHOST_USER_SET_VRING_CALL),
+ REQ(VHOST_USER_SET_VRING_ERR),
+ REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
+ REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
+ REQ(VHOST_USER_GET_QUEUE_NUM),
+ REQ(VHOST_USER_SET_VRING_ENABLE),
+ REQ(VHOST_USER_SEND_RARP),
+ REQ(VHOST_USER_NET_SET_MTU),
+ REQ(VHOST_USER_SET_BACKEND_REQ_FD),
+ REQ(VHOST_USER_IOTLB_MSG),
+ REQ(VHOST_USER_SET_VRING_ENDIAN),
+ REQ(VHOST_USER_GET_CONFIG),
+ REQ(VHOST_USER_SET_CONFIG),
+ REQ(VHOST_USER_POSTCOPY_ADVISE),
+ REQ(VHOST_USER_POSTCOPY_LISTEN),
+ REQ(VHOST_USER_POSTCOPY_END),
+ REQ(VHOST_USER_GET_INFLIGHT_FD),
+ REQ(VHOST_USER_SET_INFLIGHT_FD),
+ REQ(VHOST_USER_GPU_SET_SOCKET),
+ REQ(VHOST_USER_VRING_KICK),
+ REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
+ REQ(VHOST_USER_ADD_MEM_REG),
+ REQ(VHOST_USER_REM_MEM_REG),
+ };
+#undef REQ
+ return vu_request_str[req];
+ }
+
+ return "unknown";
+}
+
+/**
+ * qva_to_va() - Translate front-end (QEMU) virtual address to our virtual
+ * address
+ * @dev: vhost-user device
+ * @qemu_addr: front-end userspace address
+ *
+ * Return: the memory address in our process virtual address space.
+ */
+static void *qva_to_va(struct vu_dev *dev, uint64_t qemu_addr)
+{
+ unsigned int i;
+
+ /* Find matching memory region. */
+ for (i = 0; i < dev->nregions; i++) {
+ const struct vu_dev_region *r = &dev->regions[i];
+
+ if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
+ /* NOLINTNEXTLINE(performance-no-int-to-ptr) */
+ return (void *)(qemu_addr - r->qva + r->mmap_addr +
+ r->mmap_offset);
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * vmsg_close_fds() - Close all file descriptors of a given message
+ * @vmsg: vhost-user message with the list of the file descriptors
+ */
+static void vmsg_close_fds(const struct vhost_user_msg *vmsg)
+{
+ int i;
+
+ for (i = 0; i < vmsg->fd_num; i++)
+ close(vmsg->fds[i]);
+}
+
+/**
+ * vu_remove_watch() - Remove a file descriptor from our passt epoll
+ * file descriptor
+ * @vdev: vhost-user device
+ * @fd: file descriptor to remove
+ */
+static void vu_remove_watch(const struct vu_dev *vdev, int fd)
+{
+ /* Placeholder to add passt related code */
+ (void)vdev;
+ (void)fd;
+}
+
+/**
+ * vmsg_set_reply_u64() - Set reply payload.u64 and clear request flags
+ * and fd_num
+ * @vmsg: vhost-user message
+ * @val: 64-bit value to reply
+ */
+static void vmsg_set_reply_u64(struct vhost_user_msg *vmsg, uint64_t val)
+{
+ vmsg->hdr.flags = 0; /* defaults will be set by vu_send_reply() */
+ vmsg->hdr.size = sizeof(vmsg->payload.u64);
+ vmsg->payload.u64 = val;
+ vmsg->fd_num = 0;
+}
+
+/**
+ * vu_message_read_default() - Read incoming vhost-user message from the
+ * front-end
+ * @conn_fd: vhost-user command socket
+ * @vmsg: vhost-user message
+ *
+ * Return: 0 if recvmsg() has been interrupted or if there's no data to read,
+ * 1 if a message has been received
+ */
+static int vu_message_read_default(int conn_fd, struct vhost_user_msg *vmsg)
+{
+ char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS *
+ sizeof(int))] = { 0 };
+ struct iovec iov = {
+ .iov_base = (char *)vmsg,
+ .iov_len = VHOST_USER_HDR_SIZE,
+ };
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = control,
+ .msg_controllen = sizeof(control),
+ };
+ ssize_t ret, sz_payload;
+ struct cmsghdr *cmsg;
+
+ ret = recvmsg(conn_fd, &msg, MSG_DONTWAIT);
+ if (ret < 0) {
+ if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)
+ return 0;
+ die_perror("vhost-user message receive (recvmsg)");
+ }
+
+ vmsg->fd_num = 0;
+ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_RIGHTS) {
+ size_t fd_size;
+
+ ASSERT(cmsg->cmsg_len >= CMSG_LEN(0));
+ fd_size = cmsg->cmsg_len - CMSG_LEN(0);
+ ASSERT(fd_size <= sizeof(vmsg->fds));
+ vmsg->fd_num = fd_size / sizeof(int);
+ memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
+ break;
+ }
+ }
+
+ sz_payload = vmsg->hdr.size;
+ if ((size_t)sz_payload > sizeof(vmsg->payload)) {
+ die("vhost-user message request too big: %d,"
+ " size: vmsg->size: %zd, "
+ "while sizeof(vmsg->payload) = %zu",
+ vmsg->hdr.request, sz_payload, sizeof(vmsg->payload));
+ }
+
+ if (sz_payload) {
+ do
+ ret = recv(conn_fd, &vmsg->payload, sz_payload, 0);
+ while (ret < 0 && errno == EINTR);
+
+ if (ret < 0)
+ die_perror("vhost-user message receive");
+
+ if (ret == 0)
+ die("EOF on vhost-user message receive");
+
+ if (ret < sz_payload)
+ die("Short-read on vhost-user message receive");
+ }
+
+ return 1;
+}
+
+/**
+ * vu_message_write() - Send a message to the front-end
+ * @conn_fd: vhost-user command socket
+ * @vmsg: vhost-user message
+ *
+ * #syscalls:vu sendmsg
+ */
+static void vu_message_write(int conn_fd, struct vhost_user_msg *vmsg)
+{
+ char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = { 0 };
+ struct iovec iov = {
+ .iov_base = (char *)vmsg,
+ .iov_len = VHOST_USER_HDR_SIZE + vmsg->hdr.size,
+ };
+ struct msghdr msg = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ .msg_control = control,
+ };
+ int rc;
+
+ ASSERT(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
+ if (vmsg->fd_num > 0) {
+ size_t fdsize = vmsg->fd_num * sizeof(int);
+ struct cmsghdr *cmsg;
+
+ msg.msg_controllen = CMSG_SPACE(fdsize);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_len = CMSG_LEN(fdsize);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
+ }
+
+ do
+ rc = sendmsg(conn_fd, &msg, 0);
+ while (rc < 0 && errno == EINTR);
+
+ if (rc < 0)
+ die_perror("vhost-user message send");
+
+ if ((uint32_t)rc < VHOST_USER_HDR_SIZE + vmsg->hdr.size)
+ die("EOF on vhost-user message send");
+}
+
+/**
+ * vu_send_reply() - Update message flags and send it to front-end
+ * @conn_fd: vhost-user command socket
+ * @msg: vhost-user message
+ */
+static void vu_send_reply(int conn_fd, struct vhost_user_msg *msg)
+{
+ msg->hdr.flags &= ~VHOST_USER_VERSION_MASK;
+ msg->hdr.flags |= VHOST_USER_VERSION;
+ msg->hdr.flags |= VHOST_USER_REPLY_MASK;
+
+ vu_message_write(conn_fd, msg);
+}
+
+/**
+ * vu_get_features_exec() - Provide back-end features bitmask to front-end
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: True as a reply is requested
+ */
+static bool vu_get_features_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ uint64_t features =
+ 1ULL << VIRTIO_F_VERSION_1 |
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF |
+ 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
+
+ (void)vdev;
+
+ vmsg_set_reply_u64(msg, features);
+
+ debug("Sending back to guest u64: 0x%016"PRIx64, msg->payload.u64);
+
+ return true;
+}
+
+/**
+ * vu_set_enable_all_rings() - Enable/disable all the virtqueues
+ * @vdev: vhost-user device
+ * @enable: New virtqueues state
+ */
+static void vu_set_enable_all_rings(struct vu_dev *vdev, bool enable)
+{
+ uint16_t i;
+
+ for (i = 0; i < VHOST_USER_MAX_QUEUES; i++)
+ vdev->vq[i].enable = enable;
+}
+
+/**
+ * vu_set_features_exec() - Enable features of the back-end
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_features_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ debug("u64: 0x%016"PRIx64, msg->payload.u64);
+
+ vdev->features = msg->payload.u64;
+ /* We only support devices conforming to VIRTIO 1.0 or
+ * later
+ */
+ if (!vu_has_feature(vdev, VIRTIO_F_VERSION_1))
+ die("virtio legacy devices aren't supported by passt");
+
+ if (!vu_has_feature(vdev, VHOST_USER_F_PROTOCOL_FEATURES))
+ vu_set_enable_all_rings(vdev, true);
+
+ return false;
+}
+
+/**
+ * vu_set_owner_exec() - Session start flag, do nothing in our case
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_owner_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ (void)vdev;
+ (void)msg;
+
+ return false;
+}
+
+/**
+ * map_ring() - Convert ring front-end (QEMU) addresses to our process
+ * virtual address space.
+ * @vdev: vhost-user device
+ * @vq: Virtqueue
+ *
+ * Return: True if ring cannot be mapped to our address space
+ */
+static bool map_ring(struct vu_dev *vdev, struct vu_virtq *vq)
+{
+ vq->vring.desc = qva_to_va(vdev, vq->vra.desc_user_addr);
+ vq->vring.used = qva_to_va(vdev, vq->vra.used_user_addr);
+ vq->vring.avail = qva_to_va(vdev, vq->vra.avail_user_addr);
+
+ debug("Setting virtq addresses:");
+ debug(" vring_desc at %p", (void *)vq->vring.desc);
+ debug(" vring_used at %p", (void *)vq->vring.used);
+ debug(" vring_avail at %p", (void *)vq->vring.avail);
+
+ return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
+}
+
+/**
+ * vu_set_mem_table_exec() - Set the memory map regions, used to
+ * translate the vring addresses
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: False as no reply is requested
+ *
+ * #syscalls:vu mmap munmap
+ */
+static bool vu_set_mem_table_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ struct vhost_user_memory m = msg->payload.memory, *memory = &m;
+ unsigned int i;
+
+ for (i = 0; i < vdev->nregions; i++) {
+ const struct vu_dev_region *r = &vdev->regions[i];
+
+ if (r->mmap_addr) {
+ /* NOLINTNEXTLINE(performance-no-int-to-ptr) */
+ munmap((void *)r->mmap_addr, r->size + r->mmap_offset);
+ }
+ }
+ vdev->nregions = memory->nregions;
+
+ debug("vhost-user nregions: %u", memory->nregions);
+ for (i = 0; i < vdev->nregions; i++) {
+ struct vhost_user_memory_region *msg_region = &memory->regions[i];
+ struct vu_dev_region *dev_region = &vdev->regions[i];
+ void *mmap_addr;
+
+ debug("vhost-user region %d", i);
+ debug(" guest_phys_addr: 0x%016"PRIx64,
+ msg_region->guest_phys_addr);
+ debug(" memory_size: 0x%016"PRIx64,
+ msg_region->memory_size);
+ debug(" userspace_addr 0x%016"PRIx64,
+ msg_region->userspace_addr);
+ debug(" mmap_offset 0x%016"PRIx64,
+ msg_region->mmap_offset);
+
+ dev_region->gpa = msg_region->guest_phys_addr;
+ dev_region->size = msg_region->memory_size;
+ dev_region->qva = msg_region->userspace_addr;
+ dev_region->mmap_offset = msg_region->mmap_offset;
+
+ /* We don't use offset argument of mmap() since the
+ * mapped address has to be page aligned.
+ */
+ mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+ PROT_READ | PROT_WRITE, MAP_SHARED |
+ MAP_NORESERVE, msg->fds[i], 0);
+
+ if (mmap_addr == MAP_FAILED)
+ die_perror("vhost-user region mmap error");
+
+ dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+ debug(" mmap_addr: 0x%016"PRIx64,
+ dev_region->mmap_addr);
+
+ close(msg->fds[i]);
+ }
+
+ for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
+ if (vdev->vq[i].vring.desc) {
+ if (map_ring(vdev, &vdev->vq[i]))
+ die("remapping queue %d during setmemtable", i);
+ }
+ }
+
+ return false;
+}
+
+/**
+ * vu_set_vring_num_exec() - Set the size of the queue (vring size)
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_vring_num_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ unsigned int idx = msg->payload.state.index;
+ unsigned int num = msg->payload.state.num;
+
+ debug("State.index: %u", idx);
+ debug("State.num: %u", num);
+ vdev->vq[idx].vring.num = num;
+
+ return false;
+}
+
+/**
+ * vu_set_vring_addr_exec() - Set the addresses of the vring
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_vring_addr_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ /* We need to copy the payload to vhost_vring_addr structure
+ * to access index because address of msg->payload.addr
+ * can be unaligned as it is packed.
+ */
+ struct vhost_vring_addr addr = msg->payload.addr;
+ struct vu_virtq *vq = &vdev->vq[addr.index];
+
+ debug("vhost_vring_addr:");
+ debug(" index: %d", addr.index);
+ debug(" flags: %d", addr.flags);
+ debug(" desc_user_addr: 0x%016" PRIx64,
+ (uint64_t)addr.desc_user_addr);
+ debug(" used_user_addr: 0x%016" PRIx64,
+ (uint64_t)addr.used_user_addr);
+ debug(" avail_user_addr: 0x%016" PRIx64,
+ (uint64_t)addr.avail_user_addr);
+ debug(" log_guest_addr: 0x%016" PRIx64,
+ (uint64_t)addr.log_guest_addr);
+
+ vq->vra = msg->payload.addr;
+ vq->vring.flags = addr.flags;
+ vq->vring.log_guest_addr = addr.log_guest_addr;
+
+ if (map_ring(vdev, vq))
+ die("Invalid vring_addr message");
+
+ vq->used_idx = le16toh(vq->vring.used->idx);
+
+ if (vq->last_avail_idx != vq->used_idx) {
+ debug("Last avail index != used index: %u != %u",
+ vq->last_avail_idx, vq->used_idx);
+ }
+
+ return false;
+}
+
+/**
+ * vu_set_vring_base_exec() - Set the next index to use for descriptors
+ * in this vring
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_vring_base_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ unsigned int idx = msg->payload.state.index;
+ unsigned int num = msg->payload.state.num;
+
+ debug("State.index: %u", idx);
+ debug("State.num: %u", num);
+ vdev->vq[idx].shadow_avail_idx = vdev->vq[idx].last_avail_idx = num;
+
+ return false;
+}
+
+/**
+ * vu_get_vring_base_exec() - Stop the vring and return the current
+ * descriptor index or indices
+ * @vdev: vhost-user device
+ * @msg: vhost-user message
+ *
+ * Return: True as a reply is requested
+ */
+static bool vu_get_vring_base_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ unsigned int idx = msg->payload.state.index;
+
+ debug("State.index: %u", idx);
+ msg->payload.state.num = vdev->vq[idx].last_avail_idx;
+ msg->hdr.size = sizeof(msg->payload.state);
+
+ vdev->vq[idx].started = false;
+
+ if (vdev->vq[idx].call_fd != -1) {
+ close(vdev->vq[idx].call_fd);
+ vdev->vq[idx].call_fd = -1;
+ }
+ if (vdev->vq[idx].kick_fd != -1) {
+ vu_remove_watch(vdev, vdev->vq[idx].kick_fd);
+ close(vdev->vq[idx].kick_fd);
+ vdev->vq[idx].kick_fd = -1;
+ }
+
+ return true;
+}
+
+/**
+ * vu_set_watch() - Add a file descriptor to the passt epoll file descriptor
+ * @vdev: vhost-user device
+ * @idx: queue index of the file descriptor to add
+ */
+static void vu_set_watch(const struct vu_dev *vdev, int idx)
+{
+ /* Placeholder to add passt related code */
+ (void)vdev;
+ (void)idx;
+}
+
+/**
+ * vu_check_queue_msg_file() - Check if a message is valid,
+ * close fds if NOFD bit is set
+ * @msg: vhost-user message
+ */
+static void vu_check_queue_msg_file(struct vhost_user_msg *msg)
+{
+ bool nofd = msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
+ int idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ if (idx >= VHOST_USER_MAX_QUEUES)
+ die("Invalid vhost-user queue index: %u", idx);
+
+ if (nofd) {
+ vmsg_close_fds(msg);
+ return;
+ }
+
+ if (msg->fd_num != 1)
+ die("Invalid fds in vhost-user request: %d", msg->hdr.request);
+}
+
+/**
+ * vu_set_vring_kick_exec() - Set the event file descriptor for adding buffers
+ * to the vring
+ * @vdev: vhost-user device
+ * @msg:		vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_vring_kick_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ bool nofd = msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
+ int idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ debug("u64: 0x%016"PRIx64, msg->payload.u64);
+
+ vu_check_queue_msg_file(msg);
+
+ if (vdev->vq[idx].kick_fd != -1) {
+ vu_remove_watch(vdev, vdev->vq[idx].kick_fd);
+ close(vdev->vq[idx].kick_fd);
+ vdev->vq[idx].kick_fd = -1;
+ }
+
+ if (!nofd)
+ vdev->vq[idx].kick_fd = msg->fds[0];
+
+ debug("Got kick_fd: %d for vq: %d", vdev->vq[idx].kick_fd, idx);
+
+ vdev->vq[idx].started = true;
+
+ if (vdev->vq[idx].kick_fd != -1 && VHOST_USER_IS_QUEUE_TX(idx)) {
+ vu_set_watch(vdev, idx);
+ debug("Waiting for kicks on fd: %d for vq: %d",
+ vdev->vq[idx].kick_fd, idx);
+ }
+
+ return false;
+}
+
+/**
+ * vu_set_vring_call_exec() - Set the event file descriptor to signal when
+ * buffers are used
+ * @vdev: vhost-user device
+ * @msg:		vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_vring_call_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ bool nofd = msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
+ int idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ debug("u64: 0x%016"PRIx64, msg->payload.u64);
+
+ vu_check_queue_msg_file(msg);
+
+ if (vdev->vq[idx].call_fd != -1) {
+ close(vdev->vq[idx].call_fd);
+ vdev->vq[idx].call_fd = -1;
+ }
+
+ if (!nofd)
+ vdev->vq[idx].call_fd = msg->fds[0];
+
+	/* signal the front-end once, in case of an I/O hang after reconnecting */
+ if (vdev->vq[idx].call_fd != -1)
+ eventfd_write(msg->fds[0], 1);
+
+ debug("Got call_fd: %d for vq: %d", vdev->vq[idx].call_fd, idx);
+
+ return false;
+}
+
+/**
+ * vu_set_vring_err_exec() - Set the event file descriptor to signal when
+ *                           an error occurs
+ * @vdev:		vhost-user device
+ * @msg:		vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_vring_err_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ bool nofd = msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;
+ int idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+
+ debug("u64: 0x%016"PRIx64, msg->payload.u64);
+
+ vu_check_queue_msg_file(msg);
+
+ if (vdev->vq[idx].err_fd != -1) {
+ close(vdev->vq[idx].err_fd);
+ vdev->vq[idx].err_fd = -1;
+ }
+
+ if (!nofd)
+ vdev->vq[idx].err_fd = msg->fds[0];
+
+ return false;
+}
+
+/**
+ * vu_get_protocol_features_exec() - Provide the protocol (vhost-user) features
+ * to the front-end
+ * @vdev: vhost-user device
+ * @msg:		vhost-user message
+ *
+ * Return: True as a reply is requested
+ */
+static bool vu_get_protocol_features_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK;
+
+ (void)vdev;
+ vmsg_set_reply_u64(msg, features);
+
+ return true;
+}
+
+/**
+ * vu_set_protocol_features_exec() - Enable protocol (vhost-user) features
+ * @vdev: vhost-user device
+ * @msg:		vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_protocol_features_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ uint64_t features = msg->payload.u64;
+
+ debug("u64: 0x%016"PRIx64, features);
+
+	vdev->protocol_features = features;
+
+ return false;
+}
+
+/**
+ * vu_get_queue_num_exec() - Tell how many queues we support
+ * @vdev: vhost-user device
+ * @msg:		vhost-user message
+ *
+ * Return: True as a reply is requested
+ */
+static bool vu_get_queue_num_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ (void)vdev;
+
+ vmsg_set_reply_u64(msg, VHOST_USER_MAX_QUEUES);
+
+ return true;
+}
+
+/**
+ * vu_set_vring_enable_exec() - Enable or disable the corresponding vring
+ * @vdev:		vhost-user device
+ * @msg:		vhost-user message
+ *
+ * Return: False as no reply is requested
+ */
+static bool vu_set_vring_enable_exec(struct vu_dev *vdev,
+ struct vhost_user_msg *msg)
+{
+ unsigned int enable = msg->payload.state.num;
+ unsigned int idx = msg->payload.state.index;
+
+ debug("State.index: %u", idx);
+ debug("State.enable: %u", enable);
+
+ if (idx >= VHOST_USER_MAX_QUEUES)
+ die("Invalid vring_enable index: %u", idx);
+
+ vdev->vq[idx].enable = enable;
+ return false;
+}
+
+/**
+ * vu_init() - Initialize vhost-user device structure
+ * @c: execution context
+ * @vdev: vhost-user device
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_init(struct ctx *c, struct vu_dev *vdev)
+{
+ int i;
+
+ vdev->context = c;
+ for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
+ vdev->vq[i] = (struct vu_virtq){
+ .call_fd = -1,
+ .kick_fd = -1,
+ .err_fd = -1,
+ .notification = true,
+ };
+ }
+}
+
+/**
+ * vu_cleanup() - Reset vhost-user device
+ * @vdev: vhost-user device
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_cleanup(struct vu_dev *vdev)
+{
+ unsigned int i;
+
+ for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
+ struct vu_virtq *vq = &vdev->vq[i];
+
+ vq->started = false;
+ vq->notification = true;
+
+ if (vq->call_fd != -1) {
+ close(vq->call_fd);
+ vq->call_fd = -1;
+ }
+ if (vq->err_fd != -1) {
+ close(vq->err_fd);
+ vq->err_fd = -1;
+ }
+ if (vq->kick_fd != -1) {
+ vu_remove_watch(vdev, vq->kick_fd);
+ close(vq->kick_fd);
+ vq->kick_fd = -1;
+ }
+
+ vq->vring.desc = 0;
+ vq->vring.used = 0;
+ vq->vring.avail = 0;
+ }
+
+ for (i = 0; i < vdev->nregions; i++) {
+ const struct vu_dev_region *r = &vdev->regions[i];
+
+ if (r->mmap_addr) {
+ /* NOLINTNEXTLINE(performance-no-int-to-ptr) */
+ munmap((void *)r->mmap_addr, r->size + r->mmap_offset);
+ }
+ }
+ vdev->nregions = 0;
+}
+
+/**
+ * vu_sock_reset() - Reset connection socket
+ * @vdev: vhost-user device
+ */
+static void vu_sock_reset(struct vu_dev *vdev)
+{
+ /* Placeholder to add passt related code */
+ (void)vdev;
+}
+
+static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
+ struct vhost_user_msg *msg) = {
+ [VHOST_USER_GET_FEATURES] = vu_get_features_exec,
+ [VHOST_USER_SET_FEATURES] = vu_set_features_exec,
+ [VHOST_USER_GET_PROTOCOL_FEATURES] = vu_get_protocol_features_exec,
+ [VHOST_USER_SET_PROTOCOL_FEATURES] = vu_set_protocol_features_exec,
+ [VHOST_USER_GET_QUEUE_NUM] = vu_get_queue_num_exec,
+ [VHOST_USER_SET_OWNER] = vu_set_owner_exec,
+ [VHOST_USER_SET_MEM_TABLE] = vu_set_mem_table_exec,
+ [VHOST_USER_SET_VRING_NUM] = vu_set_vring_num_exec,
+ [VHOST_USER_SET_VRING_ADDR] = vu_set_vring_addr_exec,
+ [VHOST_USER_SET_VRING_BASE] = vu_set_vring_base_exec,
+ [VHOST_USER_GET_VRING_BASE] = vu_get_vring_base_exec,
+ [VHOST_USER_SET_VRING_KICK] = vu_set_vring_kick_exec,
+ [VHOST_USER_SET_VRING_CALL] = vu_set_vring_call_exec,
+ [VHOST_USER_SET_VRING_ERR] = vu_set_vring_err_exec,
+ [VHOST_USER_SET_VRING_ENABLE] = vu_set_vring_enable_exec,
+};
+
+/**
+ * vu_control_handler() - Handle control commands for vhost-user
+ * @vdev: vhost-user device
+ * @fd: vhost-user message socket
+ * @events: epoll events
+ */
+/* cppcheck-suppress unusedFunction */
+void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events)
+{
+ struct vhost_user_msg msg = { 0 };
+ bool need_reply, reply_requested;
+ int ret;
+
+ if (events & (EPOLLRDHUP | EPOLLHUP | EPOLLERR)) {
+ vu_sock_reset(vdev);
+ return;
+ }
+
+ ret = vu_message_read_default(fd, &msg);
+ if (ret == 0) {
+ vu_sock_reset(vdev);
+ return;
+ }
+ debug("================ Vhost user message ================");
+ debug("Request: %s (%d)", vu_request_to_string(msg.hdr.request),
+ msg.hdr.request);
+ debug("Flags: 0x%x", msg.hdr.flags);
+ debug("Size: %u", msg.hdr.size);
+
+ need_reply = msg.hdr.flags & VHOST_USER_NEED_REPLY_MASK;
+
+ if (msg.hdr.request >= 0 && msg.hdr.request < VHOST_USER_MAX &&
+ vu_handle[msg.hdr.request])
+ reply_requested = vu_handle[msg.hdr.request](vdev, &msg);
+ else
+ die("Unhandled request: %d", msg.hdr.request);
+
+ /* cppcheck-suppress legacyUninitvar */
+ if (!reply_requested && need_reply) {
+ msg.payload.u64 = 0;
+ msg.hdr.flags = 0;
+ msg.hdr.size = sizeof(msg.payload.u64);
+ msg.fd_num = 0;
+ reply_requested = true;
+ }
+
+ if (reply_requested)
+ vu_send_reply(fd, &msg);
+}
diff --git a/vhost_user.h b/vhost_user.h
new file mode 100644
index 000000000000..5af349ba58b8
--- /dev/null
+++ b/vhost_user.h
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * vhost-user API, command management and virtio interface
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+/* some parts from subprojects/libvhost-user/libvhost-user.h */
+
+#ifndef VHOST_USER_H
+#define VHOST_USER_H
+
+#include "virtio.h"
+#include "iov.h"
+
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
+#define VHOST_MEMORY_BASELINE_NREGIONS 8
+
+/**
+ * enum vhost_user_protocol_feature - List of available vhost-user features
+ */
+enum vhost_user_protocol_feature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
+ VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
+ VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
+ VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
+ VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
+ VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
+ VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+ VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+
+ VHOST_USER_PROTOCOL_F_MAX
+};
+
+/**
+ * enum vhost_user_request - List of available vhost-user requests
+ */
+enum vhost_user_request {
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_SEND_RARP = 19,
+ VHOST_USER_NET_SET_MTU = 20,
+ VHOST_USER_SET_BACKEND_REQ_FD = 21,
+ VHOST_USER_IOTLB_MSG = 22,
+ VHOST_USER_SET_VRING_ENDIAN = 23,
+ VHOST_USER_GET_CONFIG = 24,
+ VHOST_USER_SET_CONFIG = 25,
+ VHOST_USER_CREATE_CRYPTO_SESSION = 26,
+ VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
+ VHOST_USER_POSTCOPY_ADVISE = 28,
+ VHOST_USER_POSTCOPY_LISTEN = 29,
+ VHOST_USER_POSTCOPY_END = 30,
+ VHOST_USER_GET_INFLIGHT_FD = 31,
+ VHOST_USER_SET_INFLIGHT_FD = 32,
+ VHOST_USER_GPU_SET_SOCKET = 33,
+ VHOST_USER_VRING_KICK = 35,
+ VHOST_USER_GET_MAX_MEM_SLOTS = 36,
+ VHOST_USER_ADD_MEM_REG = 37,
+ VHOST_USER_REM_MEM_REG = 38,
+ VHOST_USER_MAX
+};
+
+/**
+ * struct vhost_user_header - vhost-user message header
+ * @request: Request type of the message
+ * @flags: Request flags
+ * @size:		Size of the following payload
+ */
+struct vhost_user_header {
+ enum vhost_user_request request;
+
+#define VHOST_USER_VERSION_MASK 0x3
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
+ uint32_t flags;
+ uint32_t size;
+} __attribute__ ((__packed__));
+
+/**
+ * struct vhost_user_memory_region - Front-end shared memory region information
+ * @guest_phys_addr: Guest physical address of the region
+ * @memory_size: Memory size
+ * @userspace_addr: front-end (QEMU) userspace address
+ * @mmap_offset: region offset in the shared memory area
+ */
+struct vhost_user_memory_region {
+ uint64_t guest_phys_addr;
+ uint64_t memory_size;
+ uint64_t userspace_addr;
+ uint64_t mmap_offset;
+};
+
+/**
+ * struct vhost_user_memory - List of all the shared memory regions
+ * @nregions: Number of memory regions
+ * @padding: Padding
+ * @regions: Memory regions list
+ */
+struct vhost_user_memory {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_user_memory_region regions[VHOST_MEMORY_BASELINE_NREGIONS];
+};
+
+/**
+ * union vhost_user_payload - vhost-user message payload
+ * @u64: 64-bit payload
+ * @state: vring state payload
+ * @addr: vring addresses payload
+ * @memory:		Memory regions information payload
+ */
+union vhost_user_payload {
+#define VHOST_USER_VRING_IDX_MASK 0xff
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ struct vhost_user_memory memory;
+};
+
+/**
+ * struct vhost_user_msg - vhost-user message
+ * @hdr: Message header
+ * @payload: Message payload
+ * @fds: File descriptors associated with the message
+ * in the ancillary data.
+ * (shared memory or event file descriptors)
+ * @fd_num: Number of file descriptors
+ */
+struct vhost_user_msg {
+ struct vhost_user_header hdr;
+ union vhost_user_payload payload;
+
+ int fds[VHOST_MEMORY_BASELINE_NREGIONS];
+ int fd_num;
+} __attribute__ ((__packed__));
+#define VHOST_USER_HDR_SIZE sizeof(struct vhost_user_header)
+
+/* index of the RX virtqueue */
+#define VHOST_USER_RX_QUEUE 0
+/* index of the TX virtqueue */
+#define VHOST_USER_TX_QUEUE 1
+
+/* in case of multiqueue, the RX and TX queues are interleaved */
+#define VHOST_USER_IS_QUEUE_TX(n)	((n) % 2)
+#define VHOST_USER_IS_QUEUE_RX(n)	(!((n) % 2))
+
+/* Default virtio-net header for passt */
+#define VU_HEADER ((struct virtio_net_hdr){ \
+ .flags = VIRTIO_NET_HDR_F_DATA_VALID, \
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE, \
+})
+
+/**
+ * vu_queue_enabled() - Return state of a virtqueue
+ * @vq:		virtqueue to check
+ *
+ * Return: true if the virtqueue is enabled, false otherwise
+ */
+/* cppcheck-suppress unusedFunction */
+static inline bool vu_queue_enabled(const struct vu_virtq *vq)
+{
+ return vq->enable;
+}
+
+/**
+ * vu_queue_started() - Return state of a virtqueue
+ * @vq:		virtqueue to check
+ *
+ * Return: true if the virtqueue is started, false otherwise
+ */
+/* cppcheck-suppress unusedFunction */
+static inline bool vu_queue_started(const struct vu_virtq *vq)
+{
+ return vq->started;
+}
+
+void vu_print_capabilities(void);
+void vu_init(struct ctx *c, struct vu_dev *vdev);
+void vu_cleanup(struct vu_dev *vdev);
+void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events);
+#endif /* VHOST_USER_H */
diff --git a/virtio.h b/virtio.h
index 94efeb049fbc..6410d60f9b3f 100644
--- a/virtio.h
+++ b/virtio.h
@@ -105,6 +105,7 @@ struct vu_dev_region {
* @protocol_features: Vhost-user protocol features
*/
struct vu_dev {
+ struct ctx *context;
uint32_t nregions;
struct vu_dev_region regions[VHOST_USER_MAX_RAM_SLOTS];
struct vu_virtq vq[VHOST_USER_MAX_QUEUES];
--
2.47.0
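A note for readers following the VHOST_USER_SET_VRING_{KICK,CALL,ERR} handlers
above: the 64-bit payload packs the queue index into the low byte and a "no
file descriptor attached" flag into bit 8, matching VHOST_USER_VRING_IDX_MASK
and VHOST_USER_VRING_NOFD_MASK from vhost_user.h. A minimal, self-contained
sketch of the decoding (decode_vring_payload() is a hypothetical helper, not
part of the patch):

#include <stdbool.h>
#include <stdint.h>

#define VHOST_USER_VRING_IDX_MASK	0xff
#define VHOST_USER_VRING_NOFD_MASK	(0x1 << 8)

/* Decode the u64 payload of a VHOST_USER_SET_VRING_{KICK,CALL,ERR}
 * message: queue index in the low byte, "no fd" flag in bit 8
 */
static void decode_vring_payload(uint64_t u64, unsigned int *idx, bool *nofd)
{
	*idx = u64 & VHOST_USER_VRING_IDX_MASK;
	*nofd = u64 & VHOST_USER_VRING_NOFD_MASK;
}

When nofd is clear, exactly one file descriptor is expected in the ancillary
data (msg->fds[0]), which is what vu_check_queue_msg_file() enforces.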
* [PATCH v14 4/9] udp: Prepare udp.c to be shared with vhost-user
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
` (2 preceding siblings ...)
2024-11-22 16:43 ` [PATCH v14 3/9] vhost-user: introduce vhost-user API Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 5/9] tcp: Export headers functions Laurent Vivier
` (5 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Laurent Vivier, David Gibson
Export udp_payload_t, udp_update_hdr4(), udp_update_hdr6() and
udp_sock_errs().
Rename udp_listen_sock_handler() to udp_buf_listen_sock_handler() and
udp_reply_sock_handler to udp_buf_reply_sock_handler().
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
---
udp.c | 74 ++++++++++++++++++++++++++++++--------------------
udp_internal.h | 34 +++++++++++++++++++++++
2 files changed, 79 insertions(+), 29 deletions(-)
create mode 100644 udp_internal.h
diff --git a/udp.c b/udp.c
index 4be165f7971b..9718ed85e796 100644
--- a/udp.c
+++ b/udp.c
@@ -109,8 +109,7 @@
#include "pcap.h"
#include "log.h"
#include "flow_table.h"
-
-#define UDP_MAX_FRAMES 32 /* max # of frames to receive at once */
+#include "udp_internal.h"
/* "Spliced" sockets indexed by bound port (host order) */
static int udp_splice_ns [IP_VERSIONS][NUM_PORTS];
@@ -118,20 +117,8 @@ static int udp_splice_init[IP_VERSIONS][NUM_PORTS];
/* Static buffers */
-/**
- * struct udp_payload_t - UDP header and data for inbound messages
- * @uh: UDP header
- * @data: UDP data
- */
-static struct udp_payload_t {
- struct udphdr uh;
- char data[USHRT_MAX - sizeof(struct udphdr)];
-#ifdef __AVX2__
-} __attribute__ ((packed, aligned(32)))
-#else
-} __attribute__ ((packed, aligned(__alignof__(unsigned int))))
-#endif
-udp_payload[UDP_MAX_FRAMES];
+/* UDP header and data for inbound messages */
+static struct udp_payload_t udp_payload[UDP_MAX_FRAMES];
/* Ethernet header for IPv4 frames */
static struct ethhdr udp4_eth_hdr;
@@ -302,9 +289,9 @@ static void udp_splice_send(const struct ctx *c, size_t start, size_t n,
*
* Return: size of IPv4 payload (UDP header + data)
*/
-static size_t udp_update_hdr4(struct iphdr *ip4h, struct udp_payload_t *bp,
- const struct flowside *toside, size_t dlen,
- bool no_udp_csum)
+size_t udp_update_hdr4(struct iphdr *ip4h, struct udp_payload_t *bp,
+ const struct flowside *toside, size_t dlen,
+ bool no_udp_csum)
{
const struct in_addr *src = inany_v4(&toside->oaddr);
const struct in_addr *dst = inany_v4(&toside->eaddr);
@@ -345,9 +332,9 @@ static size_t udp_update_hdr4(struct iphdr *ip4h, struct udp_payload_t *bp,
*
* Return: size of IPv6 payload (UDP header + data)
*/
-static size_t udp_update_hdr6(struct ipv6hdr *ip6h, struct udp_payload_t *bp,
- const struct flowside *toside, size_t dlen,
- bool no_udp_csum)
+size_t udp_update_hdr6(struct ipv6hdr *ip6h, struct udp_payload_t *bp,
+ const struct flowside *toside, size_t dlen,
+ bool no_udp_csum)
{
uint16_t l4len = dlen + sizeof(bp->uh);
@@ -477,7 +464,7 @@ static int udp_sock_recverr(int s)
*
* Return: Number of errors handled, or < 0 if we have an unrecoverable error
*/
-static int udp_sock_errs(const struct ctx *c, int s, uint32_t events)
+int udp_sock_errs(const struct ctx *c, int s, uint32_t events)
{
unsigned n_err = 0;
socklen_t errlen;
@@ -554,7 +541,7 @@ static int udp_sock_recv(const struct ctx *c, int s, uint32_t events,
}
/**
- * udp_listen_sock_handler() - Handle new data from socket
+ * udp_buf_listen_sock_handler() - Handle new data from socket
* @c: Execution context
* @ref: epoll reference
* @events: epoll events bitmap
@@ -562,8 +549,9 @@ static int udp_sock_recv(const struct ctx *c, int s, uint32_t events,
*
* #syscalls recvmmsg
*/
-void udp_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
- uint32_t events, const struct timespec *now)
+static void udp_buf_listen_sock_handler(const struct ctx *c,
+ union epoll_ref ref, uint32_t events,
+ const struct timespec *now)
{
const socklen_t sasize = sizeof(udp_meta[0].s_in);
int n, i;
@@ -630,7 +618,21 @@ void udp_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
}
/**
- * udp_reply_sock_handler() - Handle new data from flow specific socket
+ * udp_listen_sock_handler() - Handle new data from socket
+ * @c: Execution context
+ * @ref: epoll reference
+ * @events: epoll events bitmap
+ * @now: Current timestamp
+ */
+void udp_listen_sock_handler(const struct ctx *c,
+ union epoll_ref ref, uint32_t events,
+ const struct timespec *now)
+{
+ udp_buf_listen_sock_handler(c, ref, events, now);
+}
+
+/**
+ * udp_buf_reply_sock_handler() - Handle new data from flow specific socket
* @c: Execution context
* @ref: epoll reference
* @events: epoll events bitmap
@@ -638,8 +640,9 @@ void udp_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
*
* #syscalls recvmmsg
*/
-void udp_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
- uint32_t events, const struct timespec *now)
+static void udp_buf_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
+ uint32_t events,
+ const struct timespec *now)
{
flow_sidx_t tosidx = flow_sidx_opposite(ref.flowside);
const struct flowside *toside = flowside_at_sidx(tosidx);
@@ -685,6 +688,19 @@ void udp_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
}
}
+/**
+ * udp_reply_sock_handler() - Handle new data from flow specific socket
+ * @c: Execution context
+ * @ref: epoll reference
+ * @events: epoll events bitmap
+ * @now: Current timestamp
+ */
+void udp_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
+ uint32_t events, const struct timespec *now)
+{
+ udp_buf_reply_sock_handler(c, ref, events, now);
+}
+
/**
* udp_tap_handler() - Handle packets from tap
* @c: Execution context
diff --git a/udp_internal.h b/udp_internal.h
new file mode 100644
index 000000000000..cc80e3055423
--- /dev/null
+++ b/udp_internal.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright (c) 2021 Red Hat GmbH
+ * Author: Stefano Brivio <sbrivio@redhat.com>
+ */
+
+#ifndef UDP_INTERNAL_H
+#define UDP_INTERNAL_H
+
+#include "tap.h" /* needed by udp_meta_t */
+
+#define UDP_MAX_FRAMES 32 /* max # of frames to receive at once */
+
+/**
+ * struct udp_payload_t - UDP header and data for inbound messages
+ * @uh: UDP header
+ * @data: UDP data
+ */
+struct udp_payload_t {
+ struct udphdr uh;
+ char data[USHRT_MAX - sizeof(struct udphdr)];
+#ifdef __AVX2__
+} __attribute__ ((packed, aligned(32)));
+#else
+} __attribute__ ((packed, aligned(__alignof__(unsigned int))));
+#endif
+
+size_t udp_update_hdr4(struct iphdr *ip4h, struct udp_payload_t *bp,
+ const struct flowside *toside, size_t dlen,
+ bool no_udp_csum);
+size_t udp_update_hdr6(struct ipv6hdr *ip6h, struct udp_payload_t *bp,
+ const struct flowside *toside, size_t dlen,
+ bool no_udp_csum);
+int udp_sock_errs(const struct ctx *c, int s, uint32_t events);
+#endif /* UDP_INTERNAL_H */
--
2.47.0
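With udp_update_hdr4()/udp_update_hdr6() now exported, a datapath other than
the udp_buf_*() handlers can finish frames in buffers it owns. A hedged sketch
of a caller, assuming the payload (dlen bytes) has already been written at
bp->data and ip4h points at the IPv4 header directly in front of it;
frame_finish_udp4() and its caller state are illustrative, not part of the
patch:

#include <stddef.h>
#include <netinet/ip.h>

#include "flow.h"
#include "udp_internal.h"

/* Fill the IPv4 and UDP headers around a payload already in place;
 * returns the IPv4 payload length (UDP header + data)
 */
static size_t frame_finish_udp4(struct iphdr *ip4h, struct udp_payload_t *bp,
				const struct flowside *toside, size_t dlen)
{
	return udp_update_hdr4(ip4h, bp, toside, dlen,
			       /* no_udp_csum */ false);
}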
* [PATCH v14 5/9] tcp: Export headers functions
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
` (3 preceding siblings ...)
2024-11-22 16:43 ` [PATCH v14 4/9] udp: Prepare udp.c to be shared with vhost-user Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 6/9] passt: rename tap_sock_init() to tap_backend_init() Laurent Vivier
` (4 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Laurent Vivier, David Gibson
Export tcp_fill_headers[4|6]() and tcp_update_check_tcp[4|6]().
They'll be needed by vhost-user.
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
---
tcp.c | 30 +++++++++++++++---------------
tcp_internal.h | 15 +++++++++++++++
2 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/tcp.c b/tcp.c
index 6a98dfaed59b..5d9968847d20 100644
--- a/tcp.c
+++ b/tcp.c
@@ -758,9 +758,9 @@ static void tcp_sock_set_bufsize(const struct ctx *c, int s)
* @iov_cnt: Length of the array
* @l4offset: IPv4 payload offset in the iovec array
*/
-static void tcp_update_check_tcp4(const struct iphdr *iph,
- const struct iovec *iov, int iov_cnt,
- size_t l4offset)
+void tcp_update_check_tcp4(const struct iphdr *iph,
+ const struct iovec *iov, int iov_cnt,
+ size_t l4offset)
{
uint16_t l4len = ntohs(iph->tot_len) - sizeof(struct iphdr);
struct in_addr saddr = { .s_addr = iph->saddr };
@@ -810,9 +810,9 @@ static void tcp_update_check_tcp4(const struct iphdr *iph,
* @iov_cnt: Length of the array
* @l4offset: IPv6 payload offset in the iovec array
*/
-static void tcp_update_check_tcp6(const struct ipv6hdr *ip6h,
- const struct iovec *iov, int iov_cnt,
- size_t l4offset)
+void tcp_update_check_tcp6(const struct ipv6hdr *ip6h,
+ const struct iovec *iov, int iov_cnt,
+ size_t l4offset)
{
uint16_t l4len = ntohs(ip6h->payload_len);
size_t check_ofs;
@@ -978,11 +978,11 @@ static void tcp_fill_header(struct tcphdr *th,
*
* Return: The IPv4 payload length, host order
*/
-static size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
- struct tap_hdr *taph,
- struct iphdr *iph, struct tcp_payload_t *bp,
- size_t dlen, const uint16_t *check,
- uint32_t seq, bool no_tcp_csum)
+size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph,
+ struct iphdr *iph, struct tcp_payload_t *bp,
+ size_t dlen, const uint16_t *check,
+ uint32_t seq, bool no_tcp_csum)
{
const struct flowside *tapside = TAPFLOW(conn);
const struct in_addr *src4 = inany_v4(&tapside->oaddr);
@@ -1030,10 +1030,10 @@ static size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
*
* Return: The IPv6 payload length, host order
*/
-static size_t tcp_fill_headers6(const struct tcp_tap_conn *conn,
- struct tap_hdr *taph,
- struct ipv6hdr *ip6h, struct tcp_payload_t *bp,
- size_t dlen, uint32_t seq, bool no_tcp_csum)
+size_t tcp_fill_headers6(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph,
+ struct ipv6hdr *ip6h, struct tcp_payload_t *bp,
+ size_t dlen, uint32_t seq, bool no_tcp_csum)
{
const struct flowside *tapside = TAPFLOW(conn);
size_t l4len = dlen + sizeof(bp->th);
diff --git a/tcp_internal.h b/tcp_internal.h
index c846f605834c..8625eed894d5 100644
--- a/tcp_internal.h
+++ b/tcp_internal.h
@@ -162,6 +162,21 @@ void tcp_rst_do(const struct ctx *c, struct tcp_tap_conn *conn);
struct tcp_info_linux;
+void tcp_update_check_tcp4(const struct iphdr *iph,
+ const struct iovec *iov, int iov_cnt,
+ size_t l4offset);
+void tcp_update_check_tcp6(const struct ipv6hdr *ip6h,
+ const struct iovec *iov, int iov_cnt,
+ size_t l4offset);
+size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph,
+ struct iphdr *iph, struct tcp_payload_t *bp,
+ size_t dlen, const uint16_t *check,
+ uint32_t seq, bool no_tcp_csum);
+size_t tcp_fill_headers6(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph,
+ struct ipv6hdr *ip6h, struct tcp_payload_t *bp,
+ size_t dlen, uint32_t seq, bool no_tcp_csum);
size_t tcp_l2_buf_fill_headers(const struct tcp_tap_conn *conn,
struct iovec *iov, size_t dlen,
const uint16_t *check, uint32_t seq,
--
2.47.0
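The exported helpers compose the way the vhost-user datapath in patch 7 uses
them: fill the TCP/IP headers with the checksum deferred (no_tcp_csum set),
then compute the TCP checksum over the iovec array describing the scattered
frame. A sketch under those assumptions; frame_finish_tcp4() and its caller
state are illustrative, not part of the patch:

#include <stdint.h>
#include <sys/uio.h>
#include <netinet/ip.h>

#include "tcp_conn.h"
#include "tcp_internal.h"

/* Finish an IPv4/TCP frame spread over an iovec array: iph and bp
 * point into the first buffer, l4offset is the offset of the TCP
 * header within the iovec array
 */
static void frame_finish_tcp4(const struct tcp_tap_conn *conn,
			      struct iphdr *iph, struct tcp_payload_t *bp,
			      size_t dlen, uint32_t seq,
			      const struct iovec *iov, int iov_cnt,
			      size_t l4offset)
{
	/* no_tcp_csum=true: checksum computed separately below */
	tcp_fill_headers4(conn, NULL, iph, bp, dlen, NULL, seq, true);
	tcp_update_check_tcp4(iph, iov, iov_cnt, l4offset);
}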
* [PATCH v14 6/9] passt: rename tap_sock_init() to tap_backend_init()
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
` (4 preceding siblings ...)
2024-11-22 16:43 ` [PATCH v14 5/9] tcp: Export headers functions Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 7/9] vhost-user: add vhost-user Laurent Vivier
` (3 subsequent siblings)
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Laurent Vivier, David Gibson
Extract pool storage initialization loop to tap_sock_update_pool(),
extract QEMU hints to tap_backend_show_hints().
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
---
passt.c | 2 +-
tap.c | 56 +++++++++++++++++++++++++++++++++++++++++---------------
tap.h | 2 +-
3 files changed, 43 insertions(+), 17 deletions(-)
diff --git a/passt.c b/passt.c
index a51a4e112c98..25f5c1a1a2fd 100644
--- a/passt.c
+++ b/passt.c
@@ -244,7 +244,7 @@ int main(int argc, char **argv)
pasta_netns_quit_init(&c);
- tap_sock_init(&c);
+ tap_backend_init(&c);
random_init(&c);
diff --git a/tap.c b/tap.c
index 14d9b3d37a3e..238f248ca45b 100644
--- a/tap.c
+++ b/tap.c
@@ -1190,11 +1190,31 @@ int tap_sock_unix_open(char *sock_path)
return fd;
}
+/**
+ * tap_backend_show_hints() - Give help information to start QEMU
+ * @c: Execution context
+ */
+static void tap_backend_show_hints(struct ctx *c)
+{
+ switch (c->mode) {
+ case MODE_PASTA:
+ /* No hints */
+ break;
+ case MODE_PASST:
+ info("\nYou can now start qemu (>= 7.2, with commit 13c6be96618c):");
+ info(" kvm ... -device virtio-net-pci,netdev=s -netdev stream,id=s,server=off,addr.type=unix,addr.path=%s",
+ c->sock_path);
+ info("or qrap, for earlier qemu versions:");
+ info(" ./qrap 5 kvm ... -net socket,fd=5 -net nic,model=virtio");
+ break;
+ }
+}
+
/**
* tap_sock_unix_init() - Start listening for connections on AF_UNIX socket
* @c: Execution context
*/
-static void tap_sock_unix_init(struct ctx *c)
+static void tap_sock_unix_init(const struct ctx *c)
{
union epoll_ref ref = { .type = EPOLL_TYPE_TAP_LISTEN };
struct epoll_event ev = { 0 };
@@ -1205,12 +1225,6 @@ static void tap_sock_unix_init(struct ctx *c)
ev.events = EPOLLIN | EPOLLET;
ev.data.u64 = ref.u64;
epoll_ctl(c->epollfd, EPOLL_CTL_ADD, c->fd_tap_listen, &ev);
-
- info("\nYou can now start qemu (>= 7.2, with commit 13c6be96618c):");
- info(" kvm ... -device virtio-net-pci,netdev=s -netdev stream,id=s,server=off,addr.type=unix,addr.path=%s",
- c->sock_path);
- info("or qrap, for earlier qemu versions:");
- info(" ./qrap 5 kvm ... -net socket,fd=5 -net nic,model=virtio");
}
/**
@@ -1323,21 +1337,31 @@ static void tap_sock_tun_init(struct ctx *c)
}
/**
- * tap_sock_init() - Create and set up AF_UNIX socket or tuntap file descriptor
- * @c: Execution context
+ * tap_sock_update_pool() - Set the buffer base and size for the pool of packets
+ * @base: Buffer base
+ * @size:	Buffer size
*/
-void tap_sock_init(struct ctx *c)
+static void tap_sock_update_pool(void *base, size_t size)
{
- size_t sz = sizeof(pkt_buf);
int i;
- pool_tap4_storage = PACKET_INIT(pool_tap4, TAP_MSGS, pkt_buf, sz);
- pool_tap6_storage = PACKET_INIT(pool_tap6, TAP_MSGS, pkt_buf, sz);
+ pool_tap4_storage = PACKET_INIT(pool_tap4, TAP_MSGS, base, size);
+ pool_tap6_storage = PACKET_INIT(pool_tap6, TAP_MSGS, base, size);
for (i = 0; i < TAP_SEQS; i++) {
- tap4_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, pkt_buf, sz);
- tap6_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, pkt_buf, sz);
+ tap4_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, base, size);
+ tap6_l4[i].p = PACKET_INIT(pool_l4, UIO_MAXIOV, base, size);
}
+}
+
+/**
+ * tap_backend_init() - Create and set up AF_UNIX socket or
+ * tuntap file descriptor
+ * @c: Execution context
+ */
+void tap_backend_init(struct ctx *c)
+{
+ tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));
if (c->fd_tap != -1) { /* Passed as --fd */
struct epoll_event ev = { 0 };
@@ -1367,4 +1391,6 @@ void tap_sock_init(struct ctx *c)
*/
memset(&c->guest_mac, 0xff, sizeof(c->guest_mac));
}
+
+ tap_backend_show_hints(c);
}
diff --git a/tap.h b/tap.h
index 85f1e8473711..8728cc5c09c3 100644
--- a/tap.h
+++ b/tap.h
@@ -68,7 +68,7 @@ void tap_handler_pasta(struct ctx *c, uint32_t events,
void tap_handler_passt(struct ctx *c, uint32_t events,
const struct timespec *now);
int tap_sock_unix_open(char *sock_path);
-void tap_sock_init(struct ctx *c);
+void tap_backend_init(struct ctx *c);
void tap_flush_pools(void);
void tap_handler(struct ctx *c, const struct timespec *now);
void tap_add_packet(struct ctx *c, ssize_t l2len, char *p);
--
2.47.0
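The point of extracting tap_sock_update_pool() is that the packet pool base
becomes switchable at runtime. Once the vhost-user patch that follows is
applied, tap_backend_init() uses two call patterns, shown here for reference:

	/* passt/pasta: pools reference the static pkt_buf */
	tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));

	/* vhost-user: no fixed buffer; with buf_size == 0,
	 * packet_check_range() defers to vu_packet_check_range()
	 * against the guest memory regions
	 */
	tap_sock_update_pool(NULL, 0);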
* [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
` (5 preceding siblings ...)
2024-11-22 16:43 ` [PATCH v14 6/9] passt: rename tap_sock_init() to tap_backend_init() Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-26 5:14 ` Stefano Brivio
` (2 more replies)
2024-11-22 16:43 ` [PATCH v14 8/9] test: Add tests for passt in vhost-user mode Laurent Vivier
` (2 subsequent siblings)
9 siblings, 3 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Laurent Vivier
Add virtio and vhost-user functions to connect with QEMU.
$ ./passt --vhost-user
and
# qemu-system-x86_64 ... -m 4G \
-object memory-backend-memfd,id=memfd0,share=on,size=4G \
-numa node,memdev=memfd0 \
-chardev socket,id=chr0,path=/tmp/passt_1.socket \
-netdev vhost-user,id=netdev0,chardev=chr0 \
-device virtio-net,mac=9a:2b:2c:2d:2e:2f,netdev=netdev0 \
...
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
Makefile | 6 +-
conf.c | 19 +-
epoll_type.h | 4 +
iov.c | 1 -
isolation.c | 17 +-
packet.c | 11 ++
packet.h | 8 +-
passt.1 | 10 +-
passt.c | 9 +
passt.h | 7 +
pcap.c | 1 -
tap.c | 77 ++++++--
tap.h | 5 +-
tcp.c | 7 +
tcp_vu.c | 497 +++++++++++++++++++++++++++++++++++++++++++++++++++
tcp_vu.h | 12 ++
udp.c | 11 ++
udp_vu.c | 343 +++++++++++++++++++++++++++++++++++
udp_vu.h | 13 ++
vhost_user.c | 41 +++--
vhost_user.h | 4 +-
virtio.c | 5 -
vu_common.c | 282 +++++++++++++++++++++++++++++
vu_common.h | 60 +++++++
24 files changed, 1397 insertions(+), 53 deletions(-)
create mode 100644 tcp_vu.c
create mode 100644 tcp_vu.h
create mode 100644 udp_vu.c
create mode 100644 udp_vu.h
create mode 100644 vu_common.c
create mode 100644 vu_common.h
diff --git a/Makefile b/Makefile
index bcb084e66e4d..faa5c23346ac 100644
--- a/Makefile
+++ b/Makefile
@@ -37,7 +37,8 @@ FLAGS += -DDUAL_STACK_SOCKETS=$(DUAL_STACK_SOCKETS)
PASST_SRCS = arch.c arp.c checksum.c conf.c dhcp.c dhcpv6.c flow.c fwd.c \
icmp.c igmp.c inany.c iov.c ip.c isolation.c lineread.c log.c mld.c \
ndp.c netlink.c packet.c passt.c pasta.c pcap.c pif.c tap.c tcp.c \
- tcp_buf.c tcp_splice.c udp.c udp_flow.c util.c vhost_user.c virtio.c
+ tcp_buf.c tcp_splice.c tcp_vu.c udp.c udp_flow.c udp_vu.c util.c \
+ vhost_user.c virtio.c vu_common.c
QRAP_SRCS = qrap.c
SRCS = $(PASST_SRCS) $(QRAP_SRCS)
@@ -47,7 +48,8 @@ PASST_HEADERS = arch.h arp.h checksum.h conf.h dhcp.h dhcpv6.h flow.h fwd.h \
flow_table.h icmp.h icmp_flow.h inany.h iov.h ip.h isolation.h \
lineread.h log.h ndp.h netlink.h packet.h passt.h pasta.h pcap.h pif.h \
siphash.h tap.h tcp.h tcp_buf.h tcp_conn.h tcp_internal.h tcp_splice.h \
- udp.h udp_flow.h util.h vhost_user.h virtio.h
+ tcp_vu.h udp.h udp_flow.h udp_internal.h udp_vu.h util.h vhost_user.h \
+ virtio.h vu_common.h
HEADERS = $(PASST_HEADERS) seccomp.h
C := \#include <sys/random.h>\nint main(){int a=getrandom(0, 0, 0);}
diff --git a/conf.c b/conf.c
index 86566dbf1ee0..d9d63d70ae5a 100644
--- a/conf.c
+++ b/conf.c
@@ -45,6 +45,7 @@
#include "lineread.h"
#include "isolation.h"
#include "log.h"
+#include "vhost_user.h"
#define NETNS_RUN_DIR "/run/netns"
@@ -769,9 +770,14 @@ static void usage(const char *name, FILE *f, int status)
" default: same interface name as external one\n");
} else {
FPRINTF(f,
- " -s, --socket PATH UNIX domain socket path\n"
+ " -s, --socket, --socket-path PATH UNIX domain socket path\n"
" default: probe free path starting from "
UNIX_SOCK_PATH "\n", 1);
+ FPRINTF(f,
+ " --vhost-user Enable vhost-user mode\n"
+ " UNIX domain socket is provided by -s option\n"
+ " --print-capabilities print back-end capabilities in JSON format,\n"
+ " only meaningful for vhost-user mode\n");
}
FPRINTF(f,
@@ -1305,6 +1311,10 @@ void conf(struct ctx *c, int argc, char **argv)
{"map-guest-addr", required_argument, NULL, 22 },
{"host-lo-to-ns-lo", no_argument, NULL, 23 },
{"dns-host", required_argument, NULL, 24 },
+ {"vhost-user", no_argument, NULL, 25 },
+ /* vhost-user backend program convention */
+ {"print-capabilities", no_argument, NULL, 26 },
+ {"socket-path", required_argument, NULL, 's' },
{ 0 },
};
const char *logname = (c->mode == MODE_PASTA) ? "pasta" : "passt";
@@ -1498,6 +1508,13 @@ void conf(struct ctx *c, int argc, char **argv)
break;
die("Invalid host nameserver address: %s", optarg);
+ case 25:
+ if (c->mode == MODE_PASTA)
+ die("--vhost-user is for passt mode only");
+ c->mode = MODE_VU;
+ break;
+ case 26:
+ vu_print_capabilities();
break;
case 'd':
c->debug = 1;
diff --git a/epoll_type.h b/epoll_type.h
index 0ad1efa0ccec..f3ef41584757 100644
--- a/epoll_type.h
+++ b/epoll_type.h
@@ -36,6 +36,10 @@ enum epoll_type {
EPOLL_TYPE_TAP_PASST,
/* socket listening for qemu socket connections */
EPOLL_TYPE_TAP_LISTEN,
+ /* vhost-user command socket */
+ EPOLL_TYPE_VHOST_CMD,
+ /* vhost-user kick event socket */
+ EPOLL_TYPE_VHOST_KICK,
EPOLL_NUM_TYPES,
};
diff --git a/iov.c b/iov.c
index 3f9e229a305f..3741db21790f 100644
--- a/iov.c
+++ b/iov.c
@@ -68,7 +68,6 @@ size_t iov_skip_bytes(const struct iovec *iov, size_t n,
*
* Returns: The number of bytes successfully copied.
*/
-/* cppcheck-suppress unusedFunction */
size_t iov_from_buf(const struct iovec *iov, size_t iov_cnt,
size_t offset, const void *buf, size_t bytes)
{
diff --git a/isolation.c b/isolation.c
index 45fba1e68b9d..c944fb35c3a4 100644
--- a/isolation.c
+++ b/isolation.c
@@ -379,12 +379,21 @@ void isolate_postfork(const struct ctx *c)
prctl(PR_SET_DUMPABLE, 0);
- if (c->mode == MODE_PASTA) {
- prog.len = (unsigned short)ARRAY_SIZE(filter_pasta);
- prog.filter = filter_pasta;
- } else {
+ switch (c->mode) {
+ case MODE_PASST:
prog.len = (unsigned short)ARRAY_SIZE(filter_passt);
prog.filter = filter_passt;
+ break;
+ case MODE_PASTA:
+ prog.len = (unsigned short)ARRAY_SIZE(filter_pasta);
+ prog.filter = filter_pasta;
+ break;
+ case MODE_VU:
+ prog.len = (unsigned short)ARRAY_SIZE(filter_vu);
+ prog.filter = filter_vu;
+ break;
+ default:
+ ASSERT(0);
}
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
diff --git a/packet.c b/packet.c
index 37489961a37e..e5a78d079231 100644
--- a/packet.c
+++ b/packet.c
@@ -36,6 +36,17 @@
static int packet_check_range(const struct pool *p, size_t offset, size_t len,
const char *start, const char *func, int line)
{
+ if (p->buf_size == 0) {
+ int ret;
+
+ ret = vu_packet_check_range((void *)p->buf, offset, len, start);
+
+ if (ret == -1)
+ trace("cannot find region, %s:%i", func, line);
+
+ return ret;
+ }
+
if (start < p->buf) {
trace("packet start %p before buffer start %p, "
"%s:%i", (void *)start, (void *)p->buf, func, line);
diff --git a/packet.h b/packet.h
index 8377dcf678bb..3f70e949c066 100644
--- a/packet.h
+++ b/packet.h
@@ -8,8 +8,10 @@
/**
* struct pool - Generic pool of packets stored in a buffer
- * @buf: Buffer storing packet descriptors
- * @buf_size: Total size of buffer
+ * @buf: Buffer storing packet descriptors,
+ * a struct vu_dev_region array for passt vhost-user mode
+ * @buf_size: Total size of buffer,
+ * 0 for passt vhost-user mode
* @size: Number of usable descriptors for the pool
* @count: Number of used descriptors for the pool
* @pkt: Descriptors: see macros below
@@ -22,6 +24,8 @@ struct pool {
struct iovec pkt[1];
};
+int vu_packet_check_range(void *buf, size_t offset, size_t len,
+ const char *start);
void packet_add_do(struct pool *p, size_t len, const char *start,
const char *func, int line);
void *packet_get_do(const struct pool *p, const size_t idx,
diff --git a/passt.1 b/passt.1
index f0849787217e..a100e0f1d727 100644
--- a/passt.1
+++ b/passt.1
@@ -397,12 +397,20 @@ interface address are configured on a given host interface.
.SS \fBpasst\fR-only options
.TP
-.BR \-s ", " \-\-socket " " \fIpath
+.BR \-s ", " \-\-socket-path ", " \-\-socket " " \fIpath
Path for UNIX domain socket used by \fBqemu\fR(1) or \fBqrap\fR(1) to connect to
\fBpasst\fR.
Default is to probe a free socket, not accepting connections, starting from
\fI/tmp/passt_1.socket\fR to \fI/tmp/passt_64.socket\fR.
+.TP
+.BR \-\-vhost-user
+Enable vhost-user. The vhost-user command socket is provided by \fB--socket\fR.
+
+.TP
+.BR \-\-print-capabilities
+Print back-end capabilities in JSON format, only meaningful for vhost-user mode.
+
.TP
.BR \-F ", " \-\-fd " " \fIFD
Pass a pre-opened, connected socket to \fBpasst\fR. Usually the socket is opened
diff --git a/passt.c b/passt.c
index 25f5c1a1a2fd..eb96a449b29e 100644
--- a/passt.c
+++ b/passt.c
@@ -50,6 +50,7 @@
#include "log.h"
#include "tcp_splice.h"
#include "ndp.h"
+#include "vu_common.h"
#define EPOLL_EVENTS 8
@@ -72,6 +73,8 @@ char *epoll_type_str[] = {
[EPOLL_TYPE_TAP_PASTA] = "/dev/net/tun device",
[EPOLL_TYPE_TAP_PASST] = "connected qemu socket",
[EPOLL_TYPE_TAP_LISTEN] = "listening qemu socket",
+ [EPOLL_TYPE_VHOST_CMD] = "vhost-user command socket",
+ [EPOLL_TYPE_VHOST_KICK] = "vhost-user kick socket",
};
static_assert(ARRAY_SIZE(epoll_type_str) == EPOLL_NUM_TYPES,
"epoll_type_str[] doesn't match enum epoll_type");
@@ -346,6 +349,12 @@ loop:
case EPOLL_TYPE_PING:
icmp_sock_handler(&c, ref);
break;
+ case EPOLL_TYPE_VHOST_CMD:
+ vu_control_handler(c.vdev, c.fd_tap, eventmask);
+ break;
+ case EPOLL_TYPE_VHOST_KICK:
+ vu_kick_cb(c.vdev, ref, &now);
+ break;
default:
/* Can't happen */
ASSERT(0);
diff --git a/passt.h b/passt.h
index 72c7f723a7bb..076f7db43345 100644
--- a/passt.h
+++ b/passt.h
@@ -25,6 +25,7 @@ union epoll_ref;
#include "fwd.h"
#include "tcp.h"
#include "udp.h"
+#include "vhost_user.h"
/* Default address for our end on the tap interface. Bit 0 of byte 0 must be 0
* (unicast) and bit 1 of byte 1 must be 1 (locally administered). Otherwise
@@ -43,6 +44,7 @@ union epoll_ref;
* @icmp: ICMP-specific reference part
* @data: Data handled by protocol handlers
* @nsdir_fd: netns dirfd for fallback timer checking if namespace is gone
+ * @queue: vhost-user queue index for this fd
* @u64: Opaque reference for epoll_ctl() and epoll_wait()
*/
union epoll_ref {
@@ -58,6 +60,7 @@ union epoll_ref {
union udp_listen_epoll_ref udp;
uint32_t data;
int nsdir_fd;
+ int queue;
};
};
uint64_t u64;
@@ -94,6 +97,7 @@ struct fqdn {
enum passt_modes {
MODE_PASST,
MODE_PASTA,
+ MODE_VU,
};
/**
@@ -229,6 +233,7 @@ struct ip6_ctx {
* @freebind: Allow binding of non-local addresses for forwarding
* @low_wmem: Low probed net.core.wmem_max
* @low_rmem: Low probed net.core.rmem_max
+ * @vdev: vhost-user device
*/
struct ctx {
enum passt_modes mode;
@@ -291,6 +296,8 @@ struct ctx {
int low_wmem;
int low_rmem;
+
+ struct vu_dev *vdev;
};
void proto_update_l2_buf(const unsigned char *eth_d,
diff --git a/pcap.c b/pcap.c
index 23205ddfed84..3d623cfead77 100644
--- a/pcap.c
+++ b/pcap.c
@@ -143,7 +143,6 @@ void pcap_multiple(const struct iovec *iov, size_t frame_parts, unsigned int n,
* @iovcnt: Number of buffers (@iov entries)
* @offset: Offset of the L2 frame within the full data length
*/
-/* cppcheck-suppress unusedFunction */
void pcap_iov(const struct iovec *iov, size_t iovcnt, size_t offset)
{
struct timespec now = { 0 };
diff --git a/tap.c b/tap.c
index 238f248ca45b..386f0bccd2fb 100644
--- a/tap.c
+++ b/tap.c
@@ -58,6 +58,8 @@
#include "packet.h"
#include "tap.h"
#include "log.h"
+#include "vhost_user.h"
+#include "vu_common.h"
/* IPv4 (plus ARP) and IPv6 message batches from tap/guest to IP handlers */
static PACKET_POOL_NOINIT(pool_tap4, TAP_MSGS, pkt_buf);
@@ -78,16 +80,22 @@ void tap_send_single(const struct ctx *c, const void *data, size_t l2len)
struct iovec iov[2];
size_t iovcnt = 0;
- if (c->mode == MODE_PASST) {
+ switch (c->mode) {
+ case MODE_PASST:
iov[iovcnt] = IOV_OF_LVALUE(vnet_len);
iovcnt++;
- }
-
- iov[iovcnt].iov_base = (void *)data;
- iov[iovcnt].iov_len = l2len;
- iovcnt++;
+ /* fall through */
+ case MODE_PASTA:
+ iov[iovcnt].iov_base = (void *)data;
+ iov[iovcnt].iov_len = l2len;
+ iovcnt++;
- tap_send_frames(c, iov, iovcnt, 1);
+ tap_send_frames(c, iov, iovcnt, 1);
+ break;
+ case MODE_VU:
+ vu_send_single(c, data, l2len);
+ break;
+ }
}
/**
@@ -414,10 +422,18 @@ size_t tap_send_frames(const struct ctx *c, const struct iovec *iov,
if (!nframes)
return 0;
- if (c->mode == MODE_PASTA)
+ switch (c->mode) {
+ case MODE_PASTA:
m = tap_send_frames_pasta(c, iov, bufs_per_frame, nframes);
- else
+ break;
+ case MODE_PASST:
m = tap_send_frames_passt(c, iov, bufs_per_frame, nframes);
+ break;
+ case MODE_VU:
+ /* fall through */
+ default:
+ ASSERT(0);
+ }
if (m < nframes)
debug("tap: failed to send %zu frames of %zu",
@@ -976,7 +992,7 @@ void tap_add_packet(struct ctx *c, ssize_t l2len, char *p)
* tap_sock_reset() - Handle closing or failure of connect AF_UNIX socket
* @c: Execution context
*/
-static void tap_sock_reset(struct ctx *c)
+void tap_sock_reset(struct ctx *c)
{
info("Client connection closed%s", c->one_off ? ", exiting" : "");
@@ -987,6 +1003,8 @@ static void tap_sock_reset(struct ctx *c)
epoll_ctl(c->epollfd, EPOLL_CTL_DEL, c->fd_tap, NULL);
close(c->fd_tap);
c->fd_tap = -1;
+ if (c->mode == MODE_VU)
+ vu_cleanup(c->vdev);
}
/**
@@ -1207,6 +1225,11 @@ static void tap_backend_show_hints(struct ctx *c)
info("or qrap, for earlier qemu versions:");
info(" ./qrap 5 kvm ... -net socket,fd=5 -net nic,model=virtio");
break;
+ case MODE_VU:
+ info("You can start qemu with:");
+ info(" kvm ... -chardev socket,id=chr0,path=%s -netdev vhost-user,id=netdev0,chardev=chr0 -device virtio-net,netdev=netdev0 -object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE -numa node,memdev=memfd0\n",
+ c->sock_path);
+ break;
}
}
@@ -1234,8 +1257,8 @@ static void tap_sock_unix_init(const struct ctx *c)
*/
void tap_listen_handler(struct ctx *c, uint32_t events)
{
- union epoll_ref ref = { .type = EPOLL_TYPE_TAP_PASST };
struct epoll_event ev = { 0 };
+ union epoll_ref ref = { 0 };
int v = INT_MAX / 2;
struct ucred ucred;
socklen_t len;
@@ -1275,6 +1298,10 @@ void tap_listen_handler(struct ctx *c, uint32_t events)
trace("tap: failed to set SO_SNDBUF to %i", v);
ref.fd = c->fd_tap;
+ if (c->mode == MODE_VU)
+ ref.type = EPOLL_TYPE_VHOST_CMD;
+ else
+ ref.type = EPOLL_TYPE_TAP_PASST;
ev.events = EPOLLIN | EPOLLRDHUP;
ev.data.u64 = ref.u64;
epoll_ctl(c->epollfd, EPOLL_CTL_ADD, c->fd_tap, &ev);
@@ -1341,7 +1368,7 @@ static void tap_sock_tun_init(struct ctx *c)
* @base: Buffer base
* @size:	Buffer size
*/
-static void tap_sock_update_pool(void *base, size_t size)
+void tap_sock_update_pool(void *base, size_t size)
{
int i;
@@ -1361,7 +1388,10 @@ static void tap_sock_update_pool(void *base, size_t size)
*/
void tap_backend_init(struct ctx *c)
{
- tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));
+ if (c->mode == MODE_VU)
+ tap_sock_update_pool(NULL, 0);
+ else
+ tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));
if (c->fd_tap != -1) { /* Passed as --fd */
struct epoll_event ev = { 0 };
@@ -1369,10 +1399,17 @@ void tap_backend_init(struct ctx *c)
ASSERT(c->one_off);
ref.fd = c->fd_tap;
- if (c->mode == MODE_PASST)
+ switch (c->mode) {
+ case MODE_PASST:
ref.type = EPOLL_TYPE_TAP_PASST;
- else
+ break;
+ case MODE_PASTA:
ref.type = EPOLL_TYPE_TAP_PASTA;
+ break;
+ case MODE_VU:
+ ref.type = EPOLL_TYPE_VHOST_CMD;
+ break;
+ }
ev.events = EPOLLIN | EPOLLRDHUP;
ev.data.u64 = ref.u64;
@@ -1380,9 +1417,14 @@ void tap_backend_init(struct ctx *c)
return;
}
- if (c->mode == MODE_PASTA) {
+ switch (c->mode) {
+ case MODE_PASTA:
tap_sock_tun_init(c);
- } else {
+ break;
+ case MODE_VU:
+ vu_init(c);
+ /* fall through */
+ case MODE_PASST:
tap_sock_unix_init(c);
/* In passt mode, we don't know the guest's MAC address until it
@@ -1390,6 +1432,7 @@ void tap_backend_init(struct ctx *c)
* first packets will reach it.
*/
memset(&c->guest_mac, 0xff, sizeof(c->guest_mac));
+ break;
}
tap_backend_show_hints(c);
diff --git a/tap.h b/tap.h
index 8728cc5c09c3..dfbd8b9ebd72 100644
--- a/tap.h
+++ b/tap.h
@@ -40,7 +40,8 @@ static inline struct iovec tap_hdr_iov(const struct ctx *c,
*/
static inline void tap_hdr_update(struct tap_hdr *thdr, size_t l2len)
{
- thdr->vnet_len = htonl(l2len);
+ if (thdr)
+ thdr->vnet_len = htonl(l2len);
}
void tap_udp4_send(const struct ctx *c, struct in_addr src, in_port_t sport,
@@ -68,6 +69,8 @@ void tap_handler_pasta(struct ctx *c, uint32_t events,
void tap_handler_passt(struct ctx *c, uint32_t events,
const struct timespec *now);
int tap_sock_unix_open(char *sock_path);
+void tap_sock_reset(struct ctx *c);
+void tap_sock_update_pool(void *base, size_t size);
void tap_backend_init(struct ctx *c);
void tap_flush_pools(void);
void tap_handler(struct ctx *c, const struct timespec *now);
diff --git a/tcp.c b/tcp.c
index 5d9968847d20..2b547876d58a 100644
--- a/tcp.c
+++ b/tcp.c
@@ -304,6 +304,7 @@
#include "flow_table.h"
#include "tcp_internal.h"
#include "tcp_buf.h"
+#include "tcp_vu.h"
/* MSS rounding: see SET_MSS() */
#define MSS_DEFAULT 536
@@ -1312,6 +1313,9 @@ int tcp_prepare_flags(const struct ctx *c, struct tcp_tap_conn *conn,
static int tcp_send_flag(const struct ctx *c, struct tcp_tap_conn *conn,
int flags)
{
+ if (c->mode == MODE_VU)
+ return tcp_vu_send_flag(c, conn, flags);
+
return tcp_buf_send_flag(c, conn, flags);
}
@@ -1705,6 +1709,9 @@ static int tcp_sock_consume(const struct tcp_tap_conn *conn, uint32_t ack_seq)
*/
static int tcp_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
{
+ if (c->mode == MODE_VU)
+ return tcp_vu_data_from_sock(c, conn);
+
return tcp_buf_data_from_sock(c, conn);
}
diff --git a/tcp_vu.c b/tcp_vu.c
new file mode 100644
index 000000000000..be5027a1e921
--- /dev/null
+++ b/tcp_vu.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* tcp_vu.c - TCP L2 vhost-user management functions
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include <sys/socket.h>
+
+#include <linux/virtio_net.h>
+
+#include "util.h"
+#include "ip.h"
+#include "passt.h"
+#include "siphash.h"
+#include "inany.h"
+#include "vhost_user.h"
+#include "tcp.h"
+#include "pcap.h"
+#include "flow.h"
+#include "tcp_conn.h"
+#include "flow_table.h"
+#include "tcp_vu.h"
+#include "tap.h"
+#include "tcp_internal.h"
+#include "checksum.h"
+#include "vu_common.h"
+#include <time.h>
+
+static struct iovec iov_vu[VIRTQUEUE_MAX_SIZE + 1];
+static struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
+static int head[VIRTQUEUE_MAX_SIZE + 1];
+static int head_cnt;
+
+/**
+ * tcp_vu_hdrlen() - Return the size of the headers of an L2 frame (TCP)
+ * @v6:	Set for IPv6 packet
+ *
+ * Return: Size of the headers
+ */
+static size_t tcp_vu_hdrlen(bool v6)
+{
+ size_t hdrlen;
+
+ hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
+ sizeof(struct ethhdr) + sizeof(struct tcphdr);
+
+ if (v6)
+ hdrlen += sizeof(struct ipv6hdr);
+ else
+ hdrlen += sizeof(struct iphdr);
+
+ return hdrlen;
+}
+
+/**
+ * tcp_vu_update_check() - Calculate TCP checksum
+ * @tapside: Address information for one side of the flow
+ * @iov: Pointer to the array of IO vectors
+ * @iov_cnt: Length of the array
+ */
+static void tcp_vu_update_check(const struct flowside *tapside,
+ struct iovec *iov, int iov_cnt)
+{
+ char *base = iov[0].iov_base;
+
+ if (inany_v4(&tapside->oaddr)) {
+ const struct iphdr *iph = vu_ip(base);
+
+ tcp_update_check_tcp4(iph, iov, iov_cnt,
+ (char *)vu_payloadv4(base) - base);
+ } else {
+ const struct ipv6hdr *ip6h = vu_ip(base);
+
+ tcp_update_check_tcp6(ip6h, iov, iov_cnt,
+ (char *)vu_payloadv6(base) - base);
+ }
+}
+
+/**
+ * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
+ * @c: Execution context
+ * @conn: Connection pointer
+ * @flags: TCP flags: if not set, send segment only if ACK is due
+ *
+ * Return: negative error code on connection reset, 0 otherwise
+ */
+int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
+{
+ struct vu_dev *vdev = c->vdev;
+ struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+ const struct flowside *tapside = TAPFLOW(conn);
+ size_t l2len, l4len, optlen, hdrlen;
+ struct vu_virtq_element flags_elem[2];
+ struct tcp_payload_t *payload;
+ struct ipv6hdr *ip6h = NULL;
+ struct iovec flags_iov[2];
+ struct iphdr *iph = NULL;
+ struct ethhdr *eh;
+ uint32_t seq;
+ int elem_cnt;
+ int nb_ack;
+ int ret;
+
+ hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
+
+ vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
+
+ elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
+ hdrlen + sizeof(struct tcp_syn_opts), NULL);
+ if (elem_cnt != 1)
+ return -1;
+
+ ASSERT(flags_elem[0].in_sg[0].iov_len >=
+ hdrlen + sizeof(struct tcp_syn_opts));
+
+ vu_set_vnethdr(vdev, flags_elem[0].in_sg[0].iov_base, 1);
+
+ eh = vu_eth(flags_elem[0].in_sg[0].iov_base);
+
+ memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
+ memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
+
+ if (CONN_V4(conn)) {
+ eh->h_proto = htons(ETH_P_IP);
+
+ iph = vu_ip(flags_elem[0].in_sg[0].iov_base);
+ *iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
+
+ payload = vu_payloadv4(flags_elem[0].in_sg[0].iov_base);
+ } else {
+ eh->h_proto = htons(ETH_P_IPV6);
+
+ ip6h = vu_ip(flags_elem[0].in_sg[0].iov_base);
+ *ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
+ payload = vu_payloadv6(flags_elem[0].in_sg[0].iov_base);
+ }
+
+ memset(&payload->th, 0, sizeof(payload->th));
+ payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
+ payload->th.ack = 1;
+
+ seq = conn->seq_to_tap;
+ ret = tcp_prepare_flags(c, conn, flags, &payload->th,
+ (struct tcp_syn_opts *)payload->data,
+ &optlen);
+ if (ret <= 0) {
+ vu_queue_rewind(vq, 1);
+ return ret;
+ }
+
+ if (CONN_V4(conn)) {
+ l4len = tcp_fill_headers4(conn, NULL, iph, payload, optlen,
+ NULL, seq, true);
+ l2len = sizeof(*iph);
+ } else {
+ l4len = tcp_fill_headers6(conn, NULL, ip6h, payload, optlen,
+ seq, true);
+ l2len = sizeof(*ip6h);
+ }
+ l2len += l4len + sizeof(struct ethhdr);
+
+ flags_elem[0].in_sg[0].iov_len = l2len +
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ if (*c->pcap) {
+ tcp_vu_update_check(tapside, &flags_elem[0].in_sg[0], 1);
+ pcap_iov(&flags_elem[0].in_sg[0], 1,
+ sizeof(struct virtio_net_hdr_mrg_rxbuf));
+ }
+ nb_ack = 1;
+
+ if (flags & DUP_ACK) {
+ vu_set_element(&flags_elem[1], NULL, &flags_iov[1]);
+
+ elem_cnt = vu_collect(vdev, vq, &flags_elem[1], 1,
+ flags_elem[0].in_sg[0].iov_len, NULL);
+ if (elem_cnt == 1 &&
+ flags_elem[1].in_sg[0].iov_len >=
+ flags_elem[0].in_sg[0].iov_len) {
+ memcpy(flags_elem[1].in_sg[0].iov_base,
+ flags_elem[0].in_sg[0].iov_base,
+ flags_elem[0].in_sg[0].iov_len);
+ nb_ack++;
+
+ if (*c->pcap) {
+ pcap_iov(&flags_elem[1].in_sg[0], 1,
+ sizeof(struct virtio_net_hdr_mrg_rxbuf));
+ }
+ }
+ }
+
+ vu_flush(vdev, vq, flags_elem, nb_ack);
+
+ return 0;
+}
+
+/**
+ * tcp_vu_sock_recv() - Receive data stream from socket into vhost-user buffers
+ * @c: Execution context
+ * @conn: Connection pointer
+ * @v6: Set for IPv6 connections
+ * @already_sent: Number of bytes already sent
+ * @fillsize: Maximum bytes to fill in guest-side receiving window
+ * @iov_cnt:	Number of iov entries used (output)
+ *
+ * Return: Number of iov entries used to store the data or negative error code
+ */
+static ssize_t tcp_vu_sock_recv(const struct ctx *c,
+ const struct tcp_tap_conn *conn, bool v6,
+ uint32_t already_sent, size_t fillsize,
+ int *iov_cnt)
+{
+ struct vu_dev *vdev = c->vdev;
+ struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+ struct msghdr mh_sock = { 0 };
+ uint16_t mss = MSS_GET(conn);
+ int s = conn->sock;
+ ssize_t ret, len;
+ size_t hdrlen;
+ int elem_cnt;
+ int i;
+
+ *iov_cnt = 0;
+
+ hdrlen = tcp_vu_hdrlen(v6);
+
+ vu_init_elem(elem, &iov_vu[1], VIRTQUEUE_MAX_SIZE);
+
+ elem_cnt = 0;
+ head_cnt = 0;
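+	/* Collect buffers grouped into frames: head[n] is the index in
+	 * elem[] of the first buffer of frame n, as one frame can span
+	 * several buffers when VIRTIO_NET_F_MRG_RXBUF is negotiated
+	 */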
+ while (fillsize > 0 && elem_cnt < VIRTQUEUE_MAX_SIZE) {
+ struct iovec *iov;
+ size_t frame_size, dlen;
+ int cnt;
+
+ cnt = vu_collect(vdev, vq, &elem[elem_cnt],
+ VIRTQUEUE_MAX_SIZE - elem_cnt,
+ MIN(mss, fillsize) + hdrlen, &frame_size);
+ if (cnt == 0)
+ break;
+
+ dlen = frame_size - hdrlen;
+
+ /* reserve space for headers in iov */
+ iov = &elem[elem_cnt].in_sg[0];
+ ASSERT(iov->iov_len >= hdrlen);
+ iov->iov_base = (char *)iov->iov_base + hdrlen;
+ iov->iov_len -= hdrlen;
+ head[head_cnt++] = elem_cnt;
+
+ fillsize -= dlen;
+ elem_cnt += cnt;
+ }
+
+ if (peek_offset_cap) {
+ mh_sock.msg_iov = iov_vu + 1;
+ mh_sock.msg_iovlen = elem_cnt;
+ } else {
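+		/* Without SO_PEEK_OFF, the bytes already sent are peeked
+		 * again and discarded into tcp_buf_discard
+		 */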
+ iov_vu[0].iov_base = tcp_buf_discard;
+ iov_vu[0].iov_len = already_sent;
+
+ mh_sock.msg_iov = iov_vu;
+ mh_sock.msg_iovlen = elem_cnt + 1;
+ }
+
+ do
+ ret = recvmsg(s, &mh_sock, MSG_PEEK);
+ while (ret < 0 && errno == EINTR);
+
+ if (ret < 0) {
+ vu_queue_rewind(vq, elem_cnt);
+ return -errno;
+ }
+
+ if (!peek_offset_cap)
+ ret -= already_sent;
+
+ /* adjust iov number and length of the last iov */
+ len = ret;
+ for (i = 0; len && i < elem_cnt; i++) {
+ struct iovec *iov = &elem[i].in_sg[0];
+
+ if (iov->iov_len > (size_t)len)
+ iov->iov_len = len;
+
+ len -= iov->iov_len;
+ }
+ /* adjust head count */
+ while (head_cnt > 0 && head[head_cnt - 1] > i)
+ head_cnt--;
+ /* mark end of array */
+ head[head_cnt] = i;
+ *iov_cnt = i;
+
+ /* release unused buffers */
+ vu_queue_rewind(vq, elem_cnt - i);
+
+ /* restore space for headers in iov */
+ for (i = 0; i < head_cnt; i++) {
+ struct iovec *iov = &elem[head[i]].in_sg[0];
+
+ iov->iov_base = (char *)iov->iov_base - hdrlen;
+ iov->iov_len += hdrlen;
+ }
+
+ return ret;
+}
+
+/**
+ * tcp_vu_prepare() - Prepare the frame header
+ * @c: Execution context
+ * @conn: Connection pointer
+ * @base: Pointer to the L2 frame base address
+ * @dlen: Packet data length
+ * @check: Pointer to the cached checksum, if already computed (updated)
+ */
+static void tcp_vu_prepare(const struct ctx *c,
+ struct tcp_tap_conn *conn, char *base,
+ size_t dlen, const uint16_t **check)
+{
+ const struct flowside *toside = TAPFLOW(conn);
+ struct tcp_payload_t *payload;
+ struct ipv6hdr *ip6h = NULL;
+ struct iphdr *iph = NULL;
+ struct ethhdr *eh;
+
+	/* we assume the first iovec provided by the guest is large
+	 * enough to hold all the headers of the L2 frame
+	 */
+
+ eh = vu_eth(base);
+
+ memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
+ memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
+
+ /* initialize header */
+
+ if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
+ eh->h_proto = htons(ETH_P_IP);
+
+ iph = vu_ip(base);
+ *iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
+ payload = vu_payloadv4(base);
+ } else {
+ eh->h_proto = htons(ETH_P_IPV6);
+
+ ip6h = vu_ip(base);
+ *ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
+
+ payload = vu_payloadv6(base);
+ }
+
+ memset(&payload->th, 0, sizeof(payload->th));
+ payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
+ payload->th.ack = 1;
+
+ if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
+ tcp_fill_headers4(conn, NULL, iph, payload, dlen,
+ *check, conn->seq_to_tap, true);
+ *check = &iph->check;
+ } else {
+ tcp_fill_headers6(conn, NULL, ip6h, payload, dlen,
+ conn->seq_to_tap, true);
+ }
+}
+
+/**
+ * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
+ * in window
+ * @c: Execution context
+ * @conn: Connection pointer
+ *
+ * Return: Negative on connection reset, 0 otherwise
+ */
+int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
+{
+ uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
+ struct vu_dev *vdev = c->vdev;
+ struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+ const struct flowside *tapside = TAPFLOW(conn);
+ size_t fillsize, hdrlen;
+ int v6 = CONN_V6(conn);
+ uint32_t already_sent;
+ const uint16_t *check;
+ int i, iov_cnt;
+ ssize_t len;
+
+ if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
+ debug("Got packet, but RX virtqueue not usable yet");
+ return 0;
+ }
+
+ already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
+
+ if (SEQ_LT(already_sent, 0)) {
+ /* RFC 761, section 2.1. */
+ flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
+ conn->seq_ack_from_tap, conn->seq_to_tap);
+ conn->seq_to_tap = conn->seq_ack_from_tap;
+ already_sent = 0;
+ if (tcp_set_peek_offset(conn->sock, 0)) {
+ tcp_rst(c, conn);
+ return -1;
+ }
+ }
+
+ if (!wnd_scaled || already_sent >= wnd_scaled) {
+ conn_flag(c, conn, STALLED);
+ conn_flag(c, conn, ACK_FROM_TAP_DUE);
+ return 0;
+ }
+
+ /* Set up buffer descriptors we'll fill completely and partially. */
+
+ fillsize = wnd_scaled - already_sent;
+
+ /* collect the buffers from vhost-user and fill them with the
+ * data from the socket
+ */
+ len = tcp_vu_sock_recv(c, conn, v6, already_sent, fillsize, &iov_cnt);
+ if (len < 0) {
+ if (len != -EAGAIN && len != -EWOULDBLOCK) {
+ tcp_rst(c, conn);
+ return len;
+ }
+ return 0;
+ }
+
+ if (!len) {
+ if (already_sent) {
+ conn_flag(c, conn, STALLED);
+ } else if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) ==
+ SOCK_FIN_RCVD) {
+ int ret = tcp_vu_send_flag(c, conn, FIN | ACK);
+ if (ret) {
+ tcp_rst(c, conn);
+ return ret;
+ }
+
+ conn_event(c, conn, TAP_FIN_SENT);
+ }
+
+ return 0;
+ }
+
+ conn_flag(c, conn, ~STALLED);
+
+ /* Likely, some new data was acked too. */
+ tcp_update_seqack_wnd(c, conn, false, NULL);
+
+ /* initialize headers */
+	/* iov_vu is an array of buffers and a single buffer can be
+	 * smaller than the frame we want to send. Thanks to num_buffers,
+	 * several virtio iov buffers can be merged into one frame: we
+	 * only need to set the packet headers in the first iov, and
+	 * num_buffers to the number of iov entries of the frame.
+	 */
+
+ hdrlen = tcp_vu_hdrlen(v6);
+ for (i = 0, check = NULL; i < head_cnt; i++) {
+ struct iovec *iov = &elem[head[i]].in_sg[0];
+ int buf_cnt = head[i + 1] - head[i];
+ int dlen = iov_size(iov, buf_cnt) - hdrlen;
+
+ vu_set_vnethdr(vdev, iov->iov_base, buf_cnt);
+
+		/* Compute the IPv4 header checksum only for the first
+		 * and the last frame: all the other checksums are the
+		 * same as the first one
+		 */
+ if (i + 1 == head_cnt)
+ check = NULL;
+
+ tcp_vu_prepare(c, conn, iov->iov_base, dlen, &check);
+
+ if (*c->pcap) {
+ tcp_vu_update_check(tapside, iov, buf_cnt);
+ pcap_iov(iov, buf_cnt,
+ sizeof(struct virtio_net_hdr_mrg_rxbuf));
+ }
+
+ conn->seq_to_tap += dlen;
+ }
+
+ /* send packets */
+ vu_flush(vdev, vq, elem, iov_cnt);
+
+ conn_flag(c, conn, ACK_FROM_TAP_DUE);
+
+ return 0;
+}
diff --git a/tcp_vu.h b/tcp_vu.h
new file mode 100644
index 000000000000..6ab6057f352a
--- /dev/null
+++ b/tcp_vu.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#ifndef TCP_VU_H
+#define TCP_VU_H
+
+int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags);
+int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn);
+
+#endif /*TCP_VU_H */
diff --git a/udp.c b/udp.c
index 9718ed85e796..5b0093a15a30 100644
--- a/udp.c
+++ b/udp.c
@@ -110,6 +110,7 @@
#include "log.h"
#include "flow_table.h"
#include "udp_internal.h"
+#include "udp_vu.h"
/* "Spliced" sockets indexed by bound port (host order) */
static int udp_splice_ns [IP_VERSIONS][NUM_PORTS];
@@ -628,6 +629,11 @@ void udp_listen_sock_handler(const struct ctx *c,
union epoll_ref ref, uint32_t events,
const struct timespec *now)
{
+ if (c->mode == MODE_VU) {
+ udp_vu_listen_sock_handler(c, ref, events, now);
+ return;
+ }
+
udp_buf_listen_sock_handler(c, ref, events, now);
}
@@ -698,6 +704,11 @@ static void udp_buf_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
void udp_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
uint32_t events, const struct timespec *now)
{
+ if (c->mode == MODE_VU) {
+ udp_vu_reply_sock_handler(c, ref, events, now);
+ return;
+ }
+
udp_buf_reply_sock_handler(c, ref, events, now);
}
diff --git a/udp_vu.c b/udp_vu.c
new file mode 100644
index 000000000000..c911022546c1
--- /dev/null
+++ b/udp_vu.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* udp_vu.c - UDP L2 vhost-user management functions
+ *
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#include <unistd.h>
+#include <assert.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/uio.h>
+#include <linux/virtio_net.h>
+
+#include "checksum.h"
+#include "util.h"
+#include "ip.h"
+#include "siphash.h"
+#include "inany.h"
+#include "passt.h"
+#include "pcap.h"
+#include "log.h"
+#include "vhost_user.h"
+#include "udp_internal.h"
+#include "flow.h"
+#include "flow_table.h"
+#include "udp_flow.h"
+#include "udp_vu.h"
+#include "vu_common.h"
+
+static struct iovec iov_vu [VIRTQUEUE_MAX_SIZE];
+static struct vu_virtq_element elem [VIRTQUEUE_MAX_SIZE];
+
+/**
+ * udp_vu_hdrlen() - Return the size of the headers in a level-2 frame (UDP)
+ * @v6: Set for IPv6 packet
+ *
+ * Return: Size of the headers
+ */
+static size_t udp_vu_hdrlen(bool v6)
+{
+ size_t hdrlen;
+
+ hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
+ sizeof(struct ethhdr) + sizeof(struct udphdr);
+
+ if (v6)
+ hdrlen += sizeof(struct ipv6hdr);
+ else
+ hdrlen += sizeof(struct iphdr);
+
+ return hdrlen;
+}
+
+/**
+ * udp_vu_sock_info() - get socket information
+ * @s: Socket to get information from
+ * @s_in: Socket address (output)
+ *
+ * Return: 0 if socket address can be read, -1 otherwise
+ */
+static int udp_vu_sock_info(int s, union sockaddr_inany *s_in)
+{
+ struct msghdr msg = {
+ .msg_name = s_in,
+ .msg_namelen = sizeof(union sockaddr_inany),
+ };
+
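+	/* no data iovec: only peek the source address of the next datagram */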
+ return recvmsg(s, &msg, MSG_PEEK | MSG_DONTWAIT);
+}
+
+/**
+ * udp_vu_sock_recv() - Receive datagrams from socket into vhost-user buffers
+ * @c: Execution context
+ * @s: Socket to receive from
+ * @events: epoll events bitmap
+ * @v6: Set for IPv6 connections
+ * @dlen: Size of received data (output)
+ *
+ * Return: Number of iov entries used to store the datagram
+ */
+static int udp_vu_sock_recv(const struct ctx *c, int s, uint32_t events,
+ bool v6, ssize_t *dlen)
+{
+ struct vu_dev *vdev = c->vdev;
+ struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+ int iov_cnt, idx, iov_used;
+ struct msghdr msg = { 0 };
+ size_t off, hdrlen;
+
+ ASSERT(!c->no_udp);
+
+ if (!(events & EPOLLIN))
+ return 0;
+
+ /* compute L2 header length */
+ hdrlen = udp_vu_hdrlen(v6);
+
+ vu_init_elem(elem, iov_vu, VIRTQUEUE_MAX_SIZE);
+
+ iov_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE,
+ IP_MAX_MTU - sizeof(struct udphdr) + hdrlen,
+ NULL);
+ if (iov_cnt == 0)
+ return 0;
+
+ /* reserve space for the headers */
+ ASSERT(iov_vu[0].iov_len >= hdrlen);
+ iov_vu[0].iov_base = (char *)iov_vu[0].iov_base + hdrlen;
+ iov_vu[0].iov_len -= hdrlen;
+
+ /* read data from the socket */
+ msg.msg_iov = iov_vu;
+ msg.msg_iovlen = iov_cnt;
+
+ *dlen = recvmsg(s, &msg, 0);
+ if (*dlen < 0) {
+ vu_queue_rewind(vq, iov_cnt);
+ return 0;
+ }
+
+	/* restore the pointer to the start of the headers */
+ iov_vu[0].iov_base = (char *)iov_vu[0].iov_base - hdrlen;
+ iov_vu[0].iov_len += hdrlen;
+
+	/* count the number of buffers filled by recvmsg() */
+ idx = iov_skip_bytes(iov_vu, iov_cnt, *dlen + hdrlen, &off);
+
+ /* adjust last iov length */
+ if (idx < iov_cnt)
+ iov_vu[idx].iov_len = off;
+ iov_used = idx + !!off;
+
+ vu_set_vnethdr(vdev, iov_vu[0].iov_base, iov_used);
+
+ /* release unused buffers */
+ vu_queue_rewind(vq, iov_cnt - iov_used);
+
+ return iov_used;
+}
+
+/**
+ * udp_vu_prepare() - Prepare the packet header
+ * @c: Execution context
+ * @toside: Address information for one side of the flow
+ * @dlen: Packet data length
+ *
+ * Return: Layer-4 length
+ */
+static size_t udp_vu_prepare(const struct ctx *c,
+ const struct flowside *toside, ssize_t dlen)
+{
+ struct ethhdr *eh;
+ size_t l4len;
+
+ /* ethernet header */
+ eh = vu_eth(iov_vu[0].iov_base);
+
+ memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
+ memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
+
+ /* initialize header */
+ if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
+ struct iphdr *iph = vu_ip(iov_vu[0].iov_base);
+ struct udp_payload_t *bp = vu_payloadv4(iov_vu[0].iov_base);
+
+ eh->h_proto = htons(ETH_P_IP);
+
+ *iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_UDP);
+
+ l4len = udp_update_hdr4(iph, bp, toside, dlen, true);
+ } else {
+ struct ipv6hdr *ip6h = vu_ip(iov_vu[0].iov_base);
+ struct udp_payload_t *bp = vu_payloadv6(iov_vu[0].iov_base);
+
+ eh->h_proto = htons(ETH_P_IPV6);
+
+ *ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_UDP);
+
+ l4len = udp_update_hdr6(ip6h, bp, toside, dlen, true);
+ }
+
+ return l4len;
+}
+
+/**
+ * udp_vu_csum() - Calculate and set checksum for a UDP packet
+ * @toside: Address information for one side of the flow
+ * @iov_used: Number of used iov_vu items
+ */
+static void udp_vu_csum(const struct flowside *toside, int iov_used)
+{
+ const struct in_addr *src4 = inany_v4(&toside->oaddr);
+ const struct in_addr *dst4 = inany_v4(&toside->eaddr);
+ char *base = iov_vu[0].iov_base;
+ struct udp_payload_t *bp;
+
+ if (src4 && dst4) {
+ bp = vu_payloadv4(base);
+ csum_udp4(&bp->uh, *src4, *dst4, iov_vu, iov_used,
+ (char *)&bp->data - base);
+ } else {
+ bp = vu_payloadv6(base);
+ csum_udp6(&bp->uh, &toside->oaddr.a6, &toside->eaddr.a6,
+ iov_vu, iov_used, (char *)&bp->data - base);
+ }
+}
+
+/**
+ * udp_vu_listen_sock_handler() - Handle new data from socket
+ * @c: Execution context
+ * @ref: epoll reference
+ * @events: epoll events bitmap
+ * @now: Current timestamp
+ */
+void udp_vu_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
+ uint32_t events, const struct timespec *now)
+{
+ struct vu_dev *vdev = c->vdev;
+ struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+ int i;
+
+ if (udp_sock_errs(c, ref.fd, events) < 0) {
+ err("UDP: Unrecoverable error on listening socket:"
+ " (%s port %hu)", pif_name(ref.udp.pif), ref.udp.port);
+ return;
+ }
+
+ for (i = 0; i < UDP_MAX_FRAMES; i++) {
+ const struct flowside *toside;
+ union sockaddr_inany s_in;
+ flow_sidx_t sidx;
+ uint8_t pif;
+ ssize_t dlen;
+ int iov_used;
+ bool v6;
+
+ if (udp_vu_sock_info(ref.fd, &s_in) < 0)
+ break;
+
+ sidx = udp_flow_from_sock(c, ref, &s_in, now);
+ pif = pif_at_sidx(sidx);
+
+ if (pif != PIF_TAP) {
+ if (flow_sidx_valid(sidx)) {
+ flow_sidx_t fromsidx = flow_sidx_opposite(sidx);
+ struct udp_flow *uflow = udp_at_sidx(sidx);
+
+ flow_err(uflow,
+ "No support for forwarding UDP from %s to %s",
+ pif_name(pif_at_sidx(fromsidx)),
+ pif_name(pif));
+ } else {
+ debug("Discarding 1 datagram without flow");
+ }
+
+ continue;
+ }
+
+ toside = flowside_at_sidx(sidx);
+
+ v6 = !(inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr));
+
+ iov_used = udp_vu_sock_recv(c, ref.fd, events, v6, &dlen);
+ if (iov_used <= 0)
+ break;
+
+ udp_vu_prepare(c, toside, dlen);
+ if (*c->pcap) {
+ udp_vu_csum(toside, iov_used);
+ pcap_iov(iov_vu, iov_used,
+ sizeof(struct virtio_net_hdr_mrg_rxbuf));
+ }
+ vu_flush(vdev, vq, elem, iov_used);
+ }
+}
+
+/**
+ * udp_vu_reply_sock_handler() - Handle new data from flow specific socket
+ * @c: Execution context
+ * @ref: epoll reference
+ * @events: epoll events bitmap
+ * @now: Current timestamp
+ */
+void udp_vu_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
+ uint32_t events, const struct timespec *now)
+{
+ flow_sidx_t tosidx = flow_sidx_opposite(ref.flowside);
+ const struct flowside *toside = flowside_at_sidx(tosidx);
+ struct udp_flow *uflow = udp_at_sidx(ref.flowside);
+ int from_s = uflow->s[ref.flowside.sidei];
+ struct vu_dev *vdev = c->vdev;
+ struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+ int i;
+
+ ASSERT(!c->no_udp);
+
+ if (udp_sock_errs(c, from_s, events) < 0) {
+ flow_err(uflow, "Unrecoverable error on reply socket");
+ flow_err_details(uflow);
+ udp_flow_close(c, uflow);
+ return;
+ }
+
+ for (i = 0; i < UDP_MAX_FRAMES; i++) {
+ uint8_t topif = pif_at_sidx(tosidx);
+ ssize_t dlen;
+ int iov_used;
+ bool v6;
+
+ ASSERT(uflow);
+
+ if (topif != PIF_TAP) {
+ uint8_t frompif = pif_at_sidx(ref.flowside);
+
+ flow_err(uflow,
+ "No support for forwarding UDP from %s to %s",
+ pif_name(frompif), pif_name(topif));
+ continue;
+ }
+
+ v6 = !(inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr));
+
+ iov_used = udp_vu_sock_recv(c, from_s, events, v6, &dlen);
+ if (iov_used <= 0)
+ break;
+ flow_trace(uflow, "Received 1 datagram on reply socket");
+ uflow->ts = now->tv_sec;
+
+ udp_vu_prepare(c, toside, dlen);
+ if (*c->pcap) {
+ udp_vu_csum(toside, iov_used);
+ pcap_iov(iov_vu, iov_used,
+ sizeof(struct virtio_net_hdr_mrg_rxbuf));
+ }
+ vu_flush(vdev, vq, elem, iov_used);
+ }
+}
diff --git a/udp_vu.h b/udp_vu.h
new file mode 100644
index 000000000000..ba7018d3bf01
--- /dev/null
+++ b/udp_vu.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ */
+
+#ifndef UDP_VU_H
+#define UDP_VU_H
+
+void udp_vu_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
+ uint32_t events, const struct timespec *now);
+void udp_vu_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
+ uint32_t events, const struct timespec *now);
+#endif /* UDP_VU_H */
diff --git a/vhost_user.c b/vhost_user.c
index 89627a227ff1..51c90db10e7b 100644
--- a/vhost_user.c
+++ b/vhost_user.c
@@ -48,12 +48,13 @@
/* vhost-user version we are compatible with */
#define VHOST_USER_VERSION 1
+static struct vu_dev vdev_storage;
+
/**
* vu_print_capabilities() - print vhost-user capabilities
* this is part of the vhost-user backend
* convention.
*/
-/* cppcheck-suppress unusedFunction */
void vu_print_capabilities(void)
{
info("{");
@@ -163,9 +164,7 @@ static void vmsg_close_fds(const struct vhost_user_msg *vmsg)
*/
static void vu_remove_watch(const struct vu_dev *vdev, int fd)
{
- /* Placeholder to add passt related code */
- (void)vdev;
- (void)fd;
+ epoll_ctl(vdev->context->epollfd, EPOLL_CTL_DEL, fd, NULL);
}
/**
@@ -487,6 +486,14 @@ static bool vu_set_mem_table_exec(struct vu_dev *vdev,
}
}
+ /* As vu_packet_check_range() has no access to the number of
+ * memory regions, mark the end of the array with mmap_addr = 0
+ */
+ ASSERT(vdev->nregions < VHOST_USER_MAX_RAM_SLOTS - 1);
+ vdev->regions[vdev->nregions].mmap_addr = 0;
+
+ tap_sock_update_pool(vdev->regions, 0);
+
return false;
}
@@ -615,9 +622,16 @@ static bool vu_get_vring_base_exec(struct vu_dev *vdev,
*/
static void vu_set_watch(const struct vu_dev *vdev, int idx)
{
- /* Placeholder to add passt related code */
- (void)vdev;
- (void)idx;
+ union epoll_ref ref = {
+ .type = EPOLL_TYPE_VHOST_KICK,
+ .fd = vdev->vq[idx].kick_fd,
+ .queue = idx
+ };
+ struct epoll_event ev = { 0 };
+
+ ev.data.u64 = ref.u64;
+ ev.events = EPOLLIN;
+ epoll_ctl(vdev->context->epollfd, EPOLL_CTL_ADD, ref.fd, &ev);
}
/**
@@ -829,14 +843,14 @@ static bool vu_set_vring_enable_exec(struct vu_dev *vdev,
* @c: execution context
* @vdev: vhost-user device
*/
-/* cppcheck-suppress unusedFunction */
-void vu_init(struct ctx *c, struct vu_dev *vdev)
+void vu_init(struct ctx *c)
{
int i;
- vdev->context = c;
+ c->vdev = &vdev_storage;
+ c->vdev->context = c;
for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
- vdev->vq[i] = (struct vu_virtq){
+ c->vdev->vq[i] = (struct vu_virtq){
.call_fd = -1,
.kick_fd = -1,
.err_fd = -1,
@@ -849,7 +863,6 @@ void vu_init(struct ctx *c, struct vu_dev *vdev)
* vu_cleanup() - Reset vhost-user device
* @vdev: vhost-user device
*/
-/* cppcheck-suppress unusedFunction */
void vu_cleanup(struct vu_dev *vdev)
{
unsigned int i;
@@ -896,8 +909,7 @@ void vu_cleanup(struct vu_dev *vdev)
*/
static void vu_sock_reset(struct vu_dev *vdev)
{
- /* Placeholder to add passt related code */
- (void)vdev;
+ tap_sock_reset(vdev->context);
}
static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
@@ -925,7 +937,6 @@ static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
* @fd: vhost-user message socket
* @events: epoll events
*/
-/* cppcheck-suppress unusedFunction */
void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events)
{
struct vhost_user_msg msg = { 0 };
diff --git a/vhost_user.h b/vhost_user.h
index 5af349ba58b8..464ba21e962f 100644
--- a/vhost_user.h
+++ b/vhost_user.h
@@ -183,7 +183,6 @@ struct vhost_user_msg {
*
* Return: true if the virtqueue is enabled, false otherwise
*/
-/* cppcheck-suppress unusedFunction */
static inline bool vu_queue_enabled(const struct vu_virtq *vq)
{
return vq->enable;
@@ -195,14 +194,13 @@ static inline bool vu_queue_enabled(const struct vu_virtq *vq)
*
* Return: true if the virtqueue is started, false otherwise
*/
-/* cppcheck-suppress unusedFunction */
static inline bool vu_queue_started(const struct vu_virtq *vq)
{
return vq->started;
}
void vu_print_capabilities(void);
-void vu_init(struct ctx *c, struct vu_dev *vdev);
+void vu_init(struct ctx *c);
void vu_cleanup(struct vu_dev *vdev);
void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events);
#endif /* VHOST_USER_H */
diff --git a/virtio.c b/virtio.c
index b23a68c4917f..6a97435e2965 100644
--- a/virtio.c
+++ b/virtio.c
@@ -325,7 +325,6 @@ static bool vring_can_notify(const struct vu_dev *dev, struct vu_virtq *vq)
* @dev: Vhost-user device
* @vq: Virtqueue
*/
-/* cppcheck-suppress unusedFunction */
void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq)
{
if (!vring_can_notify(dev, vq)) {
@@ -498,7 +497,6 @@ static int vu_queue_map_desc(struct vu_dev *dev, struct vu_virtq *vq, unsigned i
*
* Return: -1 if there is an error, 0 otherwise
*/
-/* cppcheck-suppress unusedFunction */
int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq, struct vu_virtq_element *elem)
{
unsigned int head;
@@ -556,7 +554,6 @@ void vu_queue_unpop(struct vu_virtq *vq)
* @vq: Virtqueue
* @num: Number of element to unpop
*/
-/* cppcheck-suppress unusedFunction */
bool vu_queue_rewind(struct vu_virtq *vq, unsigned int num)
{
if (num > vq->inuse)
@@ -609,7 +606,6 @@ void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
* @len: Size of the element
* @idx: Used ring entry index
*/
-/* cppcheck-suppress unusedFunction */
void vu_queue_fill(struct vu_virtq *vq, const struct vu_virtq_element *elem,
unsigned int len, unsigned int idx)
{
@@ -633,7 +629,6 @@ static inline void vring_used_idx_set(struct vu_virtq *vq, uint16_t val)
* @vq: Virtqueue
* @count: Number of entry to flush
*/
-/* cppcheck-suppress unusedFunction */
void vu_queue_flush(struct vu_virtq *vq, unsigned int count)
{
uint16_t old, new;
diff --git a/vu_common.c b/vu_common.c
new file mode 100644
index 000000000000..2a18e9794b5c
--- /dev/null
+++ b/vu_common.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ *
+ * vu_common.c - vhost-user common UDP and TCP functions
+ */
+
+#include <unistd.h>
+#include <sys/uio.h>
+#include <sys/eventfd.h>
+#include <linux/virtio_net.h>
+
+#include "util.h"
+#include "passt.h"
+#include "tap.h"
+#include "vhost_user.h"
+#include "pcap.h"
+#include "vu_common.h"
+
+/**
+ * vu_packet_check_range() - Check if a given memory zone is contained in
+ * a mapped guest memory region
+ * @buf: Array of the available memory regions
+ * @offset: Offset of data range in packet descriptor
+ * @size: Length of desired data range
+ * @start: Start of the packet descriptor
+ *
+ * Return: 0 if the zone is in a mapped memory region, -1 otherwise
+ */
+int vu_packet_check_range(void *buf, size_t offset, size_t len,
+ const char *start)
+{
+ struct vu_dev_region *dev_region;
+
+ for (dev_region = buf; dev_region->mmap_addr; dev_region++) {
+ /* NOLINTNEXTLINE(performance-no-int-to-ptr) */
+ char *m = (char *)dev_region->mmap_addr;
+
+ if (m <= start &&
+ start + offset + len <= m + dev_region->mmap_offset +
+ dev_region->size)
+ return 0;
+ }
+
+ return -1;
+}
+
+/**
+ * vu_init_elem() - initialize an array of virtqueue elements with 1 iov in each
+ * @elem: Array of virtqueue elements to initialize
+ * @iov: Array of iovec to assign to virtqueue element
+ * @elem_cnt: Number of virtqueue elements
+ */
+void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt)
+{
+ int i;
+
+ for (i = 0; i < elem_cnt; i++)
+ vu_set_element(&elem[i], NULL, &iov[i]);
+}
+
+/**
+ * vu_collect() - collect virtio buffers from a given virtqueue
+ * @vdev: vhost-user device
+ * @vq: virtqueue to collect from
+ * @elem: Array of virtqueue elements; each element must be
+ * initialized with one iovec entry in the in_sg array
+ * @max_elem: Number of virtqueue elements in the array
+ * @size: Maximum size of the data in the frame
+ * @frame_size: The total size of the buffers (output)
+ *
+ * Return: number of elements used to contain the frame
+ */
+int vu_collect(struct vu_dev *vdev, struct vu_virtq *vq,
+ struct vu_virtq_element *elem, int max_elem,
+ size_t size, size_t *frame_size)
+{
+ size_t current_size = 0;
+ int elem_cnt = 0;
+
+ while (current_size < size && elem_cnt < max_elem) {
+ struct iovec *iov;
+ int ret;
+
+ ret = vu_queue_pop(vdev, vq, &elem[elem_cnt]);
+ if (ret < 0)
+ break;
+
+ if (elem[elem_cnt].in_num < 1) {
+ warn("virtio-net receive queue contains no in buffers");
+ vu_queue_detach_element(vq);
+ break;
+ }
+
+ iov = &elem[elem_cnt].in_sg[0];
+
+ if (iov->iov_len > size - current_size)
+ iov->iov_len = size - current_size;
+
+ current_size += iov->iov_len;
+ elem_cnt++;
+
+ if (!vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+ break;
+ }
+
+ if (frame_size)
+ *frame_size = current_size;
+
+ return elem_cnt;
+}
+
+/**
+ * vu_set_vnethdr() - set virtio-net headers
+ * @vdev: vhost-user device
+ * @vnethdr: Address of the header to set
+ * @num_buffers: Number of guest buffers of the frame
+ */
+void vu_set_vnethdr(const struct vu_dev *vdev,
+ struct virtio_net_hdr_mrg_rxbuf *vnethdr,
+ int num_buffers)
+{
+ vnethdr->hdr = VU_HEADER;
+ if (vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+ vnethdr->num_buffers = htole16(num_buffers);
+}
+
+/**
+ * vu_flush() - flush all the collected buffers to the vhost-user interface
+ * @vdev: vhost-user device
+ * @vq: vhost-user virtqueue
+ * @elem: virtqueue elements array to send back to the virtqueue
+ * @elem_cnt: Length of the array
+ */
+void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
+ struct vu_virtq_element *elem, int elem_cnt)
+{
+ int i;
+
+ for (i = 0; i < elem_cnt; i++)
+ vu_queue_fill(vq, &elem[i], elem[i].in_sg[0].iov_len, i);
+
+ vu_queue_flush(vq, elem_cnt);
+ vu_queue_notify(vdev, vq);
+}
+
+/**
+ * vu_handle_tx() - Receive data from the TX virtqueue
+ * @vdev: vhost-user device
+ * @index: index of the virtqueue
+ * @now: Current timestamp
+ */
+static void vu_handle_tx(struct vu_dev *vdev, int index,
+ const struct timespec *now)
+{
+ struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
+ struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
+ struct vu_virtq *vq = &vdev->vq[index];
+ int hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ int out_sg_count;
+ int count;
+
+ ASSERT(VHOST_USER_IS_QUEUE_TX(index));
+
+ tap_flush_pools();
+
+ count = 0;
+ out_sg_count = 0;
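+	/* Pop the frames sent by the guest and add them to the tap
+	 * packet pool, stripping the virtio-net header
+	 */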
+ while (count < VIRTQUEUE_MAX_SIZE) {
+ int ret;
+
+ vu_set_element(&elem[count], &out_sg[out_sg_count], NULL);
+ ret = vu_queue_pop(vdev, vq, &elem[count]);
+ if (ret < 0)
+ break;
+ out_sg_count += elem[count].out_num;
+
+ if (elem[count].out_num < 1) {
+ warn("virtio-net transmit queue contains no out buffers");
+ break;
+ }
+ ASSERT(elem[count].out_num == 1);
+
+ tap_add_packet(vdev->context,
+ elem[count].out_sg[0].iov_len - hdrlen,
+ (char *)elem[count].out_sg[0].iov_base + hdrlen);
+ count++;
+ }
+ tap_handler(vdev->context, now);
+
+ if (count) {
+ int i;
+
+ for (i = 0; i < count; i++)
+ vu_queue_fill(vq, &elem[i], 0, i);
+ vu_queue_flush(vq, count);
+ vu_queue_notify(vdev, vq);
+ }
+}
+
+/**
+ * vu_kick_cb() - Called on a kick event to start to receive data
+ * @vdev: vhost-user device
+ * @ref: epoll reference information
+ * @now: Current timestamp
+ */
+void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
+ const struct timespec *now)
+{
+ eventfd_t kick_data;
+ ssize_t rc;
+
+ rc = eventfd_read(ref.fd, &kick_data);
+ if (rc == -1)
+ die_perror("vhost-user kick eventfd_read()");
+
+ debug("vhost-user: got kick_data: %016"PRIx64" idx: %d",
+ kick_data, ref.queue);
+ if (VHOST_USER_IS_QUEUE_TX(ref.queue))
+ vu_handle_tx(vdev, ref.queue, now);
+}
+
+/**
+ * vu_send_single() - Send a buffer to the front-end using the RX virtqueue
+ * @c: execution context
+ * @buf: address of the buffer
+ * @size: size of the buffer
+ *
+ * Return: number of bytes sent, -1 if there is an error
+ */
+int vu_send_single(const struct ctx *c, const void *buf, size_t size)
+{
+ struct vu_dev *vdev = c->vdev;
+ struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
+ struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
+ struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
+ size_t total;
+ int elem_cnt;
+ int i;
+
+ debug("vu_send_single size %zu", size);
+
+ if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
+ debug("Got packet, but RX virtqueue not usable yet");
+ return -1;
+ }
+
+ vu_init_elem(elem, in_sg, VIRTQUEUE_MAX_SIZE);
+
+ size += sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ elem_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE, size, &total);
+ if (total < size) {
+ debug("vu_send_single: no space to send the data "
+ "elem_cnt %d size %zd", elem_cnt, total);
+ goto err;
+ }
+
+ vu_set_vnethdr(vdev, in_sg[0].iov_base, elem_cnt);
+
+ total -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
+
+ /* copy data from the buffer to the iovec */
+ iov_from_buf(in_sg, elem_cnt, sizeof(struct virtio_net_hdr_mrg_rxbuf),
+ buf, total);
+
+ if (*c->pcap) {
+ pcap_iov(in_sg, elem_cnt,
+ sizeof(struct virtio_net_hdr_mrg_rxbuf));
+ }
+
+ vu_flush(vdev, vq, elem, elem_cnt);
+
+ debug("vhost-user sent %zu", total);
+
+ return total;
+err:
+ for (i = 0; i < elem_cnt; i++)
+ vu_queue_detach_element(vq);
+
+ return -1;
+}
diff --git a/vu_common.h b/vu_common.h
new file mode 100644
index 000000000000..901d97216c67
--- /dev/null
+++ b/vu_common.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright Red Hat
+ * Author: Laurent Vivier <lvivier@redhat.com>
+ *
+ * vhost-user common UDP and TCP functions
+ */
+
+#ifndef VU_COMMON_H
+#define VU_COMMON_H
+#include <linux/virtio_net.h>
+
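+/* Helpers to locate the protocol headers in a frame laid out as:
+ * struct virtio_net_hdr_mrg_rxbuf, struct ethhdr, struct iphdr or
+ * struct ipv6hdr, then the payload
+ */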
+static inline void *vu_eth(void *base)
+{
+ return ((char *)base + sizeof(struct virtio_net_hdr_mrg_rxbuf));
+}
+
+static inline void *vu_ip(void *base)
+{
+ return (struct ethhdr *)vu_eth(base) + 1;
+}
+
+static inline void *vu_payloadv4(void *base)
+{
+ return (struct iphdr *)vu_ip(base) + 1;
+}
+
+static inline void *vu_payloadv6(void *base)
+{
+ return (struct ipv6hdr *)vu_ip(base) + 1;
+}
+
+/**
+ * vu_set_element() - Initialize a vu_virtq_element
+ * @elem: Element to initialize
+ * @out_sg: One out iovec entry to set in elem
+ * @in_sg: One in iovec entry to set in elem
+ */
+static inline void vu_set_element(struct vu_virtq_element *elem,
+ struct iovec *out_sg, struct iovec *in_sg)
+{
+ elem->out_num = !!out_sg;
+ elem->out_sg = out_sg;
+ elem->in_num = !!in_sg;
+ elem->in_sg = in_sg;
+}
+
+void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov,
+ int elem_cnt);
+int vu_collect(struct vu_dev *vdev, struct vu_virtq *vq,
+ struct vu_virtq_element *elem, int max_elem, size_t size,
+ size_t *frame_size);
+void vu_set_vnethdr(const struct vu_dev *vdev,
+ struct virtio_net_hdr_mrg_rxbuf *vnethdr,
+ int num_buffers);
+void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
+ struct vu_virtq_element *elem, int elem_cnt);
+void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
+ const struct timespec *now);
+int vu_send_single(const struct ctx *c, const void *buf, size_t size);
+#endif /* VU_COMMON_H */
--
2.47.0
^ permalink raw reply related [flat|nested] 26+ messages in thread
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-22 16:43 ` [PATCH v14 7/9] vhost-user: add vhost-user Laurent Vivier
@ 2024-11-26 5:14 ` Stefano Brivio
2024-11-26 13:53 ` Stefano Brivio
2024-11-26 5:24 ` David Gibson
2024-11-27 4:47 ` Stefano Brivio
2 siblings, 1 reply; 26+ messages in thread
From: Stefano Brivio @ 2024-11-26 5:14 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
On Fri, 22 Nov 2024 17:43:34 +0100
Laurent Vivier <lvivier@redhat.com> wrote:
> +/**
> + * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
> + * in window
> + * @c: Execution context
> + * @conn: Connection pointer
> + *
> + * Return: Negative on connection reset, 0 otherwise
> + */
> +int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
> +{
> + uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + const struct flowside *tapside = TAPFLOW(conn);
> + size_t fillsize, hdrlen;
> + int v6 = CONN_V6(conn);
> + uint32_t already_sent;
> + const uint16_t *check;
> + int i, iov_cnt;
> + ssize_t len;
> +
> + if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
> + debug("Got packet, but RX virtqueue not usable yet");
> + return 0;
> + }
> +
> + already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
> +
> + if (SEQ_LT(already_sent, 0)) {
> + /* RFC 761, section 2.1. */
> + flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
> + conn->seq_ack_from_tap, conn->seq_to_tap);
> + conn->seq_to_tap = conn->seq_ack_from_tap;
> + already_sent = 0;
> + if (tcp_set_peek_offset(conn->sock, 0)) {
> + tcp_rst(c, conn);
> + return -1;
> + }
> + }
> +
> + if (!wnd_scaled || already_sent >= wnd_scaled) {
> + conn_flag(c, conn, STALLED);
> + conn_flag(c, conn, ACK_FROM_TAP_DUE);
> + return 0;
> + }
> +
> + /* Set up buffer descriptors we'll fill completely and partially. */
> +
> + fillsize = wnd_scaled - already_sent;
> +
> + /* collect the buffers from vhost-user and fill them with the
> + * data from the socket
> + */
> + len = tcp_vu_sock_recv(c, conn, v6, already_sent, fillsize, &iov_cnt);
> + if (len < 0) {
> + if (len != -EAGAIN && len != -EWOULDBLOCK) {
> + tcp_rst(c, conn);
> + return len;
> + }
> + return 0;
> + }
> +
> + if (!len) {
> + if (already_sent) {
> + conn_flag(c, conn, STALLED);
> + } else if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) ==
> + SOCK_FIN_RCVD) {
> + int ret = tcp_vu_send_flag(c, conn, FIN | ACK);
> + if (ret) {
> + tcp_rst(c, conn);
> + return ret;
> + }
> +
> + conn_event(c, conn, TAP_FIN_SENT);
> + }
> +
> + return 0;
> + }
> +
> + conn_flag(c, conn, ~STALLED);
> +
> + /* Likely, some new data was acked too. */
> + tcp_update_seqack_wnd(c, conn, false, NULL);
> +
> + /* initialize headers */
> + /* iov_vu is an array of buffers and the buffer size can be
> + * smaller than the frame size we want to use but with
> + * num_buffer we can merge several virtio iov buffers in one packet
> + * we need only to set the packet headers in the first iov and
> + * num_buffer to the number of iov entries
> + */
> +
> + hdrlen = tcp_vu_hdrlen(v6);
> + for (i = 0, check = NULL; i < head_cnt; i++) {
> + struct iovec *iov = &elem[head[i]].in_sg[0];
> + int buf_cnt = head[i + 1] - head[i];
> + int dlen = iov_size(iov, buf_cnt) - hdrlen;
Unless I'm missing something, to me this looks like a false positive,
but Coverity now reports, for this line:
(17) Event function_return: Function "iov_size(iov, buf_cnt)" returns 0.
(18) Event overflow_const: Expression "iov_size(iov, buf_cnt) - hdrlen", which is equal to 18446744073709551550, where "iov_size(iov, buf_cnt)" is known to be equal to 0, and "hdrlen" is known to be equal to 66, underflows the type that receives it, an unsigned integer 64 bits wide.
...I don't think iov_size() can ever return 0 if we reach this point,
but I would try to cover this by either, in order of preference:
1. not sending this frame if iov_size(iov, buf_cnt) < hdrlen
2. an ASSERT(iov_size(iov, buf_cnt) >= hdrlen)
It can be a follow-up patch, there's no need to re-post the whole thing
(at least not just for this), unless you see something that actually
needs to be fixed.
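Just to illustrate what I mean, an untested sketch of option 2., on
top of this patch, reusing the names from the loop quoted above:
	hdrlen = tcp_vu_hdrlen(v6);
	for (i = 0, check = NULL; i < head_cnt; i++) {
		struct iovec *iov = &elem[head[i]].in_sg[0];
		int buf_cnt = head[i + 1] - head[i];
		int dlen;
		/* never true if tcp_vu_sock_recv() did its job, but it
		 * makes the underflow obviously impossible
		 */
		ASSERT(iov_size(iov, buf_cnt) >= hdrlen);
		dlen = iov_size(iov, buf_cnt) - hdrlen;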
--
Stefano
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-26 5:14 ` Stefano Brivio
@ 2024-11-26 13:53 ` Stefano Brivio
2024-11-26 14:11 ` Laurent Vivier
0 siblings, 1 reply; 26+ messages in thread
From: Stefano Brivio @ 2024-11-26 13:53 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
On Tue, 26 Nov 2024 06:14:43 +0100
Stefano Brivio <sbrivio@redhat.com> wrote:
> On Fri, 22 Nov 2024 17:43:34 +0100
> Laurent Vivier <lvivier@redhat.com> wrote:
>
> > +/**
> > + * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
> > + * in window
> > + * @c: Execution context
> > + * @conn: Connection pointer
> > + *
> > + * Return: Negative on connection reset, 0 otherwise
> > + */
> > +int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
> > +{
> > + uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
> > + struct vu_dev *vdev = c->vdev;
> > + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> > + const struct flowside *tapside = TAPFLOW(conn);
> > + size_t fillsize, hdrlen;
> > + int v6 = CONN_V6(conn);
> > + uint32_t already_sent;
> > + const uint16_t *check;
> > + int i, iov_cnt;
> > + ssize_t len;
> > +
> > + if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
> > + debug("Got packet, but RX virtqueue not usable yet");
> > + return 0;
> > + }
> > +
> > + already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
> > +
> > + if (SEQ_LT(already_sent, 0)) {
> > + /* RFC 761, section 2.1. */
> > + flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
> > + conn->seq_ack_from_tap, conn->seq_to_tap);
> > + conn->seq_to_tap = conn->seq_ack_from_tap;
> > + already_sent = 0;
> > + if (tcp_set_peek_offset(conn->sock, 0)) {
> > + tcp_rst(c, conn);
> > + return -1;
> > + }
> > + }
> > +
> > + if (!wnd_scaled || already_sent >= wnd_scaled) {
> > + conn_flag(c, conn, STALLED);
> > + conn_flag(c, conn, ACK_FROM_TAP_DUE);
> > + return 0;
> > + }
> > +
> > + /* Set up buffer descriptors we'll fill completely and partially. */
> > +
> > + fillsize = wnd_scaled - already_sent;
> > +
> > + /* collect the buffers from vhost-user and fill them with the
> > + * data from the socket
> > + */
> > + len = tcp_vu_sock_recv(c, conn, v6, already_sent, fillsize, &iov_cnt);
> > + if (len < 0) {
> > + if (len != -EAGAIN && len != -EWOULDBLOCK) {
> > + tcp_rst(c, conn);
> > + return len;
> > + }
> > + return 0;
> > + }
> > +
> > + if (!len) {
> > + if (already_sent) {
> > + conn_flag(c, conn, STALLED);
> > + } else if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) ==
> > + SOCK_FIN_RCVD) {
> > + int ret = tcp_vu_send_flag(c, conn, FIN | ACK);
> > + if (ret) {
> > + tcp_rst(c, conn);
> > + return ret;
> > + }
> > +
> > + conn_event(c, conn, TAP_FIN_SENT);
> > + }
> > +
> > + return 0;
> > + }
> > +
> > + conn_flag(c, conn, ~STALLED);
> > +
> > + /* Likely, some new data was acked too. */
> > + tcp_update_seqack_wnd(c, conn, false, NULL);
> > +
> > + /* initialize headers */
> > + /* iov_vu is an array of buffers and the buffer size can be
> > + * smaller than the frame size we want to use but with
> > + * num_buffer we can merge several virtio iov buffers in one packet
> > + * we need only to set the packet headers in the first iov and
> > + * num_buffer to the number of iov entries
> > + */
> > +
> > + hdrlen = tcp_vu_hdrlen(v6);
> > + for (i = 0, check = NULL; i < head_cnt; i++) {
> > + struct iovec *iov = &elem[head[i]].in_sg[0];
> > + int buf_cnt = head[i + 1] - head[i];
> > + int dlen = iov_size(iov, buf_cnt) - hdrlen;
>
> Unless I'm missing something, to me this looks like a false positive,
> but Coverity now reports, for this line:
>
> (17) Event function_return: Function "iov_size(iov, buf_cnt)" returns 0.
> (18) Event overflow_const: Expression "iov_size(iov, buf_cnt) - hdrlen", which is equal to 18446744073709551550, where "iov_size(iov, buf_cnt)" is known to be equal to 0, and "hdrlen" is known to be equal to 66, underflows the type that receives it, an unsigned integer 64 bits wide.
>
> ...I don't think iov_size() can ever return 0 if we reach this point,
> but I would try to cover this by either, in order of preference:
>
> 1. not sending this frame if iov_size(iov, buf_cnt) < hdrlen
>
> 2. an ASSERT(iov_size(iov, buf_cnt) >= hdrlen)
>
> It can be a follow-up patch, there's no need to re-post the whole thing
> (at least not just for this), unless you see something that actually
> needs to be fixed.
...nothing to be fixed in your opinion, I suppose?
--
Stefano
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-26 13:53 ` Stefano Brivio
@ 2024-11-26 14:11 ` Laurent Vivier
2024-11-26 15:20 ` Stefano Brivio
0 siblings, 1 reply; 26+ messages in thread
From: Laurent Vivier @ 2024-11-26 14:11 UTC (permalink / raw)
To: Stefano Brivio; +Cc: passt-dev
On 26/11/2024 14:53, Stefano Brivio wrote:
> On Tue, 26 Nov 2024 06:14:43 +0100
> Stefano Brivio <sbrivio@redhat.com> wrote:
>
>> On Fri, 22 Nov 2024 17:43:34 +0100
>> Laurent Vivier <lvivier@redhat.com> wrote:
>>
>>> +/**
>>> + * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
>>> + * in window
>>> + * @c: Execution context
>>> + * @conn: Connection pointer
>>> + *
>>> + * Return: Negative on connection reset, 0 otherwise
>>> + */
>>> +int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
>>> +{
>>> + uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
>>> + struct vu_dev *vdev = c->vdev;
>>> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
>>> + const struct flowside *tapside = TAPFLOW(conn);
>>> + size_t fillsize, hdrlen;
>>> + int v6 = CONN_V6(conn);
>>> + uint32_t already_sent;
>>> + const uint16_t *check;
>>> + int i, iov_cnt;
>>> + ssize_t len;
>>> +
>>> + if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
>>> + debug("Got packet, but RX virtqueue not usable yet");
>>> + return 0;
>>> + }
>>> +
>>> + already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
>>> +
>>> + if (SEQ_LT(already_sent, 0)) {
>>> + /* RFC 761, section 2.1. */
>>> + flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
>>> + conn->seq_ack_from_tap, conn->seq_to_tap);
>>> + conn->seq_to_tap = conn->seq_ack_from_tap;
>>> + already_sent = 0;
>>> + if (tcp_set_peek_offset(conn->sock, 0)) {
>>> + tcp_rst(c, conn);
>>> + return -1;
>>> + }
>>> + }
>>> +
>>> + if (!wnd_scaled || already_sent >= wnd_scaled) {
>>> + conn_flag(c, conn, STALLED);
>>> + conn_flag(c, conn, ACK_FROM_TAP_DUE);
>>> + return 0;
>>> + }
>>> +
>>> + /* Set up buffer descriptors we'll fill completely and partially. */
>>> +
>>> + fillsize = wnd_scaled - already_sent;
>>> +
>>> + /* collect the buffers from vhost-user and fill them with the
>>> + * data from the socket
>>> + */
>>> + len = tcp_vu_sock_recv(c, conn, v6, already_sent, fillsize, &iov_cnt);
>>> + if (len < 0) {
>>> + if (len != -EAGAIN && len != -EWOULDBLOCK) {
>>> + tcp_rst(c, conn);
>>> + return len;
>>> + }
>>> + return 0;
>>> + }
>>> +
>>> + if (!len) {
>>> + if (already_sent) {
>>> + conn_flag(c, conn, STALLED);
>>> + } else if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) ==
>>> + SOCK_FIN_RCVD) {
>>> + int ret = tcp_vu_send_flag(c, conn, FIN | ACK);
>>> + if (ret) {
>>> + tcp_rst(c, conn);
>>> + return ret;
>>> + }
>>> +
>>> + conn_event(c, conn, TAP_FIN_SENT);
>>> + }
>>> +
>>> + return 0;
>>> + }
>>> +
>>> + conn_flag(c, conn, ~STALLED);
>>> +
>>> + /* Likely, some new data was acked too. */
>>> + tcp_update_seqack_wnd(c, conn, false, NULL);
>>> +
>>> + /* initialize headers */
>>> + /* iov_vu is an array of buffers and the buffer size can be
>>> + * smaller than the frame size we want to use but with
>>> + * num_buffer we can merge several virtio iov buffers in one packet
>>> + * we need only to set the packet headers in the first iov and
>>> + * num_buffer to the number of iov entries
>>> + */
>>> +
>>> + hdrlen = tcp_vu_hdrlen(v6);
>>> + for (i = 0, check = NULL; i < head_cnt; i++) {
>>> + struct iovec *iov = &elem[head[i]].in_sg[0];
>>> + int buf_cnt = head[i + 1] - head[i];
>>> + int dlen = iov_size(iov, buf_cnt) - hdrlen;
>>
>> Unless I'm missing something, to me this looks like a false positive,
>> but Coverity now reports, for this line:
>>
>> (17) Event function_return: Function "iov_size(iov, buf_cnt)" returns 0.
>> (18) Event overflow_const: Expression "iov_size(iov, buf_cnt) - hdrlen", which is equal to 18446744073709551550, where "iov_size(iov, buf_cnt)" is known to be equal to 0, and "hdrlen" is known to be equal to 66, underflows the type that receives it, an unsigned integer 64 bits wide.
>>
>> ...I don't think iov_size() can ever return 0 if we reach this point,
>> but I would try to cover this by either, in order of preference:
>>
>> 1. not sending this frame if iov_size(iov, buf_cnt) < hdrlen
>>
>> 2. an ASSERT(iov_size(iov, buf_cnt) >= hdrlen)
>>
>> It can be a follow-up patch, there's no need to re-post the whole thing
>> (at least not just for this), unless you see something that actually
>> needs to be fixed.
>
> ...nothing to be fixed in your opinion, I suppose?
>
There is an ASSERT() in tcp_vu_sock_recv() that ensures the size of the first
iovec of a segment is at least hdrlen.
Thanks,
Laurent
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-26 14:11 ` Laurent Vivier
@ 2024-11-26 15:20 ` Stefano Brivio
2024-11-26 15:41 ` Laurent Vivier
0 siblings, 1 reply; 26+ messages in thread
From: Stefano Brivio @ 2024-11-26 15:20 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
On Tue, 26 Nov 2024 15:11:25 +0100
Laurent Vivier <lvivier@redhat.com> wrote:
> On 26/11/2024 14:53, Stefano Brivio wrote:
> > On Tue, 26 Nov 2024 06:14:43 +0100
> > Stefano Brivio <sbrivio@redhat.com> wrote:
> >
> >> On Fri, 22 Nov 2024 17:43:34 +0100
> >> Laurent Vivier <lvivier@redhat.com> wrote:
> >>
> >>> +/**
> >>> + * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
> >>> + * in window
> >>> + * @c: Execution context
> >>> + * @conn: Connection pointer
> >>> + *
> >>> + * Return: Negative on connection reset, 0 otherwise
> >>> + */
> >>> +int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
> >>> +{
> >>> + uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
> >>> + struct vu_dev *vdev = c->vdev;
> >>> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> >>> + const struct flowside *tapside = TAPFLOW(conn);
> >>> + size_t fillsize, hdrlen;
> >>> + int v6 = CONN_V6(conn);
> >>> + uint32_t already_sent;
> >>> + const uint16_t *check;
> >>> + int i, iov_cnt;
> >>> + ssize_t len;
> >>> +
> >>> + if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
> >>> + debug("Got packet, but RX virtqueue not usable yet");
> >>> + return 0;
> >>> + }
> >>> +
> >>> + already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
> >>> +
> >>> + if (SEQ_LT(already_sent, 0)) {
> >>> + /* RFC 761, section 2.1. */
> >>> + flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
> >>> + conn->seq_ack_from_tap, conn->seq_to_tap);
> >>> + conn->seq_to_tap = conn->seq_ack_from_tap;
> >>> + already_sent = 0;
> >>> + if (tcp_set_peek_offset(conn->sock, 0)) {
> >>> + tcp_rst(c, conn);
> >>> + return -1;
> >>> + }
> >>> + }
> >>> +
> >>> + if (!wnd_scaled || already_sent >= wnd_scaled) {
> >>> + conn_flag(c, conn, STALLED);
> >>> + conn_flag(c, conn, ACK_FROM_TAP_DUE);
> >>> + return 0;
> >>> + }
> >>> +
> >>> + /* Set up buffer descriptors we'll fill completely and partially. */
> >>> +
> >>> + fillsize = wnd_scaled - already_sent;
> >>> +
> >>> + /* collect the buffers from vhost-user and fill them with the
> >>> + * data from the socket
> >>> + */
> >>> + len = tcp_vu_sock_recv(c, conn, v6, already_sent, fillsize, &iov_cnt);
> >>> + if (len < 0) {
> >>> + if (len != -EAGAIN && len != -EWOULDBLOCK) {
> >>> + tcp_rst(c, conn);
> >>> + return len;
> >>> + }
> >>> + return 0;
> >>> + }
> >>> +
> >>> + if (!len) {
> >>> + if (already_sent) {
> >>> + conn_flag(c, conn, STALLED);
> >>> + } else if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) ==
> >>> + SOCK_FIN_RCVD) {
> >>> + int ret = tcp_vu_send_flag(c, conn, FIN | ACK);
> >>> + if (ret) {
> >>> + tcp_rst(c, conn);
> >>> + return ret;
> >>> + }
> >>> +
> >>> + conn_event(c, conn, TAP_FIN_SENT);
> >>> + }
> >>> +
> >>> + return 0;
> >>> + }
> >>> +
> >>> + conn_flag(c, conn, ~STALLED);
> >>> +
> >>> + /* Likely, some new data was acked too. */
> >>> + tcp_update_seqack_wnd(c, conn, false, NULL);
> >>> +
> >>> + /* initialize headers */
> >>> + /* iov_vu is an array of buffers and the buffer size can be
> >>> + * smaller than the frame size we want to use but with
> >>> + * num_buffer we can merge several virtio iov buffers in one packet
> >>> + * we need only to set the packet headers in the first iov and
> >>> + * num_buffer to the number of iov entries
> >>> + */
> >>> +
> >>> + hdrlen = tcp_vu_hdrlen(v6);
> >>> + for (i = 0, check = NULL; i < head_cnt; i++) {
> >>> + struct iovec *iov = &elem[head[i]].in_sg[0];
> >>> + int buf_cnt = head[i + 1] - head[i];
> >>> + int dlen = iov_size(iov, buf_cnt) - hdrlen;
> >>
> >> Unless I'm missing something, to me this looks like a false positive,
> >> but Coverity now reports, for this line:
> >>
> >> (17) Event function_return: Function "iov_size(iov, buf_cnt)" returns 0.
> >> (18) Event overflow_const: Expression "iov_size(iov, buf_cnt) - hdrlen", which is equal to 18446744073709551550, where "iov_size(iov, buf_cnt)" is known to be equal to 0, and "hdrlen" is known to be equal to 66, underflows the type that receives it, an unsigned integer 64 bits wide.
> >>
> >> ...I don't think iov_size() can ever return 0 if we reach this point,
> >> but I would try to cover this by either, in order of preference:
> >>
> >> 1. not sending this frame if iov_size(iov, buf_cnt) < hdrlen
> >>
> >> 2. an ASSERT(iov_size(iov, buf_cnt) >= hdrlen)
> >>
> >> It can be a follow-up patch, there's no need to re-post the whole thing
> >> (at least not just for this), unless you see something that actually
> >> needs to be fixed.
> >
> > ...nothing to be fixed in your opinion, I suppose?
>
> There is an ASSERT() in tcp_vu_sock_recv() that ensure size of the first iovec of segment
> is at least hdrlen.
Oh, I didn't see that. Instead of duplicating it here, turning 'dlen'
to ssize_t also takes care of the warning. Probably size_t would be a
better fit, but ssize_t is anyway harmless. I can change that in a
follow-up patch too.
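Something on the lines of this (sketch, untested):
	for (i = 0, check = NULL; i < head_cnt; i++) {
		struct iovec *iov = &elem[head[i]].in_sg[0];
		int buf_cnt = head[i + 1] - head[i];
		ssize_t dlen = iov_size(iov, buf_cnt) - hdrlen;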
By the way, I built the series on different architectures and C
libraries; there are a few "formal" issues, which I can also fix up on
merge or as a follow-up.
That is, if you want to re-post I'm also fine with it of course, but I
don't see a reason to delay this because of those. I would wait a bit
to see if David has further comments, and if not, I would make a
(further) release *first*, so that we have one just before these
changes, then merge (with fix-ups).
- Debian i686:
--
In file included from util.h:21,
from packet.c:22:
packet.c: In function ‘packet_check_range’:
packet.c:57:23: warning: format ‘%lu’ expects argument of type ‘long unsigned int’, but argument 5 has type ‘size_t’ {aka ‘unsigned int’} [-Wformat=]
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
58 | "%s:%i", start - p->buf + len + offset,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| |
| size_t {aka unsigned int}
log.h:25:66: note: in definition of macro ‘debug’
25 | #define debug(...) logmsg(true, false, LOG_DEBUG, __VA_ARGS__)
| ^~~~~~~~~~~
packet.c:57:17: note: in expansion of macro ‘trace’
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~
packet.c:57:52: note: format string is defined here
57 | trace("packet offset plus length %lu from size %lu, "
| ~~^
| |
| long unsigned int
| %u
packet.c:57:23: warning: format ‘%lu’ expects argument of type ‘long unsigned int’, but argument 6 has type ‘size_t’ {aka ‘unsigned int’} [-Wformat=]
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
58 | "%s:%i", start - p->buf + len + offset,
59 | p->buf_size, func, line);
| ~~~~~~~~~~~
| |
| size_t {aka unsigned int}
log.h:25:66: note: in definition of macro ‘debug’
25 | #define debug(...) logmsg(true, false, LOG_DEBUG, __VA_ARGS__)
| ^~~~~~~~~~~
packet.c:57:17: note: in expansion of macro ‘trace’
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~
packet.c:57:66: note: format string is defined here
57 | trace("packet offset plus length %lu from size %lu, "
| ~~^
| |
| long unsigned int
| %u
vhost_user.c: In function ‘qva_to_va’:
vhost_user.c:139:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
139 | return (void *)(qemu_addr - r->qva + r->mmap_addr +
| ^
vhost_user.c: In function ‘vu_set_mem_table_exec’:
vhost_user.c:439:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
439 | munmap((void *)r->mmap_addr, r->size + r->mmap_offset);
| ^
vhost_user.c: In function ‘vu_cleanup’:
vhost_user.c:900:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
900 | munmap((void *)r->mmap_addr, r->size + r->mmap_offset);
| ^
virtio.c: In function ‘vu_gpa_to_va’:
virtio.c:111:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
111 | return (void *)(guest_addr - r->gpa + r->mmap_addr +
| ^
vu_common.c: In function ‘vu_packet_check_range’:
vu_common.c:37:27: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
37 | char *m = (char *)dev_region->mmap_addr;
| ^
--
- Alpine (musl) x86:
--
In file included from passt.h:185,
from tcp_vu.c:21:
/usr/include/netinet/if_ether.h:115:8: error: redefinition of 'struct ethhdr'
115 | struct ethhdr {
| ^~~~~~
In file included from /usr/include/linux/virtio_net.h:32,
from tcp_vu.c:17:
/usr/include/linux/if_ether.h:173:8: note: originally defined here
173 | struct ethhdr {
| ^~~~~~
In file included from passt.h:185,
from vu_common.c:14:
/usr/include/netinet/if_ether.h:115:8: error: redefinition of 'struct ethhdr'
115 | struct ethhdr {
| ^~~~~~
In file included from /usr/include/linux/virtio_net.h:32,
from vu_common.c:11:
/usr/include/linux/if_ether.h:173:8: note: originally defined here
173 | struct ethhdr {
| ^~~~~~
make: *** [Makefile:87: passt] Error 1
--
- Debian armhf (same issues as i686):
--
In file included from util.h:21,
from packet.c:22:
packet.c: In function ‘packet_check_range’:
packet.c:57:23: warning: format ‘%lu’ expects argument of type ‘long unsigned int’, but argument 5 has type ‘size_t’ {aka ‘unsigned int’} [-Wformat=]
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
58 | "%s:%i", start - p->buf + len + offset,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| |
| size_t {aka unsigned int}
log.h:25:66: note: in definition of macro ‘debug’
25 | #define debug(...) logmsg(true, false, LOG_DEBUG, __VA_ARGS__)
| ^~~~~~~~~~~
packet.c:57:17: note: in expansion of macro ‘trace’
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~
packet.c:57:52: note: format string is defined here
57 | trace("packet offset plus length %lu from size %lu, "
| ~~^
| |
| long unsigned int
| %u
packet.c:57:23: warning: format ‘%lu’ expects argument of type ‘long unsigned int’, but argument 6 has type ‘size_t’ {aka ‘unsigned int’} [-Wformat=]
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
58 | "%s:%i", start - p->buf + len + offset,
59 | p->buf_size, func, line);
| ~~~~~~~~~~~
| |
| size_t {aka unsigned int}
log.h:25:66: note: in definition of macro ‘debug’
25 | #define debug(...) logmsg(true, false, LOG_DEBUG, __VA_ARGS__)
| ^~~~~~~~~~~
packet.c:57:17: note: in expansion of macro ‘trace’
57 | trace("packet offset plus length %lu from size %lu, "
| ^~~~~
packet.c:57:66: note: format string is defined here
57 | trace("packet offset plus length %lu from size %lu, "
| ~~^
| |
| long unsigned int
| %u
vhost_user.c: In function ‘qva_to_va’:
vhost_user.c:139:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
139 | return (void *)(qemu_addr - r->qva + r->mmap_addr +
| ^
vhost_user.c: In function ‘vu_set_mem_table_exec’:
vhost_user.c:439:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
439 | munmap((void *)r->mmap_addr, r->size + r->mmap_offset);
| ^
vhost_user.c: In function ‘vu_cleanup’:
vhost_user.c:900:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
900 | munmap((void *)r->mmap_addr, r->size + r->mmap_offset);
| ^
virtio.c: In function ‘vu_gpa_to_va’:
virtio.c:111:32: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
111 | return (void *)(guest_addr - r->gpa + r->mmap_addr +
| ^
vu_common.c: In function ‘vu_packet_check_range’:
vu_common.c:37:27: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast]
37 | char *m = (char *)dev_region->mmap_addr;
| ^
--
--
Stefano
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-26 15:20 ` Stefano Brivio
@ 2024-11-26 15:41 ` Laurent Vivier
0 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-26 15:41 UTC (permalink / raw)
To: Stefano Brivio; +Cc: passt-dev
On 26/11/2024 16:20, Stefano Brivio wrote:
> On Tue, 26 Nov 2024 15:11:25 +0100
> Laurent Vivier <lvivier@redhat.com> wrote:
>
>> On 26/11/2024 14:53, Stefano Brivio wrote:
>>> On Tue, 26 Nov 2024 06:14:43 +0100
>>> Stefano Brivio <sbrivio@redhat.com> wrote:
>>>
>>>> On Fri, 22 Nov 2024 17:43:34 +0100
>>>> Laurent Vivier <lvivier@redhat.com> wrote:
>>>>
>>>>> +/**
>>>>> + * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
>>>>> + * in window
>>>>> + * @c: Execution context
>>>>> + * @conn: Connection pointer
>>>>> + *
>>>>> + * Return: Negative on connection reset, 0 otherwise
>>>>> + */
>>>>> +int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
>>>>> +{
>>>>> + uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
>>>>> + struct vu_dev *vdev = c->vdev;
>>>>> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
>>>>> + const struct flowside *tapside = TAPFLOW(conn);
>>>>> + size_t fillsize, hdrlen;
>>>>> + int v6 = CONN_V6(conn);
>>>>> + uint32_t already_sent;
>>>>> + const uint16_t *check;
>>>>> + int i, iov_cnt;
>>>>> + ssize_t len;
>>>>> +
>>>>> + if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
>>>>> + debug("Got packet, but RX virtqueue not usable yet");
>>>>> + return 0;
>>>>> + }
>>>>> +
>>>>> + already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
>>>>> +
>>>>> + if (SEQ_LT(already_sent, 0)) {
>>>>> + /* RFC 761, section 2.1. */
>>>>> + flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
>>>>> + conn->seq_ack_from_tap, conn->seq_to_tap);
>>>>> + conn->seq_to_tap = conn->seq_ack_from_tap;
>>>>> + already_sent = 0;
>>>>> + if (tcp_set_peek_offset(conn->sock, 0)) {
>>>>> + tcp_rst(c, conn);
>>>>> + return -1;
>>>>> + }
>>>>> + }
>>>>> +
>>>>> + if (!wnd_scaled || already_sent >= wnd_scaled) {
>>>>> + conn_flag(c, conn, STALLED);
>>>>> + conn_flag(c, conn, ACK_FROM_TAP_DUE);
>>>>> + return 0;
>>>>> + }
>>>>> +
>>>>> + /* Set up buffer descriptors we'll fill completely and partially. */
>>>>> +
>>>>> + fillsize = wnd_scaled - already_sent;
>>>>> +
>>>>> + /* collect the buffers from vhost-user and fill them with the
>>>>> + * data from the socket
>>>>> + */
>>>>> + len = tcp_vu_sock_recv(c, conn, v6, already_sent, fillsize, &iov_cnt);
>>>>> + if (len < 0) {
>>>>> + if (len != -EAGAIN && len != -EWOULDBLOCK) {
>>>>> + tcp_rst(c, conn);
>>>>> + return len;
>>>>> + }
>>>>> + return 0;
>>>>> + }
>>>>> +
>>>>> + if (!len) {
>>>>> + if (already_sent) {
>>>>> + conn_flag(c, conn, STALLED);
>>>>> + } else if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) ==
>>>>> + SOCK_FIN_RCVD) {
>>>>> + int ret = tcp_vu_send_flag(c, conn, FIN | ACK);
>>>>> + if (ret) {
>>>>> + tcp_rst(c, conn);
>>>>> + return ret;
>>>>> + }
>>>>> +
>>>>> + conn_event(c, conn, TAP_FIN_SENT);
>>>>> + }
>>>>> +
>>>>> + return 0;
>>>>> + }
>>>>> +
>>>>> + conn_flag(c, conn, ~STALLED);
>>>>> +
>>>>> + /* Likely, some new data was acked too. */
>>>>> + tcp_update_seqack_wnd(c, conn, false, NULL);
>>>>> +
>>>>> + /* initialize headers */
>>>>> + /* iov_vu is an array of buffers and the buffer size can be
>>>>> + * smaller than the frame size we want to use but with
>>>>> + * num_buffer we can merge several virtio iov buffers in one packet
>>>>> + * we need only to set the packet headers in the first iov and
>>>>> + * num_buffer to the number of iov entries
>>>>> + */
>>>>> +
>>>>> + hdrlen = tcp_vu_hdrlen(v6);
>>>>> + for (i = 0, check = NULL; i < head_cnt; i++) {
>>>>> + struct iovec *iov = &elem[head[i]].in_sg[0];
>>>>> + int buf_cnt = head[i + 1] - head[i];
>>>>> + int dlen = iov_size(iov, buf_cnt) - hdrlen;
>>>>
>>>> Unless I'm missing something, to me this looks like a false positive,
>>>> but Coverity now reports, for this line:
>>>>
>>>> (17) Event function_return: Function "iov_size(iov, buf_cnt)" returns 0.
>>>> (18) Event overflow_const: Expression "iov_size(iov, buf_cnt) - hdrlen", which is equal to 18446744073709551550, where "iov_size(iov, buf_cnt)" is known to be equal to 0, and "hdrlen" is known to be equal to 66, underflows the type that receives it, an unsigned integer 64 bits wide.
>>>>
>>>> ...I don't think iov_size() can ever return 0 if we reach this point,
>>>> but I would try to cover this by either, in order of preference:
>>>>
>>>> 1. not sending this frame if iov_size(iov, buf_cnt) < hdrlen
>>>>
>>>> 2. an ASSERT(iov_size(iov, buf_cnt) >= hdrlen)
>>>>
>>>> It can be a follow-up patch, there's no need to re-post the whole thing
>>>> (at least not just for this), unless you see something that actually
>>>> needs to be fixed.
>>>
>>> ...nothing to be fixed in your opinion, I suppose?
>>
>> There is an ASSERT() in tcp_vu_sock_recv() that ensures the size of
>> the first iovec of a segment is at least hdrlen.
>
> Oh, I didn't see that. Instead of duplicating it here, turning 'dlen'
> to ssize_t also takes care of the warning. Probably size_t would be a
> better fit, but ssize_t is anyway harmless. I can change that in a
> follow-up patch too.
dlen is limited by the MTU size, so any type bigger than unsigned short is OK...
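As a sketch, the follow-up would then just be:

	ssize_t dlen = iov_size(iov, buf_cnt) - hdrlen;

in place of the current int declaration.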
>
> By the way, I built the series on different architectures and C
> libraries, there are a few "formal" issues, which I can also fix up on
> merge or as follow-up.
>
> That is, if you want to re-post I'm also fine with it of course, but I
> don't see a reason to delay this because of those. I would wait a bit
> to see if David has further comments, and if not, I would make a
> (further) release *first*, so that we have one just before these
> changes, then merge (with fix-ups).
You can fix some of them on merge if you want; I will fix all the remaining ones once merged.
Thanks,
Laurent
>
> [... build logs for Debian i686, Alpine (musl) x86 and Debian armhf
> snipped: identical to the output quoted in full above ...]
>
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-22 16:43 ` [PATCH v14 7/9] vhost-user: add vhost-user Laurent Vivier
2024-11-26 5:14 ` Stefano Brivio
@ 2024-11-26 5:24 ` David Gibson
2024-11-28 12:57 ` Laurent Vivier
2024-11-27 4:47 ` Stefano Brivio
2 siblings, 1 reply; 26+ messages in thread
From: David Gibson @ 2024-11-26 5:24 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
[-- Attachment #1: Type: text/plain, Size: 65230 bytes --]
On Fri, Nov 22, 2024 at 05:43:34PM +0100, Laurent Vivier wrote:
> add virtio and vhost-user functions to connect with QEMU.
>
> $ ./passt --vhost-user
>
> and
>
> # qemu-system-x86_64 ... -m 4G \
> -object memory-backend-memfd,id=memfd0,share=on,size=4G \
> -numa node,memdev=memfd0 \
> -chardev socket,id=chr0,path=/tmp/passt_1.socket \
> -netdev vhost-user,id=netdev0,chardev=chr0 \
> -device virtio-net,mac=9a:2b:2c:2d:2e:2f,netdev=netdev0 \
> ...
>
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
I have several remaining comments below for things that I think could
be improved. However, I don't think any are serious enough to delay
merge - they can be addressed as follow-up changes.
> ---
> Makefile | 6 +-
> conf.c | 19 +-
> epoll_type.h | 4 +
> iov.c | 1 -
> isolation.c | 17 +-
> packet.c | 11 ++
> packet.h | 8 +-
> passt.1 | 10 +-
> passt.c | 9 +
> passt.h | 7 +
> pcap.c | 1 -
> tap.c | 77 ++++++--
> tap.h | 5 +-
> tcp.c | 7 +
> tcp_vu.c | 497 +++++++++++++++++++++++++++++++++++++++++++++++++++
> tcp_vu.h | 12 ++
> udp.c | 11 ++
> udp_vu.c | 343 +++++++++++++++++++++++++++++++++++
> udp_vu.h | 13 ++
> vhost_user.c | 41 +++--
> vhost_user.h | 4 +-
> virtio.c | 5 -
> vu_common.c | 282 +++++++++++++++++++++++++++++
> vu_common.h | 60 +++++++
> 24 files changed, 1397 insertions(+), 53 deletions(-)
> create mode 100644 tcp_vu.c
> create mode 100644 tcp_vu.h
> create mode 100644 udp_vu.c
> create mode 100644 udp_vu.h
> create mode 100644 vu_common.c
> create mode 100644 vu_common.h
>
> diff --git a/Makefile b/Makefile
> index bcb084e66e4d..faa5c23346ac 100644
> --- a/Makefile
> +++ b/Makefile
> @@ -37,7 +37,8 @@ FLAGS += -DDUAL_STACK_SOCKETS=$(DUAL_STACK_SOCKETS)
> PASST_SRCS = arch.c arp.c checksum.c conf.c dhcp.c dhcpv6.c flow.c fwd.c \
> icmp.c igmp.c inany.c iov.c ip.c isolation.c lineread.c log.c mld.c \
> ndp.c netlink.c packet.c passt.c pasta.c pcap.c pif.c tap.c tcp.c \
> - tcp_buf.c tcp_splice.c udp.c udp_flow.c util.c vhost_user.c virtio.c
> + tcp_buf.c tcp_splice.c tcp_vu.c udp.c udp_flow.c udp_vu.c util.c \
> + vhost_user.c virtio.c vu_common.c
> QRAP_SRCS = qrap.c
> SRCS = $(PASST_SRCS) $(QRAP_SRCS)
>
> @@ -47,7 +48,8 @@ PASST_HEADERS = arch.h arp.h checksum.h conf.h dhcp.h dhcpv6.h flow.h fwd.h \
> flow_table.h icmp.h icmp_flow.h inany.h iov.h ip.h isolation.h \
> lineread.h log.h ndp.h netlink.h packet.h passt.h pasta.h pcap.h pif.h \
> siphash.h tap.h tcp.h tcp_buf.h tcp_conn.h tcp_internal.h tcp_splice.h \
> - udp.h udp_flow.h util.h vhost_user.h virtio.h
> + tcp_vu.h udp.h udp_flow.h udp_internal.h udp_vu.h util.h vhost_user.h \
> + virtio.h vu_common.h
> HEADERS = $(PASST_HEADERS) seccomp.h
>
> C := \#include <sys/random.h>\nint main(){int a=getrandom(0, 0, 0);}
> diff --git a/conf.c b/conf.c
> index 86566dbf1ee0..d9d63d70ae5a 100644
> --- a/conf.c
> +++ b/conf.c
> @@ -45,6 +45,7 @@
> #include "lineread.h"
> #include "isolation.h"
> #include "log.h"
> +#include "vhost_user.h"
>
> #define NETNS_RUN_DIR "/run/netns"
>
> @@ -769,9 +770,14 @@ static void usage(const char *name, FILE *f, int status)
> " default: same interface name as external one\n");
> } else {
> FPRINTF(f,
> - " -s, --socket PATH UNIX domain socket path\n"
> + " -s, --socket, --socket-path PATH UNIX domain socket path\n"
> " default: probe free path starting from "
> UNIX_SOCK_PATH "\n", 1);
> + FPRINTF(f,
> + " --vhost-user Enable vhost-user mode\n"
> + " UNIX domain socket is provided by -s option\n"
> + " --print-capabilities print back-end capabilities in JSON format,\n"
> + " only meaningful for vhost-user mode\n");
> }
>
> FPRINTF(f,
> @@ -1305,6 +1311,10 @@ void conf(struct ctx *c, int argc, char **argv)
> {"map-guest-addr", required_argument, NULL, 22 },
> {"host-lo-to-ns-lo", no_argument, NULL, 23 },
> {"dns-host", required_argument, NULL, 24 },
> + {"vhost-user", no_argument, NULL, 25 },
> + /* vhost-user backend program convention */
> + {"print-capabilities", no_argument, NULL, 26 },
> + {"socket-path", required_argument, NULL, 's' },
> { 0 },
> };
> const char *logname = (c->mode == MODE_PASTA) ? "pasta" : "passt";
> @@ -1498,6 +1508,13 @@ void conf(struct ctx *c, int argc, char **argv)
> break;
>
> die("Invalid host nameserver address: %s", optarg);
> + case 25:
> + if (c->mode == MODE_PASTA)
> + die("--vhost-user is for passt mode only");
> + c->mode = MODE_VU;
> + break;
> + case 26:
> + vu_print_capabilities();
> break;
> case 'd':
> c->debug = 1;
> diff --git a/epoll_type.h b/epoll_type.h
> index 0ad1efa0ccec..f3ef41584757 100644
> --- a/epoll_type.h
> +++ b/epoll_type.h
> @@ -36,6 +36,10 @@ enum epoll_type {
> EPOLL_TYPE_TAP_PASST,
> /* socket listening for qemu socket connections */
> EPOLL_TYPE_TAP_LISTEN,
> + /* vhost-user command socket */
> + EPOLL_TYPE_VHOST_CMD,
> + /* vhost-user kick event socket */
> + EPOLL_TYPE_VHOST_KICK,
>
> EPOLL_NUM_TYPES,
> };
> diff --git a/iov.c b/iov.c
> index 3f9e229a305f..3741db21790f 100644
> --- a/iov.c
> +++ b/iov.c
> @@ -68,7 +68,6 @@ size_t iov_skip_bytes(const struct iovec *iov, size_t n,
> *
> * Returns: The number of bytes successfully copied.
> */
> -/* cppcheck-suppress unusedFunction */
> size_t iov_from_buf(const struct iovec *iov, size_t iov_cnt,
> size_t offset, const void *buf, size_t bytes)
> {
> diff --git a/isolation.c b/isolation.c
> index 45fba1e68b9d..c944fb35c3a4 100644
> --- a/isolation.c
> +++ b/isolation.c
> @@ -379,12 +379,21 @@ void isolate_postfork(const struct ctx *c)
>
> prctl(PR_SET_DUMPABLE, 0);
>
> - if (c->mode == MODE_PASTA) {
> - prog.len = (unsigned short)ARRAY_SIZE(filter_pasta);
> - prog.filter = filter_pasta;
> - } else {
> + switch (c->mode) {
> + case MODE_PASST:
> prog.len = (unsigned short)ARRAY_SIZE(filter_passt);
> prog.filter = filter_passt;
> + break;
> + case MODE_PASTA:
> + prog.len = (unsigned short)ARRAY_SIZE(filter_pasta);
> + prog.filter = filter_pasta;
> + break;
> + case MODE_VU:
> + prog.len = (unsigned short)ARRAY_SIZE(filter_vu);
> + prog.filter = filter_vu;
> + break;
> + default:
> + ASSERT(0);
> }
>
> if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
> diff --git a/packet.c b/packet.c
> index 37489961a37e..e5a78d079231 100644
> --- a/packet.c
> +++ b/packet.c
> @@ -36,6 +36,17 @@
> static int packet_check_range(const struct pool *p, size_t offset, size_t len,
> const char *start, const char *func, int line)
> {
> + if (p->buf_size == 0) {
> + int ret;
> +
> + ret = vu_packet_check_range((void *)p->buf, offset, len, start);
> +
> + if (ret == -1)
> + trace("cannot find region, %s:%i", func, line);
> +
> + return ret;
> + }
> +
> if (start < p->buf) {
> trace("packet start %p before buffer start %p, "
> "%s:%i", (void *)start, (void *)p->buf, func, line);
> diff --git a/packet.h b/packet.h
> index 8377dcf678bb..3f70e949c066 100644
> --- a/packet.h
> +++ b/packet.h
> @@ -8,8 +8,10 @@
>
> /**
> * struct pool - Generic pool of packets stored in a buffer
> - * @buf: Buffer storing packet descriptors
> - * @buf_size: Total size of buffer
> + * @buf: Buffer storing packet descriptors,
> + * a struct vu_dev_region array for passt vhost-user mode
> + * @buf_size: Total size of buffer,
> + * 0 for passt vhost-user mode
> * @size: Number of usable descriptors for the pool
> * @count: Number of used descriptors for the pool
> * @pkt: Descriptors: see macros below
> @@ -22,6 +24,8 @@ struct pool {
> struct iovec pkt[1];
> };
>
> +int vu_packet_check_range(void *buf, size_t offset, size_t len,
> + const char *start);
> void packet_add_do(struct pool *p, size_t len, const char *start,
> const char *func, int line);
> void *packet_get_do(const struct pool *p, const size_t idx,
> diff --git a/passt.1 b/passt.1
> index f0849787217e..a100e0f1d727 100644
> --- a/passt.1
> +++ b/passt.1
> @@ -397,12 +397,20 @@ interface address are configured on a given host interface.
> .SS \fBpasst\fR-only options
>
> .TP
> -.BR \-s ", " \-\-socket " " \fIpath
> +.BR \-s ", " \-\-socket-path ", " \-\-socket " " \fIpath
> Path for UNIX domain socket used by \fBqemu\fR(1) or \fBqrap\fR(1) to connect to
> \fBpasst\fR.
> Default is to probe a free socket, not accepting connections, starting from
> \fI/tmp/passt_1.socket\fR to \fI/tmp/passt_64.socket\fR.
>
> +.TP
> +.BR \-\-vhost-user
> +Enable vhost-user. The vhost-user command socket is provided by \fB--socket\fR.
> +
> +.TP
> +.BR \-\-print-capabilities
> +Print back-end capabilities in JSON format, only meaningful for vhost-user mode.
> +
> .TP
> .BR \-F ", " \-\-fd " " \fIFD
> Pass a pre-opened, connected socket to \fBpasst\fR. Usually the socket is opened
> diff --git a/passt.c b/passt.c
> index 25f5c1a1a2fd..eb96a449b29e 100644
> --- a/passt.c
> +++ b/passt.c
> @@ -50,6 +50,7 @@
> #include "log.h"
> #include "tcp_splice.h"
> #include "ndp.h"
> +#include "vu_common.h"
>
> #define EPOLL_EVENTS 8
>
> @@ -72,6 +73,8 @@ char *epoll_type_str[] = {
> [EPOLL_TYPE_TAP_PASTA] = "/dev/net/tun device",
> [EPOLL_TYPE_TAP_PASST] = "connected qemu socket",
> [EPOLL_TYPE_TAP_LISTEN] = "listening qemu socket",
> + [EPOLL_TYPE_VHOST_CMD] = "vhost-user command socket",
> + [EPOLL_TYPE_VHOST_KICK] = "vhost-user kick socket",
> };
> static_assert(ARRAY_SIZE(epoll_type_str) == EPOLL_NUM_TYPES,
> "epoll_type_str[] doesn't match enum epoll_type");
> @@ -346,6 +349,12 @@ loop:
> case EPOLL_TYPE_PING:
> icmp_sock_handler(&c, ref);
> break;
> + case EPOLL_TYPE_VHOST_CMD:
> + vu_control_handler(c.vdev, c.fd_tap, eventmask);
> + break;
> + case EPOLL_TYPE_VHOST_KICK:
> + vu_kick_cb(c.vdev, ref, &now);
> + break;
> default:
> /* Can't happen */
> ASSERT(0);
> diff --git a/passt.h b/passt.h
> index 72c7f723a7bb..076f7db43345 100644
> --- a/passt.h
> +++ b/passt.h
> @@ -25,6 +25,7 @@ union epoll_ref;
> #include "fwd.h"
> #include "tcp.h"
> #include "udp.h"
> +#include "vhost_user.h"
>
> /* Default address for our end on the tap interface. Bit 0 of byte 0 must be 0
> * (unicast) and bit 1 of byte 1 must be 1 (locally administered). Otherwise
> @@ -43,6 +44,7 @@ union epoll_ref;
> * @icmp: ICMP-specific reference part
> * @data: Data handled by protocol handlers
> * @nsdir_fd: netns dirfd for fallback timer checking if namespace is gone
> + * @queue: vhost-user queue index for this fd
> * @u64: Opaque reference for epoll_ctl() and epoll_wait()
> */
> union epoll_ref {
> @@ -58,6 +60,7 @@ union epoll_ref {
> union udp_listen_epoll_ref udp;
> uint32_t data;
> int nsdir_fd;
> + int queue;
> };
> };
> uint64_t u64;
> @@ -94,6 +97,7 @@ struct fqdn {
> enum passt_modes {
> MODE_PASST,
> MODE_PASTA,
> + MODE_VU,
> };
>
> /**
> @@ -229,6 +233,7 @@ struct ip6_ctx {
> * @freebind: Allow binding of non-local addresses for forwarding
> * @low_wmem: Low probed net.core.wmem_max
> * @low_rmem: Low probed net.core.rmem_max
> + * @vdev: vhost-user device
> */
> struct ctx {
> enum passt_modes mode;
> @@ -291,6 +296,8 @@ struct ctx {
>
> int low_wmem;
> int low_rmem;
> +
> + struct vu_dev *vdev;
> };
>
> void proto_update_l2_buf(const unsigned char *eth_d,
> diff --git a/pcap.c b/pcap.c
> index 23205ddfed84..3d623cfead77 100644
> --- a/pcap.c
> +++ b/pcap.c
> @@ -143,7 +143,6 @@ void pcap_multiple(const struct iovec *iov, size_t frame_parts, unsigned int n,
> * @iovcnt: Number of buffers (@iov entries)
> * @offset: Offset of the L2 frame within the full data length
> */
> -/* cppcheck-suppress unusedFunction */
> void pcap_iov(const struct iovec *iov, size_t iovcnt, size_t offset)
> {
> struct timespec now = { 0 };
> diff --git a/tap.c b/tap.c
> index 238f248ca45b..386f0bccd2fb 100644
> --- a/tap.c
> +++ b/tap.c
> @@ -58,6 +58,8 @@
> #include "packet.h"
> #include "tap.h"
> #include "log.h"
> +#include "vhost_user.h"
> +#include "vu_common.h"
>
> /* IPv4 (plus ARP) and IPv6 message batches from tap/guest to IP handlers */
> static PACKET_POOL_NOINIT(pool_tap4, TAP_MSGS, pkt_buf);
> @@ -78,16 +80,22 @@ void tap_send_single(const struct ctx *c, const void *data, size_t l2len)
> struct iovec iov[2];
> size_t iovcnt = 0;
>
> - if (c->mode == MODE_PASST) {
> + switch (c->mode) {
> + case MODE_PASST:
> iov[iovcnt] = IOV_OF_LVALUE(vnet_len);
> iovcnt++;
> - }
> -
> - iov[iovcnt].iov_base = (void *)data;
> - iov[iovcnt].iov_len = l2len;
> - iovcnt++;
> + /* fall through */
> + case MODE_PASTA:
> + iov[iovcnt].iov_base = (void *)data;
> + iov[iovcnt].iov_len = l2len;
> + iovcnt++;
>
> - tap_send_frames(c, iov, iovcnt, 1);
> + tap_send_frames(c, iov, iovcnt, 1);
> + break;
> + case MODE_VU:
> + vu_send_single(c, data, l2len);
> + break;
> + }
> }
>
> /**
> @@ -414,10 +422,18 @@ size_t tap_send_frames(const struct ctx *c, const struct iovec *iov,
> if (!nframes)
> return 0;
>
> - if (c->mode == MODE_PASTA)
> + switch (c->mode) {
> + case MODE_PASTA:
> m = tap_send_frames_pasta(c, iov, bufs_per_frame, nframes);
> - else
> + break;
> + case MODE_PASST:
> m = tap_send_frames_passt(c, iov, bufs_per_frame, nframes);
> + break;
> + case MODE_VU:
> + /* fall through */
> + default:
> + ASSERT(0);
> + }
>
> if (m < nframes)
> debug("tap: failed to send %zu frames of %zu",
> @@ -976,7 +992,7 @@ void tap_add_packet(struct ctx *c, ssize_t l2len, char *p)
> * tap_sock_reset() - Handle closing or failure of connect AF_UNIX socket
> * @c: Execution context
> */
> -static void tap_sock_reset(struct ctx *c)
> +void tap_sock_reset(struct ctx *c)
> {
> info("Client connection closed%s", c->one_off ? ", exiting" : "");
>
> @@ -987,6 +1003,8 @@ static void tap_sock_reset(struct ctx *c)
> epoll_ctl(c->epollfd, EPOLL_CTL_DEL, c->fd_tap, NULL);
> close(c->fd_tap);
> c->fd_tap = -1;
> + if (c->mode == MODE_VU)
> + vu_cleanup(c->vdev);
> }
>
> /**
> @@ -1207,6 +1225,11 @@ static void tap_backend_show_hints(struct ctx *c)
> info("or qrap, for earlier qemu versions:");
> info(" ./qrap 5 kvm ... -net socket,fd=5 -net nic,model=virtio");
> break;
> + case MODE_VU:
> + info("You can start qemu with:");
> + info(" kvm ... -chardev socket,id=chr0,path=%s -netdev vhost-user,id=netdev0,chardev=chr0 -device virtio-net,netdev=netdev0 -object memory-backend-memfd,id=memfd0,share=on,size=$RAMSIZE -numa node,memdev=memfd0\n",
> + c->sock_path);
> + break;
> }
> }
>
> @@ -1234,8 +1257,8 @@ static void tap_sock_unix_init(const struct ctx *c)
> */
> void tap_listen_handler(struct ctx *c, uint32_t events)
> {
> - union epoll_ref ref = { .type = EPOLL_TYPE_TAP_PASST };
> struct epoll_event ev = { 0 };
> + union epoll_ref ref = { 0 };
> int v = INT_MAX / 2;
> struct ucred ucred;
> socklen_t len;
> @@ -1275,6 +1298,10 @@ void tap_listen_handler(struct ctx *c, uint32_t events)
> trace("tap: failed to set SO_SNDBUF to %i", v);
>
> ref.fd = c->fd_tap;
> + if (c->mode == MODE_VU)
> + ref.type = EPOLL_TYPE_VHOST_CMD;
> + else
> + ref.type = EPOLL_TYPE_TAP_PASST;
> ev.events = EPOLLIN | EPOLLRDHUP;
> ev.data.u64 = ref.u64;
> epoll_ctl(c->epollfd, EPOLL_CTL_ADD, c->fd_tap, &ev);
> @@ -1341,7 +1368,7 @@ static void tap_sock_tun_init(struct ctx *c)
> * @base: Buffer base
> * @size Buffer size
> */
> -static void tap_sock_update_pool(void *base, size_t size)
> +void tap_sock_update_pool(void *base, size_t size)
> {
> int i;
>
> @@ -1361,7 +1388,10 @@ static void tap_sock_update_pool(void *base, size_t size)
> */
> void tap_backend_init(struct ctx *c)
> {
> - tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));
> + if (c->mode == MODE_VU)
> + tap_sock_update_pool(NULL, 0);
> + else
> + tap_sock_update_pool(pkt_buf, sizeof(pkt_buf));
>
> if (c->fd_tap != -1) { /* Passed as --fd */
> struct epoll_event ev = { 0 };
> @@ -1369,10 +1399,17 @@ void tap_backend_init(struct ctx *c)
>
> ASSERT(c->one_off);
> ref.fd = c->fd_tap;
> - if (c->mode == MODE_PASST)
> + switch (c->mode) {
> + case MODE_PASST:
> ref.type = EPOLL_TYPE_TAP_PASST;
> - else
> + break;
> + case MODE_PASTA:
> ref.type = EPOLL_TYPE_TAP_PASTA;
> + break;
> + case MODE_VU:
> + ref.type = EPOLL_TYPE_VHOST_CMD;
> + break;
> + }
>
> ev.events = EPOLLIN | EPOLLRDHUP;
> ev.data.u64 = ref.u64;
> @@ -1380,9 +1417,14 @@ void tap_backend_init(struct ctx *c)
> return;
> }
>
> - if (c->mode == MODE_PASTA) {
> + switch (c->mode) {
> + case MODE_PASTA:
> tap_sock_tun_init(c);
> - } else {
> + break;
> + case MODE_VU:
> + vu_init(c);
> + /* fall through */
> + case MODE_PASST:
> tap_sock_unix_init(c);
>
> /* In passt mode, we don't know the guest's MAC address until it
> @@ -1390,6 +1432,7 @@ void tap_backend_init(struct ctx *c)
> * first packets will reach it.
> */
> memset(&c->guest_mac, 0xff, sizeof(c->guest_mac));
> + break;
> }
>
> tap_backend_show_hints(c);
> diff --git a/tap.h b/tap.h
> index 8728cc5c09c3..dfbd8b9ebd72 100644
> --- a/tap.h
> +++ b/tap.h
> @@ -40,7 +40,8 @@ static inline struct iovec tap_hdr_iov(const struct ctx *c,
> */
> static inline void tap_hdr_update(struct tap_hdr *thdr, size_t l2len)
> {
> - thdr->vnet_len = htonl(l2len);
> + if (thdr)
> + thdr->vnet_len = htonl(l2len);
> }
>
> void tap_udp4_send(const struct ctx *c, struct in_addr src, in_port_t sport,
> @@ -68,6 +69,8 @@ void tap_handler_pasta(struct ctx *c, uint32_t events,
> void tap_handler_passt(struct ctx *c, uint32_t events,
> const struct timespec *now);
> int tap_sock_unix_open(char *sock_path);
> +void tap_sock_reset(struct ctx *c);
> +void tap_sock_update_pool(void *base, size_t size);
> void tap_backend_init(struct ctx *c);
> void tap_flush_pools(void);
> void tap_handler(struct ctx *c, const struct timespec *now);
> diff --git a/tcp.c b/tcp.c
> index 5d9968847d20..2b547876d58a 100644
> --- a/tcp.c
> +++ b/tcp.c
> @@ -304,6 +304,7 @@
> #include "flow_table.h"
> #include "tcp_internal.h"
> #include "tcp_buf.h"
> +#include "tcp_vu.h"
>
> /* MSS rounding: see SET_MSS() */
> #define MSS_DEFAULT 536
> @@ -1312,6 +1313,9 @@ int tcp_prepare_flags(const struct ctx *c, struct tcp_tap_conn *conn,
> static int tcp_send_flag(const struct ctx *c, struct tcp_tap_conn *conn,
> int flags)
> {
> + if (c->mode == MODE_VU)
> + return tcp_vu_send_flag(c, conn, flags);
> +
> return tcp_buf_send_flag(c, conn, flags);
> }
>
> @@ -1705,6 +1709,9 @@ static int tcp_sock_consume(const struct tcp_tap_conn *conn, uint32_t ack_seq)
> */
> static int tcp_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
> {
> + if (c->mode == MODE_VU)
> + return tcp_vu_data_from_sock(c, conn);
> +
> return tcp_buf_data_from_sock(c, conn);
> }
>
> diff --git a/tcp_vu.c b/tcp_vu.c
> new file mode 100644
> index 000000000000..be5027a1e921
> --- /dev/null
> +++ b/tcp_vu.c
> @@ -0,0 +1,497 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/* tcp_vu.c - TCP L2 vhost-user management functions
> + *
> + * Copyright Red Hat
> + * Author: Laurent Vivier <lvivier@redhat.com>
> + */
> +
> +#include <errno.h>
> +#include <stddef.h>
> +#include <stdint.h>
> +
> +#include <netinet/ip.h>
> +#include <netinet/tcp.h>
> +
> +#include <sys/socket.h>
> +
> +#include <linux/virtio_net.h>
> +
> +#include "util.h"
> +#include "ip.h"
> +#include "passt.h"
> +#include "siphash.h"
> +#include "inany.h"
> +#include "vhost_user.h"
> +#include "tcp.h"
> +#include "pcap.h"
> +#include "flow.h"
> +#include "tcp_conn.h"
> +#include "flow_table.h"
> +#include "tcp_vu.h"
> +#include "tap.h"
> +#include "tcp_internal.h"
> +#include "checksum.h"
> +#include "vu_common.h"
> +#include <time.h>
> +
> +static struct iovec iov_vu[VIRTQUEUE_MAX_SIZE + 1];
> +static struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
> +static int head[VIRTQUEUE_MAX_SIZE + 1];
> +static int head_cnt;
> +
> +/**
> + * tcp_vu_hdrlen() - return the size of the header in level 2 frame (TCP)
> + * @v6: Set for IPv6 packet
> + *
> + * Return: Return the size of the header
> + */
> +static size_t tcp_vu_hdrlen(bool v6)
> +{
> + size_t hdrlen;
> +
> + hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
> + sizeof(struct ethhdr) + sizeof(struct tcphdr);
> +
> + if (v6)
> + hdrlen += sizeof(struct ipv6hdr);
> + else
> + hdrlen += sizeof(struct iphdr);
> +
> + return hdrlen;
> +}
> +
> +/**
> + * tcp_vu_update_check() - Calculate TCP checksum
> + * @tapside: Address information for one side of the flow
> + * @iov: Pointer to the array of IO vectors
> + * @iov_cnt: Length of the array
> + */
> +static void tcp_vu_update_check(const struct flowside *tapside,
> + struct iovec *iov, int iov_cnt)
> +{
> + char *base = iov[0].iov_base;
> +
> + if (inany_v4(&tapside->oaddr)) {
> + const struct iphdr *iph = vu_ip(base);
> +
> + tcp_update_check_tcp4(iph, iov, iov_cnt,
> + (char *)vu_payloadv4(base) - base);
> + } else {
> + const struct ipv6hdr *ip6h = vu_ip(base);
> +
> + tcp_update_check_tcp6(ip6h, iov, iov_cnt,
> + (char *)vu_payloadv6(base) - base);
> + }
> +}
> +
> +/**
> + * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
> + * @c: Execution context
> + * @conn: Connection pointer
> + * @flags: TCP flags: if not set, send segment only if ACK is due
> + *
> + * Return: negative error code on connection reset, 0 otherwise
> + */
> +int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
> +{
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + const struct flowside *tapside = TAPFLOW(conn);
> + size_t l2len, l4len, optlen, hdrlen;
> + struct vu_virtq_element flags_elem[2];
> + struct tcp_payload_t *payload;
> + struct ipv6hdr *ip6h = NULL;
> + struct iovec flags_iov[2];
> + struct iphdr *iph = NULL;
> + struct ethhdr *eh;
> + uint32_t seq;
> + int elem_cnt;
> + int nb_ack;
> + int ret;
> +
> + hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
> +
> + vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
> +
> + elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
> + hdrlen + sizeof(struct tcp_syn_opts), NULL);
> + if (elem_cnt != 1)
> + return -1;
> +
> + ASSERT(flags_elem[0].in_sg[0].iov_len >=
> + hdrlen + sizeof(struct tcp_syn_opts));
> +
> + vu_set_vnethdr(vdev, flags_elem[0].in_sg[0].iov_base, 1);
> +
> + eh = vu_eth(flags_elem[0].in_sg[0].iov_base);
> +
> + memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
> + memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
> +
> + if (CONN_V4(conn)) {
> + eh->h_proto = htons(ETH_P_IP);
> +
> + iph = vu_ip(flags_elem[0].in_sg[0].iov_base);
> + *iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
> +
> + payload = vu_payloadv4(flags_elem[0].in_sg[0].iov_base);
> + } else {
> + eh->h_proto = htons(ETH_P_IPV6);
> +
> + ip6h = vu_ip(flags_elem[0].in_sg[0].iov_base);
> + *ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
> + payload = vu_payloadv6(flags_elem[0].in_sg[0].iov_base);
> + }
> +
> + memset(&payload->th, 0, sizeof(payload->th));
> + payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
> + payload->th.ack = 1;
> +
> + seq = conn->seq_to_tap;
> + ret = tcp_prepare_flags(c, conn, flags, &payload->th,
> + (struct tcp_syn_opts *)payload->data,
> + &optlen);
> + if (ret <= 0) {
> + vu_queue_rewind(vq, 1);
> + return ret;
> + }
> +
> + if (CONN_V4(conn)) {
> + l4len = tcp_fill_headers4(conn, NULL, iph, payload, optlen,
> + NULL, seq, true);
> + l2len = sizeof(*iph);
> + } else {
> + l4len = tcp_fill_headers6(conn, NULL, ip6h, payload, optlen,
> + seq, true);
> + l2len = sizeof(*ip6h);
> + }
> + l2len += l4len + sizeof(struct ethhdr);
> +
> + flags_elem[0].in_sg[0].iov_len = l2len +
> + sizeof(struct virtio_net_hdr_mrg_rxbuf);
You could simplify this down to (hdrlen + optlen).
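That is, something like this sketch, with the existing names:

	flags_elem[0].in_sg[0].iov_len = hdrlen + optlen;

given that hdrlen already counts the vnet, Ethernet, IP and TCP
headers, and optlen is the only variable part here.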
> + if (*c->pcap) {
> + tcp_vu_update_check(tapside, &flags_elem[0].in_sg[0], 1);
> + pcap_iov(&flags_elem[0].in_sg[0], 1,
> + sizeof(struct virtio_net_hdr_mrg_rxbuf));
> + }
> + nb_ack = 1;
> +
> + if (flags & DUP_ACK) {
> + vu_set_element(&flags_elem[1], NULL, &flags_iov[1]);
> +
> + elem_cnt = vu_collect(vdev, vq, &flags_elem[1], 1,
> + flags_elem[0].in_sg[0].iov_len, NULL);
> + if (elem_cnt == 1 &&
> + flags_elem[1].in_sg[0].iov_len >=
> + flags_elem[0].in_sg[0].iov_len) {
> + memcpy(flags_elem[1].in_sg[0].iov_base,
> + flags_elem[0].in_sg[0].iov_base,
> + flags_elem[0].in_sg[0].iov_len);
> + nb_ack++;
> +
> + if (*c->pcap) {
> + pcap_iov(&flags_elem[1].in_sg[0], 1,
> + sizeof(struct virtio_net_hdr_mrg_rxbuf));
> + }
> + }
> + }
> +
> + vu_flush(vdev, vq, flags_elem, nb_ack);
> +
> + return 0;
> +}
> +
> +/** tcp_vu_sock_recv() - Receive datastream from socket into vhost-user buffers
> + * @c: Execution context
> + * @conn: Connection pointer
> + * @v6: Set for IPv6 connections
> + * @already_sent: Number of bytes already sent
> + * @fillsize: Maximum bytes to fill in guest-side receiving window
> + * @iov_cnt: number of iov (output)
> + *
> + * Return: Number of iov entries used to store the data or negative error code
> + */
> +static ssize_t tcp_vu_sock_recv(const struct ctx *c,
> + const struct tcp_tap_conn *conn, bool v6,
> + uint32_t already_sent, size_t fillsize,
> + int *iov_cnt)
> +{
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + struct msghdr mh_sock = { 0 };
> + uint16_t mss = MSS_GET(conn);
> + int s = conn->sock;
> + ssize_t ret, len;
> + size_t hdrlen;
> + int elem_cnt;
> + int i;
> +
> + *iov_cnt = 0;
> +
> + hdrlen = tcp_vu_hdrlen(v6);
> +
> + vu_init_elem(elem, &iov_vu[1], VIRTQUEUE_MAX_SIZE);
> +
> + elem_cnt = 0;
> + head_cnt = 0;
> + while (fillsize > 0 && elem_cnt < VIRTQUEUE_MAX_SIZE) {
> + struct iovec *iov;
> + size_t frame_size, dlen;
> + int cnt;
> +
> + cnt = vu_collect(vdev, vq, &elem[elem_cnt],
> + VIRTQUEUE_MAX_SIZE - elem_cnt,
> + MIN(mss, fillsize) + hdrlen, &frame_size);
> + if (cnt == 0)
> + break;
> +
> + dlen = frame_size - hdrlen;
> +
> + /* reserve space for headers in iov */
> + iov = &elem[elem_cnt].in_sg[0];
> + ASSERT(iov->iov_len >= hdrlen);
> + iov->iov_base = (char *)iov->iov_base + hdrlen;
> + iov->iov_len -= hdrlen;
> + head[head_cnt++] = elem_cnt;
> +
> + fillsize -= dlen;
> + elem_cnt += cnt;
> + }
> +
> + if (peek_offset_cap) {
> + mh_sock.msg_iov = iov_vu + 1;
> + mh_sock.msg_iovlen = elem_cnt;
> + } else {
> + iov_vu[0].iov_base = tcp_buf_discard;
> + iov_vu[0].iov_len = already_sent;
> +
> + mh_sock.msg_iov = iov_vu;
> + mh_sock.msg_iovlen = elem_cnt + 1;
> + }
> +
> + do
> + ret = recvmsg(s, &mh_sock, MSG_PEEK);
> + while (ret < 0 && errno == EINTR);
> +
> + if (ret < 0) {
> + vu_queue_rewind(vq, elem_cnt);
> + return -errno;
> + }
> +
> + if (!peek_offset_cap)
> + ret -= already_sent;
> +
> + /* adjust iov number and length of the last iov */
> + len = ret;
> + for (i = 0; len && i < elem_cnt; i++) {
> + struct iovec *iov = &elem[i].in_sg[0];
> +
> + if (iov->iov_len > (size_t)len)
> + iov->iov_len = len;
> +
> + len -= iov->iov_len;
> + }
> + /* adjust head count */
> + while (head_cnt > 0 && head[head_cnt - 1] > i)
> + head_cnt--;
> + /* mark end of array */
> + head[head_cnt] = i;
> + *iov_cnt = i;
> +
> + /* release unused buffers */
> + vu_queue_rewind(vq, elem_cnt - i);
> +
> + /* restore space for headers in iov */
> + for (i = 0; i < head_cnt; i++) {
> + struct iovec *iov = &elem[head[i]].in_sg[0];
> +
> + iov->iov_base = (char *)iov->iov_base - hdrlen;
> + iov->iov_len += hdrlen;
Ah, something just occurred to me: I'd thought the (for now)
requirement that all the headers fit into the first buffer was just
about easily locating those headers to write them. But in fact, the
second reason is that if the headers were split across multiple
buffers then doing this IOV adjustment to exclude/reinclude the
headers becomes much trickier.
This, I think, is the basic reason behind the trouble I was having
with some of my IOV-tail-based cleanups, which I thought were just
showing up as technical complications.
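Concretely, both sides of the adjustment only ever touch in_sg[0]
(a sketch of the invariant, using the names above):

	/* exclude: the headers live entirely in the first buffer */
	iov->iov_base = (char *)iov->iov_base + hdrlen;
	iov->iov_len -= hdrlen;
	/* ...recvmsg() into the remaining space... */
	/* reinclude: the same single buffer gets them back */
	iov->iov_base = (char *)iov->iov_base - hdrlen;
	iov->iov_len += hdrlen;

If hdrlen could straddle in_sg[0] and in_sg[1], each step would need a
per-buffer split instead of a single pointer bump.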
> + }
> +
This is still returning different values depending on peek_offset_cap,
which is potentially confusing.
> + return ret;
> +}
> +
> +/**
> + * tcp_vu_prepare() - Prepare the frame header
> + * @c: Execution context
> + * @conn: Connection pointer
> + * @first: Pointer to the array of IO vectors
> + * @dlen: Packet data length
> + * @check: Checksum, if already known
> + */
> +static void tcp_vu_prepare(const struct ctx *c,
> + struct tcp_tap_conn *conn, char *base,
> + size_t dlen, const uint16_t **check)
> +{
> + const struct flowside *toside = TAPFLOW(conn);
> + struct tcp_payload_t *payload;
> + struct ipv6hdr *ip6h = NULL;
> + struct iphdr *iph = NULL;
> + struct ethhdr *eh;
> +
> + /* we guess the first iovec provided by the guest can embed
> + * all the headers needed by L2 frame
> + */
> +
> + eh = vu_eth(base);
> +
> + memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
> + memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
> +
> + /* initialize header */
> +
> + if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
> + eh->h_proto = htons(ETH_P_IP);
> +
> + iph = vu_ip(base);
> + *iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_TCP);
> + payload = vu_payloadv4(base);
> + } else {
> + eh->h_proto = htons(ETH_P_IPV6);
> +
> + ip6h = vu_ip(base);
> + *ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_TCP);
> +
> + payload = vu_payloadv6(base);
> + }
> +
> + memset(&payload->th, 0, sizeof(payload->th));
> + payload->th.doff = offsetof(struct tcp_payload_t, data) / 4;
> + payload->th.ack = 1;
> +
> + if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
> + tcp_fill_headers4(conn, NULL, iph, payload, dlen,
> + *check, conn->seq_to_tap, true);
> + *check = &iph->check;
I think there is a subtle broken case in the handling of the IPv4
header checksums. IIUC, if the guest doesn't support Rx buffer
merging, we could get segments other than the last which are shorter
than the mss due to limited space in the frame. In that case we can't
re-use the checksum, since it depends on the length.
I wonder if, rather than trying to incrementally update *check with
special handling for the first and last frame, we might be better off
doing, for each segment:

if (# of segment in batch > 0) AND
   (this frame's dlen == previous frame's dlen):
        check = &(previous frame)->check
else
        check = NULL;
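In C, inside the loop that prepares each segment, that might look like
this sketch (prev_dlen is a hypothetical local, not in the patch; the
rest follows tcp_vu_data_from_sock() as posted):

	int prev_dlen = -1;

	for (i = 0, check = NULL; i < head_cnt; i++) {
		...
		/* reuse the previous frame's IPv4 header checksum only
		 * if this segment has the same data length, otherwise
		 * force a recomputation
		 */
		if (i == 0 || dlen != prev_dlen)
			check = NULL;

		tcp_vu_prepare(c, conn, iov->iov_base, dlen, &check);
		prev_dlen = dlen;
		...
	}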
> + } else {
> + tcp_fill_headers6(conn, NULL, ip6h, payload, dlen,
> + conn->seq_to_tap, true);
> + }
> +}
> +
> +/**
> + * tcp_vu_data_from_sock() - Handle new data from socket, queue to vhost-user,
> + * in window
> + * @c: Execution context
> + * @conn: Connection pointer
> + *
> + * Return: Negative on connection reset, 0 otherwise
> + */
> +int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn)
> +{
> + uint32_t wnd_scaled = conn->wnd_from_tap << conn->ws_from_tap;
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + const struct flowside *tapside = TAPFLOW(conn);
> + size_t fillsize, hdrlen;
> + int v6 = CONN_V6(conn);
> + uint32_t already_sent;
> + const uint16_t *check;
> + int i, iov_cnt;
> + ssize_t len;
> +
> + if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
> + debug("Got packet, but RX virtqueue not usable yet");
> + return 0;
> + }
> +
> + already_sent = conn->seq_to_tap - conn->seq_ack_from_tap;
> +
> + if (SEQ_LT(already_sent, 0)) {
> + /* RFC 761, section 2.1. */
> + flow_trace(conn, "ACK sequence gap: ACK for %u, sent: %u",
> + conn->seq_ack_from_tap, conn->seq_to_tap);
> + conn->seq_to_tap = conn->seq_ack_from_tap;
> + already_sent = 0;
> + if (tcp_set_peek_offset(conn->sock, 0)) {
> + tcp_rst(c, conn);
> + return -1;
> + }
> + }
> +
> + if (!wnd_scaled || already_sent >= wnd_scaled) {
> + conn_flag(c, conn, STALLED);
> + conn_flag(c, conn, ACK_FROM_TAP_DUE);
> + return 0;
> + }
> +
> + /* Set up buffer descriptors we'll fill completely and partially. */
> +
> + fillsize = wnd_scaled - already_sent;
> +
> + /* collect the buffers from vhost-user and fill them with the
> + * data from the socket
> + */
> + len = tcp_vu_sock_recv(c, conn, v6, already_sent, fillsize, &iov_cnt);
> + if (len < 0) {
> + if (len != -EAGAIN && len != -EWOULDBLOCK) {
> + tcp_rst(c, conn);
> + return len;
> + }
> + return 0;
> + }
> +
> + if (!len) {
> + if (already_sent) {
> + conn_flag(c, conn, STALLED);
> + } else if ((conn->events & (SOCK_FIN_RCVD | TAP_FIN_SENT)) ==
> + SOCK_FIN_RCVD) {
> + int ret = tcp_vu_send_flag(c, conn, FIN | ACK);
> + if (ret) {
> + tcp_rst(c, conn);
> + return ret;
> + }
> +
> + conn_event(c, conn, TAP_FIN_SENT);
> + }
> +
> + return 0;
> + }
> +
> + conn_flag(c, conn, ~STALLED);
> +
> + /* Likely, some new data was acked too. */
> + tcp_update_seqack_wnd(c, conn, false, NULL);
> +
> + /* initialize headers */
> + /* iov_vu is an array of buffers and the buffer size can be
> + * smaller than the frame size we want to use but with
> + * num_buffer we can merge several virtio iov buffers in one packet
> + * we need only to set the packet headers in the first iov and
> + * num_buffer to the number of iov entries
> + */
> +
> + hdrlen = tcp_vu_hdrlen(v6);
> + for (i = 0, check = NULL; i < head_cnt; i++) {
> + struct iovec *iov = &elem[head[i]].in_sg[0];
> + int buf_cnt = head[i + 1] - head[i];
> + int dlen = iov_size(iov, buf_cnt) - hdrlen;
> +
> + vu_set_vnethdr(vdev, iov->iov_base, buf_cnt);
> +
> + /* we compute IPv4 header checksum only for the
> + * first and the last, all other checksums are the
> + * same as the first one
> + */
> + if (i + 1 == head_cnt)
> + check = NULL;
> +
> + tcp_vu_prepare(c, conn, iov->iov_base, dlen, &check);
> +
> + if (*c->pcap) {
> + tcp_vu_update_check(tapside, iov, buf_cnt);
> + pcap_iov(iov, buf_cnt,
> + sizeof(struct virtio_net_hdr_mrg_rxbuf));
> + }
> +
> + conn->seq_to_tap += dlen;
> + }
> +
> + /* send packets */
> + vu_flush(vdev, vq, elem, iov_cnt);
> +
> + conn_flag(c, conn, ACK_FROM_TAP_DUE);
> +
> + return 0;
> +}
> diff --git a/tcp_vu.h b/tcp_vu.h
> new file mode 100644
> index 000000000000..6ab6057f352a
> --- /dev/null
> +++ b/tcp_vu.h
> @@ -0,0 +1,12 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/* Copyright Red Hat
> + * Author: Laurent Vivier <lvivier@redhat.com>
> + */
> +
> +#ifndef TCP_VU_H
> +#define TCP_VU_H
> +
> +int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags);
> +int tcp_vu_data_from_sock(const struct ctx *c, struct tcp_tap_conn *conn);
> +
> +#endif /*TCP_VU_H */
> diff --git a/udp.c b/udp.c
> index 9718ed85e796..5b0093a15a30 100644
> --- a/udp.c
> +++ b/udp.c
> @@ -110,6 +110,7 @@
> #include "log.h"
> #include "flow_table.h"
> #include "udp_internal.h"
> +#include "udp_vu.h"
>
> /* "Spliced" sockets indexed by bound port (host order) */
> static int udp_splice_ns [IP_VERSIONS][NUM_PORTS];
> @@ -628,6 +629,11 @@ void udp_listen_sock_handler(const struct ctx *c,
> union epoll_ref ref, uint32_t events,
> const struct timespec *now)
> {
> + if (c->mode == MODE_VU) {
> + udp_vu_listen_sock_handler(c, ref, events, now);
> + return;
> + }
> +
> udp_buf_listen_sock_handler(c, ref, events, now);
> }
>
> @@ -698,6 +704,11 @@ static void udp_buf_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
> void udp_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
> uint32_t events, const struct timespec *now)
> {
> + if (c->mode == MODE_VU) {
> + udp_vu_reply_sock_handler(c, ref, events, now);
> + return;
> + }
> +
> udp_buf_reply_sock_handler(c, ref, events, now);
> }
>
> diff --git a/udp_vu.c b/udp_vu.c
> new file mode 100644
> index 000000000000..c911022546c1
> --- /dev/null
> +++ b/udp_vu.c
> @@ -0,0 +1,343 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/* udp_vu.c - UDP L2 vhost-user management functions
> + *
> + * Copyright Red Hat
> + * Author: Laurent Vivier <lvivier@redhat.com>
> + */
> +
> +#include <unistd.h>
> +#include <assert.h>
> +#include <net/ethernet.h>
> +#include <net/if.h>
> +#include <netinet/in.h>
> +#include <netinet/ip.h>
> +#include <netinet/udp.h>
> +#include <stdint.h>
> +#include <stddef.h>
> +#include <sys/uio.h>
> +#include <linux/virtio_net.h>
> +
> +#include "checksum.h"
> +#include "util.h"
> +#include "ip.h"
> +#include "siphash.h"
> +#include "inany.h"
> +#include "passt.h"
> +#include "pcap.h"
> +#include "log.h"
> +#include "vhost_user.h"
> +#include "udp_internal.h"
> +#include "flow.h"
> +#include "flow_table.h"
> +#include "udp_flow.h"
> +#include "udp_vu.h"
> +#include "vu_common.h"
> +
> +static struct iovec iov_vu [VIRTQUEUE_MAX_SIZE];
> +static struct vu_virtq_element elem [VIRTQUEUE_MAX_SIZE];
> +
> +/**
> + * udp_vu_hdrlen() - return the size of the header in level 2 frame (UDP)
> + * @v6: Set for IPv6 packet
> + *
> + * Return: Return the size of the header
> + */
> +static size_t udp_vu_hdrlen(bool v6)
> +{
> + size_t hdrlen;
> +
> + hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
> + sizeof(struct ethhdr) + sizeof(struct udphdr);
> +
> + if (v6)
> + hdrlen += sizeof(struct ipv6hdr);
> + else
> + hdrlen += sizeof(struct iphdr);
> +
> + return hdrlen;
> +}
> +
> +/**
> + * udp_vu_sock_info() - get socket information
> + * @s: Socket to get information from
> + * @s_in: Socket address (output)
> + *
> + * Return: 0 if socket address can be read, -1 otherwise
> + */
> +static int udp_vu_sock_info(int s, union sockaddr_inany *s_in)
> +{
> + struct msghdr msg = {
> + .msg_name = s_in,
> + .msg_namelen = sizeof(union sockaddr_inany),
> + };
> +
> + return recvmsg(s, &msg, MSG_PEEK | MSG_DONTWAIT);
> +}
> +
> +/**
> + * udp_vu_sock_recv() - Receive datagrams from socket into vhost-user buffers
> + * @c: Execution context
> + * @s: Socket to receive from
> + * @events: epoll events bitmap
> + * @v6: Set for IPv6 connections
> + * @dlen: Size of received data (output)
> + *
> + * Return: Number of iov entries used to store the datagram
> + */
> +static int udp_vu_sock_recv(const struct ctx *c, int s, uint32_t events,
> + bool v6, ssize_t *dlen)
> +{
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + int iov_cnt, idx, iov_used;
> + struct msghdr msg = { 0 };
> + size_t off, hdrlen;
> +
> + ASSERT(!c->no_udp);
> +
> + if (!(events & EPOLLIN))
> + return 0;
> +
> + /* compute L2 header length */
> + hdrlen = udp_vu_hdrlen(v6);
> +
> + vu_init_elem(elem, iov_vu, VIRTQUEUE_MAX_SIZE);
> +
> + iov_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE,
> + IP_MAX_MTU - sizeof(struct udphdr) + hdrlen,
I don't think this calculation is quite right, though it's probably
safe. At least for IPv4, IP_MAX_MTU includes the IP header itself,
but then you count that again in hdrlen.
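A sketch of what I mean, for the IPv4 case only (hypothetical, not
tested): the maximum UDP payload is IP_MAX_MTU minus both the L3 and
L4 headers, and hdrlen then re-adds every header plus the vnet one:

	iov_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE,
			     IP_MAX_MTU - sizeof(struct iphdr)
			     - sizeof(struct udphdr) + hdrlen, NULL);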
> + NULL);
> + if (iov_cnt == 0)
> + return 0;
> +
> + /* reserve space for the headers */
> + ASSERT(iov_vu[0].iov_len >= hdrlen);
> + iov_vu[0].iov_base = (char *)iov_vu[0].iov_base + hdrlen;
> + iov_vu[0].iov_len -= hdrlen;
> +
> + /* read data from the socket */
> + msg.msg_iov = iov_vu;
> + msg.msg_iovlen = iov_cnt;
> +
> + *dlen = recvmsg(s, &msg, 0);
> + if (*dlen < 0) {
> + vu_queue_rewind(vq, iov_cnt);
> + return 0;
> + }
> +
> + /* restore the pointer to the headers address */
> + iov_vu[0].iov_base = (char *)iov_vu[0].iov_base - hdrlen;
> + iov_vu[0].iov_len += hdrlen;
> +
> + /* count the numbers of buffer filled by recvmsg() */
> + idx = iov_skip_bytes(iov_vu, iov_cnt, *dlen + hdrlen, &off);
> +
> + /* adjust last iov length */
> + if (idx < iov_cnt)
> + iov_vu[idx].iov_len = off;
> + iov_used = idx + !!off;
> +
> + vu_set_vnethdr(vdev, iov_vu[0].iov_base, iov_used);
> +
> + /* release unused buffers */
> + vu_queue_rewind(vq, iov_cnt - iov_used);
> +
> + return iov_used;
> +}
> +
> +/**
> + * udp_vu_prepare() - Prepare the packet header
> + * @c: Execution context
> + * @toside: Address information for one side of the flow
> + * @dlen: Packet data length
> + *
> + * Return: Layer-4 length
> + */
> +static size_t udp_vu_prepare(const struct ctx *c,
> + const struct flowside *toside, ssize_t dlen)
> +{
> + struct ethhdr *eh;
> + size_t l4len;
> +
> + /* ethernet header */
> + eh = vu_eth(iov_vu[0].iov_base);
> +
> + memcpy(eh->h_dest, c->guest_mac, sizeof(eh->h_dest));
> + memcpy(eh->h_source, c->our_tap_mac, sizeof(eh->h_source));
> +
> + /* initialize header */
> + if (inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr)) {
> + struct iphdr *iph = vu_ip(iov_vu[0].iov_base);
> + struct udp_payload_t *bp = vu_payloadv4(iov_vu[0].iov_base);
> +
> + eh->h_proto = htons(ETH_P_IP);
> +
> + *iph = (struct iphdr)L2_BUF_IP4_INIT(IPPROTO_UDP);
> +
> + l4len = udp_update_hdr4(iph, bp, toside, dlen, true);
> + } else {
> + struct ipv6hdr *ip6h = vu_ip(iov_vu[0].iov_base);
> + struct udp_payload_t *bp = vu_payloadv6(iov_vu[0].iov_base);
> +
> + eh->h_proto = htons(ETH_P_IPV6);
> +
> + *ip6h = (struct ipv6hdr)L2_BUF_IP6_INIT(IPPROTO_UDP);
> +
> + l4len = udp_update_hdr6(ip6h, bp, toside, dlen, true);
> + }
> +
> + return l4len;
> +}
> +
> +/**
> + * udp_vu_csum() - Calculate and set checksum for a UDP packet
> + * @toside: Address information for one side of the flow
> + * @iov_used: Number of used iov_vu items
> + */
> +static void udp_vu_csum(const struct flowside *toside, int iov_used)
> +{
> + const struct in_addr *src4 = inany_v4(&toside->oaddr);
> + const struct in_addr *dst4 = inany_v4(&toside->eaddr);
> + char *base = iov_vu[0].iov_base;
> + struct udp_payload_t *bp;
> +
> + if (src4 && dst4) {
> + bp = vu_payloadv4(base);
> + csum_udp4(&bp->uh, *src4, *dst4, iov_vu, iov_used,
> + (char *)&bp->data - base);
> + } else {
> + bp = vu_payloadv6(base);
> + csum_udp6(&bp->uh, &toside->oaddr.a6, &toside->eaddr.a6,
> + iov_vu, iov_used, (char *)&bp->data - base);
> + }
> +}
> +
> +/**
> + * udp_vu_listen_sock_handler() - Handle new data from socket
> + * @c: Execution context
> + * @ref: epoll reference
> + * @events: epoll events bitmap
> + * @now: Current timestamp
> + */
> +void udp_vu_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
> + uint32_t events, const struct timespec *now)
> +{
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + int i;
> +
> + if (udp_sock_errs(c, ref.fd, events) < 0) {
> + err("UDP: Unrecoverable error on listening socket:"
> + " (%s port %hu)", pif_name(ref.udp.pif), ref.udp.port);
> + return;
> + }
> +
> + for (i = 0; i < UDP_MAX_FRAMES; i++) {
> + const struct flowside *toside;
> + union sockaddr_inany s_in;
> + flow_sidx_t sidx;
> + uint8_t pif;
> + ssize_t dlen;
> + int iov_used;
> + bool v6;
> +
> + if (udp_vu_sock_info(ref.fd, &s_in) < 0)
> + break;
> +
> + sidx = udp_flow_from_sock(c, ref, &s_in, now);
> + pif = pif_at_sidx(sidx);
> +
> + if (pif != PIF_TAP) {
> + if (flow_sidx_valid(sidx)) {
> + flow_sidx_t fromsidx = flow_sidx_opposite(sidx);
> + struct udp_flow *uflow = udp_at_sidx(sidx);
> +
> + flow_err(uflow,
> + "No support for forwarding UDP from %s to %s",
> + pif_name(pif_at_sidx(fromsidx)),
> + pif_name(pif));
> + } else {
> + debug("Discarding 1 datagram without flow");
> + }
> +
> + continue;
> + }
> +
> + toside = flowside_at_sidx(sidx);
> +
> + v6 = !(inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr));
> +
> + iov_used = udp_vu_sock_recv(c, ref.fd, events, v6, &dlen);
> + if (iov_used <= 0)
> + break;
> +
> + udp_vu_prepare(c, toside, dlen);
> + if (*c->pcap) {
> + udp_vu_csum(toside, iov_used);
> + pcap_iov(iov_vu, iov_used,
> + sizeof(struct virtio_net_hdr_mrg_rxbuf));
> + }
> + vu_flush(vdev, vq, elem, iov_used);
> + }
> +}
> +
> +/**
> + * udp_vu_reply_sock_handler() - Handle new data from flow specific socket
> + * @c: Execution context
> + * @ref: epoll reference
> + * @events: epoll events bitmap
> + * @now: Current timestamp
> + */
> +void udp_vu_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
> + uint32_t events, const struct timespec *now)
> +{
> + flow_sidx_t tosidx = flow_sidx_opposite(ref.flowside);
> + const struct flowside *toside = flowside_at_sidx(tosidx);
> + struct udp_flow *uflow = udp_at_sidx(ref.flowside);
> + int from_s = uflow->s[ref.flowside.sidei];
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + int i;
> +
> + ASSERT(!c->no_udp);
> +
> + if (udp_sock_errs(c, from_s, events) < 0) {
> + flow_err(uflow, "Unrecoverable error on reply socket");
> + flow_err_details(uflow);
> + udp_flow_close(c, uflow);
> + return;
> + }
> +
> + for (i = 0; i < UDP_MAX_FRAMES; i++) {
> + uint8_t topif = pif_at_sidx(tosidx);
> + ssize_t dlen;
> + int iov_used;
> + bool v6;
> +
> + ASSERT(uflow);
> +
> + if (topif != PIF_TAP) {
> + uint8_t frompif = pif_at_sidx(ref.flowside);
> +
> + flow_err(uflow,
> + "No support for forwarding UDP from %s to %s",
> + pif_name(frompif), pif_name(topif));
> + continue;
> + }
> +
> + v6 = !(inany_v4(&toside->eaddr) && inany_v4(&toside->oaddr));
> +
> + iov_used = udp_vu_sock_recv(c, from_s, events, v6, &dlen);
> + if (iov_used <= 0)
> + break;
> + flow_trace(uflow, "Received 1 datagram on reply socket");
> + uflow->ts = now->tv_sec;
> +
> + udp_vu_prepare(c, toside, dlen);
> + if (*c->pcap) {
> + udp_vu_csum(toside, iov_used);
> + pcap_iov(iov_vu, iov_used,
> + sizeof(struct virtio_net_hdr_mrg_rxbuf));
> + }
> + vu_flush(vdev, vq, elem, iov_used);
> + }
> +}
> diff --git a/udp_vu.h b/udp_vu.h
> new file mode 100644
> index 000000000000..ba7018d3bf01
> --- /dev/null
> +++ b/udp_vu.h
> @@ -0,0 +1,13 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/* Copyright Red Hat
> + * Author: Laurent Vivier <lvivier@redhat.com>
> + */
> +
> +#ifndef UDP_VU_H
> +#define UDP_VU_H
> +
> +void udp_vu_listen_sock_handler(const struct ctx *c, union epoll_ref ref,
> + uint32_t events, const struct timespec *now);
> +void udp_vu_reply_sock_handler(const struct ctx *c, union epoll_ref ref,
> + uint32_t events, const struct timespec *now);
> +#endif /* UDP_VU_H */
> diff --git a/vhost_user.c b/vhost_user.c
> index 89627a227ff1..51c90db10e7b 100644
> --- a/vhost_user.c
> +++ b/vhost_user.c
> @@ -48,12 +48,13 @@
> /* vhost-user version we are compatible with */
> #define VHOST_USER_VERSION 1
>
> +static struct vu_dev vdev_storage;
> +
> /**
> * vu_print_capabilities() - print vhost-user capabilities
> * this is part of the vhost-user backend
> * convention.
> */
> -/* cppcheck-suppress unusedFunction */
> void vu_print_capabilities(void)
> {
> info("{");
> @@ -163,9 +164,7 @@ static void vmsg_close_fds(const struct vhost_user_msg *vmsg)
> */
> static void vu_remove_watch(const struct vu_dev *vdev, int fd)
> {
> - /* Placeholder to add passt related code */
> - (void)vdev;
> - (void)fd;
> + epoll_ctl(vdev->context->epollfd, EPOLL_CTL_DEL, fd, NULL);
> }
>
> /**
> @@ -487,6 +486,14 @@ static bool vu_set_mem_table_exec(struct vu_dev *vdev,
> }
> }
>
> + /* As vu_packet_check_range() has no access to the number of
> + * memory regions, mark the end of the array with mmap_addr = 0
> + */
> + ASSERT(vdev->nregions < VHOST_USER_MAX_RAM_SLOTS - 1);
> + vdev->regions[vdev->nregions].mmap_addr = 0;
> +
> + tap_sock_update_pool(vdev->regions, 0);
> +
> return false;
> }
>
> @@ -615,9 +622,16 @@ static bool vu_get_vring_base_exec(struct vu_dev *vdev,
> */
> static void vu_set_watch(const struct vu_dev *vdev, int idx)
> {
> - /* Placeholder to add passt related code */
> - (void)vdev;
> - (void)idx;
> + union epoll_ref ref = {
> + .type = EPOLL_TYPE_VHOST_KICK,
> + .fd = vdev->vq[idx].kick_fd,
> + .queue = idx
> + };
> + struct epoll_event ev = { 0 };
> +
> + ev.data.u64 = ref.u64;
> + ev.events = EPOLLIN;
> + epoll_ctl(vdev->context->epollfd, EPOLL_CTL_ADD, ref.fd, &ev);
> }
>
> /**
> @@ -829,14 +843,14 @@ static bool vu_set_vring_enable_exec(struct vu_dev *vdev,
> * @c: execution context
> * @vdev: vhost-user device
> */
> -/* cppcheck-suppress unusedFunction */
> -void vu_init(struct ctx *c, struct vu_dev *vdev)
> +void vu_init(struct ctx *c)
> {
> int i;
>
> - vdev->context = c;
> + c->vdev = &vdev_storage;
> + c->vdev->context = c;
> for (i = 0; i < VHOST_USER_MAX_QUEUES; i++) {
> - vdev->vq[i] = (struct vu_virtq){
> + c->vdev->vq[i] = (struct vu_virtq){
> .call_fd = -1,
> .kick_fd = -1,
> .err_fd = -1,
> @@ -849,7 +863,6 @@ void vu_init(struct ctx *c, struct vu_dev *vdev)
> * vu_cleanup() - Reset vhost-user device
> * @vdev: vhost-user device
> */
> -/* cppcheck-suppress unusedFunction */
> void vu_cleanup(struct vu_dev *vdev)
> {
> unsigned int i;
> @@ -896,8 +909,7 @@ void vu_cleanup(struct vu_dev *vdev)
> */
> static void vu_sock_reset(struct vu_dev *vdev)
> {
> - /* Placeholder to add passt related code */
> - (void)vdev;
> + tap_sock_reset(vdev->context);
> }
>
> static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
> @@ -925,7 +937,6 @@ static bool (*vu_handle[VHOST_USER_MAX])(struct vu_dev *vdev,
> * @fd: vhost-user message socket
> * @events: epoll events
> */
> -/* cppcheck-suppress unusedFunction */
> void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events)
> {
> struct vhost_user_msg msg = { 0 };
> diff --git a/vhost_user.h b/vhost_user.h
> index 5af349ba58b8..464ba21e962f 100644
> --- a/vhost_user.h
> +++ b/vhost_user.h
> @@ -183,7 +183,6 @@ struct vhost_user_msg {
> *
> * Return: true if the virqueue is enabled, false otherwise
> */
> -/* cppcheck-suppress unusedFunction */
> static inline bool vu_queue_enabled(const struct vu_virtq *vq)
> {
> return vq->enable;
> @@ -195,14 +194,13 @@ static inline bool vu_queue_enabled(const struct vu_virtq *vq)
> *
> * Return: true if the virtqueue is started, false otherwise
> */
> -/* cppcheck-suppress unusedFunction */
> static inline bool vu_queue_started(const struct vu_virtq *vq)
> {
> return vq->started;
> }
>
> void vu_print_capabilities(void);
> -void vu_init(struct ctx *c, struct vu_dev *vdev);
> +void vu_init(struct ctx *c);
> void vu_cleanup(struct vu_dev *vdev);
> void vu_control_handler(struct vu_dev *vdev, int fd, uint32_t events);
> #endif /* VHOST_USER_H */
> diff --git a/virtio.c b/virtio.c
> index b23a68c4917f..6a97435e2965 100644
> --- a/virtio.c
> +++ b/virtio.c
> @@ -325,7 +325,6 @@ static bool vring_can_notify(const struct vu_dev *dev, struct vu_virtq *vq)
> * @dev: Vhost-user device
> * @vq: Virtqueue
> */
> -/* cppcheck-suppress unusedFunction */
> void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq)
> {
> if (!vring_can_notify(dev, vq)) {
> @@ -498,7 +497,6 @@ static int vu_queue_map_desc(struct vu_dev *dev, struct vu_virtq *vq, unsigned i
> *
> * Return: -1 if there is an error, 0 otherwise
> */
> -/* cppcheck-suppress unusedFunction */
> int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq, struct vu_virtq_element *elem)
> {
> unsigned int head;
> @@ -556,7 +554,6 @@ void vu_queue_unpop(struct vu_virtq *vq)
> * @vq: Virtqueue
> * @num: Number of elements to unpop
> */
> -/* cppcheck-suppress unusedFunction */
> bool vu_queue_rewind(struct vu_virtq *vq, unsigned int num)
> {
> if (num > vq->inuse)
> @@ -609,7 +606,6 @@ void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
> * @len: Size of the element
> * @idx: Used ring entry index
> */
> -/* cppcheck-suppress unusedFunction */
> void vu_queue_fill(struct vu_virtq *vq, const struct vu_virtq_element *elem,
> unsigned int len, unsigned int idx)
> {
> @@ -633,7 +629,6 @@ static inline void vring_used_idx_set(struct vu_virtq *vq, uint16_t val)
> * @vq: Virtqueue
> * @count: Number of entries to flush
> */
> -/* cppcheck-suppress unusedFunction */
> void vu_queue_flush(struct vu_virtq *vq, unsigned int count)
> {
> uint16_t old, new;
> diff --git a/vu_common.c b/vu_common.c
> new file mode 100644
> index 000000000000..2a18e9794b5c
> --- /dev/null
> +++ b/vu_common.c
> @@ -0,0 +1,282 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/* Copyright Red Hat
> + * Author: Laurent Vivier <lvivier@redhat.com>
> + *
> + * vu_common.c - vhost-user common UDP and TCP functions
> + */
> +
> +#include <unistd.h>
> +#include <sys/uio.h>
> +#include <sys/eventfd.h>
> +#include <linux/virtio_net.h>
> +
> +#include "util.h"
> +#include "passt.h"
> +#include "tap.h"
> +#include "vhost_user.h"
> +#include "pcap.h"
> +#include "vu_common.h"
> +
> +/**
> + * vu_packet_check_range() - Check if a given memory zone is contained in
> + * a mapped guest memory region
> + * @buf: Array of the available memory regions
> + * @offset: Offset of data range in packet descriptor
> + * @len: Length of desired data range
> + * @start: Start of the packet descriptor
> + *
> + * Return: 0 if the zone is in a mapped memory region, -1 otherwise
> + */
> +int vu_packet_check_range(void *buf, size_t offset, size_t len,
> + const char *start)
> +{
> + struct vu_dev_region *dev_region;
> +
> + for (dev_region = buf; dev_region->mmap_addr; dev_region++) {
> + /* NOLINTNEXTLINE(performance-no-int-to-ptr) */
> + char *m = (char *)dev_region->mmap_addr;
> +
> + if (m <= start &&
> + start + offset + len <= m + dev_region->mmap_offset +
> + dev_region->size)
> + return 0;
> + }
> +
> + return -1;
> +}
> +
> +/**
> + * vu_init_elem() - initialize an array of virtqueue elements with 1 iov in each
> + * @elem: Array of virtqueue elements to initialize
> + * @iov: Array of iovec to assign to virtqueue element
> + * @elem_cnt: Number of virtqueue elements
> + */
> +void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt)
> +{
> + int i;
> +
> + for (i = 0; i < elem_cnt; i++)
> + vu_set_element(&elem[i], NULL, &iov[i]);
> +}
> +
> +/**
> + * vu_collect() - collect virtio buffers from a given virtqueue
> + * @vdev: vhost-user device
> + * @vq: virtqueue to collect from
> + * @elem: Array of virtqueue elements
> + * each element must be initialized with one iovec entry
> + * in the in_sg array.
> + * @max_elem: Number of virtqueue elements in the array
> + * @size: Maximum size of the data in the frame
> + * @frame_size: The total size of the buffers (output)
> + *
> + * Return: number of elements used to contain the frame
> + */
> +int vu_collect(struct vu_dev *vdev, struct vu_virtq *vq,
> + struct vu_virtq_element *elem, int max_elem,
> + size_t size, size_t *frame_size)
> +{
> + size_t current_size = 0;
> + int elem_cnt = 0;
> +
> + while (current_size < size && elem_cnt < max_elem) {
> + struct iovec *iov;
> + int ret;
> +
> + ret = vu_queue_pop(vdev, vq, &elem[elem_cnt]);
> + if (ret < 0)
> + break;
> +
> + if (elem[elem_cnt].in_num < 1) {
> + warn("virtio-net receive queue contains no in buffers");
> + vu_queue_detach_element(vq);
> + break;
> + }
> +
> + iov = &elem[elem_cnt].in_sg[0];
> +
> + if (iov->iov_len > size - current_size)
> + iov->iov_len = size - current_size;
> +
> + current_size += iov->iov_len;
> + elem_cnt++;
> +
> + if (!vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
> + break;
> + }
> +
> + if (frame_size)
> + *frame_size = current_size;
> +
> + return elem_cnt;
> +}
> +
> +/**
> + * vu_set_vnethdr() - set virtio-net headers
> + * @vdev: vhost-user device
> + * @vnethdr: Address of the header to set
> + * @num_buffers: Number of guest buffers of the frame
> + */
> +void vu_set_vnethdr(const struct vu_dev *vdev,
> + struct virtio_net_hdr_mrg_rxbuf *vnethdr,
> + int num_buffers)
> +{
> + vnethdr->hdr = VU_HEADER;
> + if (vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
> + vnethdr->num_buffers = htole16(num_buffers);
> +}
> +
> +/**
> + * vu_flush() - flush all the collected buffers to the vhost-user interface
> + * @vdev: vhost-user device
> + * @vq: vhost-user virtqueue
> + * @elem: virtqueue elements array to send back to the virtqueue
> + * @elem_cnt: Length of the array
> + */
> +void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
> + struct vu_virtq_element *elem, int elem_cnt)
> +{
> + int i;
> +
> + for (i = 0; i < elem_cnt; i++)
> + vu_queue_fill(vq, &elem[i], elem[i].in_sg[0].iov_len, i);
> +
> + vu_queue_flush(vq, elem_cnt);
> + vu_queue_notify(vdev, vq);
> +}
> +
> +/**
> + * vu_handle_tx() - Receive data from the TX virtqueue
> + * @vdev: vhost-user device
> + * @index: index of the virtqueue
> + * @now: Current timestamp
> + */
> +static void vu_handle_tx(struct vu_dev *vdev, int index,
> + const struct timespec *now)
> +{
> + struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
> + struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
> + struct vu_virtq *vq = &vdev->vq[index];
> + int hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> + int out_sg_count;
> + int count;
> +
> + ASSERT(VHOST_USER_IS_QUEUE_TX(index));
> +
> + tap_flush_pools();
> +
> + count = 0;
> + out_sg_count = 0;
> + while (count < VIRTQUEUE_MAX_SIZE) {
> + int ret;
> +
> + vu_set_element(&elem[count], &out_sg[out_sg_count], NULL);
> + ret = vu_queue_pop(vdev, vq, &elem[count]);
> + if (ret < 0)
> + break;
> + out_sg_count += elem[count].out_num;
> +
> + if (elem[count].out_num < 1) {
> + warn("virtio-net transmit queue contains no out buffers");
> + break;
> + }
> + ASSERT(elem[count].out_num == 1);
> +
> + tap_add_packet(vdev->context,
> + elem[count].out_sg[0].iov_len - hdrlen,
> + (char *)elem[count].out_sg[0].iov_base + hdrlen);
> + count++;
> + }
> + tap_handler(vdev->context, now);
> +
> + if (count) {
> + int i;
> +
> + for (i = 0; i < count; i++)
> + vu_queue_fill(vq, &elem[i], 0, i);
> + vu_queue_flush(vq, count);
> + vu_queue_notify(vdev, vq);
> + }
> +}
> +
> +/**
> + * vu_kick_cb() - Called on a kick event to start receiving data
> + * @vdev: vhost-user device
> + * @ref: epoll reference information
> + * @now: Current timestamp
> + */
> +void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
> + const struct timespec *now)
> +{
> + eventfd_t kick_data;
> + ssize_t rc;
> +
> + rc = eventfd_read(ref.fd, &kick_data);
> + if (rc == -1)
> + die_perror("vhost-user kick eventfd_read()");
> +
> + debug("vhost-user: got kick_data: %016"PRIx64" idx: %d",
> + kick_data, ref.queue);
> + if (VHOST_USER_IS_QUEUE_TX(ref.queue))
> + vu_handle_tx(vdev, ref.queue, now);
> +}
> +
> +/**
> + * vu_send_single() - Send a buffer to the front-end using the RX virtqueue
> + * @c: execution context
> + * @buf: address of the buffer
> + * @size: size of the buffer
> + *
> + * Return: number of bytes sent, -1 if there is an error
> + */
> +int vu_send_single(const struct ctx *c, const void *buf, size_t size)
> +{
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
> + struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
> + size_t total;
> + int elem_cnt;
> + int i;
> +
> + debug("vu_send_single size %zu", size);
> +
> + if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
> + debug("Got packet, but RX virtqueue not usable yet");
> + return -1;
> + }
> +
> + vu_init_elem(elem, in_sg, VIRTQUEUE_MAX_SIZE);
> +
> + size += sizeof(struct virtio_net_hdr_mrg_rxbuf);
> + elem_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE, size, &total);
> + if (total < size) {
> + debug("vu_send_single: no space to send the data "
> + "elem_cnt %d size %zd", elem_cnt, total);
> + goto err;
> + }
> +
> + vu_set_vnethdr(vdev, in_sg[0].iov_base, elem_cnt);
> +
> + total -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
> +
> + /* copy data from the buffer to the iovec */
> + iov_from_buf(in_sg, elem_cnt, sizeof(struct virtio_net_hdr_mrg_rxbuf),
> + buf, total);
> +
> + if (*c->pcap) {
> + pcap_iov(in_sg, elem_cnt,
> + sizeof(struct virtio_net_hdr_mrg_rxbuf));
> + }
> +
> + vu_flush(vdev, vq, elem, elem_cnt);
> +
> + debug("vhost-user sent %zu", total);
> +
> + return total;
> +err:
> + for (i = 0; i < elem_cnt; i++)
> + vu_queue_detach_element(vq);
> +
> + return -1;
> +}
> diff --git a/vu_common.h b/vu_common.h
> new file mode 100644
> index 000000000000..901d97216c67
> --- /dev/null
> +++ b/vu_common.h
> @@ -0,0 +1,60 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later
> + * Copyright Red Hat
> + * Author: Laurent Vivier <lvivier@redhat.com>
> + *
> + * vhost-user common UDP and TCP functions
> + */
> +
> +#ifndef VU_COMMON_H
> +#define VU_COMMON_H
> +#include <linux/virtio_net.h>
> +
> +static inline void *vu_eth(void *base)
> +{
> + return ((char *)base + sizeof(struct virtio_net_hdr_mrg_rxbuf));
> +}
> +
> +static inline void *vu_ip(void *base)
> +{
> + return (struct ethhdr *)vu_eth(base) + 1;
> +}
> +
> +static inline void *vu_payloadv4(void *base)
> +{
> + return (struct iphdr *)vu_ip(base) + 1;
> +}
> +
> +static inline void *vu_payloadv6(void *base)
> +{
> + return (struct ipv6hdr *)vu_ip(base) + 1;
> +}
> +
> +/**
> + * vu_set_element() - Initialize a vu_virtq_element
> + * @elem: Element to initialize
> + * @out_sg: One out iovec entry to set in elem
> + * @in_sg: One in iovec entry to set in elem
> + */
> +static inline void vu_set_element(struct vu_virtq_element *elem,
> + struct iovec *out_sg, struct iovec *in_sg)
> +{
> + elem->out_num = !!out_sg;
> + elem->out_sg = out_sg;
> + elem->in_num = !!in_sg;
> + elem->in_sg = in_sg;
> +}
> +
> +void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov,
> + int elem_cnt);
> +int vu_collect(struct vu_dev *vdev, struct vu_virtq *vq,
> + struct vu_virtq_element *elem, int max_elem, size_t size,
> + size_t *frame_size);
> +void vu_set_vnethdr(const struct vu_dev *vdev,
> + struct virtio_net_hdr_mrg_rxbuf *vnethdr,
> + int num_buffers);
> +void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
> + struct vu_virtq_element *elem, int elem_cnt);
> +void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
> + const struct timespec *now);
> +int vu_send_single(const struct ctx *c, const void *buf, size_t size);
> +#endif /* VU_COMMON_H */
--
David Gibson (he or they) | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you, not the other way
| around.
http://www.ozlabs.org/~dgibson
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-26 5:24 ` David Gibson
@ 2024-11-28 12:57 ` Laurent Vivier
0 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-28 12:57 UTC (permalink / raw)
To: David Gibson; +Cc: passt-dev
On 26/11/2024 06:24, David Gibson wrote:
>> static int udp_vu_sock_recv(const struct ctx *c, int s, uint32_t events,
>> + bool v6, ssize_t *dlen)
>> +{
>> + struct vu_dev *vdev = c->vdev;
>> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
>> + int iov_cnt, idx, iov_used;
>> + struct msghdr msg = { 0 };
>> + size_t off, hdrlen;
>> +
>> + ASSERT(!c->no_udp);
>> +
>> + if (!(events & EPOLLIN))
>> + return 0;
>> +
>> + /* compute L2 header length */
>> + hdrlen = udp_vu_hdrlen(v6);
>> +
>> + vu_init_elem(elem, iov_vu, VIRTQUEUE_MAX_SIZE);
>> +
>> + iov_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE,
>> + IP_MAX_MTU - sizeof(struct udphdr) + hdrlen,
> I don't think this calculation is quite right, though it's probably
> safe. At least for IPv4, IP_MAX_MTU includes the IP header itself,
> but then you count that again in hdrlen.
I think it would be semantically more correct to use "ETH_MAX_MTU +
sizeof(struct virtio_net_hdr_mrg_rxbuf)", but as ETH_MAX_MTU and IP_MAX_MTU are both
defined to USHRT_MAX I'm not sure how to compute the segment size...
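As a standalone sketch (this is not code from the series; the macro
values follow the definitions mentioned above, hdrlen stands for the
value returned by udp_vu_hdrlen(), and budget_current() /
budget_alternative() are made-up names for illustration), the two
budgets compare like this:

	#include <limits.h>		/* USHRT_MAX */
	#include <netinet/udp.h>	/* struct udphdr */
	#include <linux/virtio_net.h>	/* struct virtio_net_hdr_mrg_rxbuf */

	#define IP_MAX_MTU	USHRT_MAX	/* as in passt's ip.h */
	#define ETH_MAX_MTU	USHRT_MAX	/* as in linux/if_ether.h */

	/* hdrlen: vnet + Ethernet + IP + UDP header bytes */
	static size_t budget_current(size_t hdrlen)
	{
		/* IP_MAX_MTU already covers the IPv4 header, which
		 * hdrlen counts again, so this overshoots by one IPv4
		 * header */
		return IP_MAX_MTU - sizeof(struct udphdr) + hdrlen;
	}

	static size_t budget_alternative(void)
	{
		/* largest Ethernet frame plus the virtio-net header,
		 * independent of the IP version */
		return ETH_MAX_MTU + sizeof(struct virtio_net_hdr_mrg_rxbuf);
	}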
Thanks,
Laurent
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-22 16:43 ` [PATCH v14 7/9] vhost-user: add vhost-user Laurent Vivier
2024-11-26 5:14 ` Stefano Brivio
2024-11-26 5:24 ` David Gibson
@ 2024-11-27 4:47 ` Stefano Brivio
2024-11-27 9:09 ` Laurent Vivier
2 siblings, 1 reply; 26+ messages in thread
From: Stefano Brivio @ 2024-11-27 4:47 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
On Fri, 22 Nov 2024 17:43:34 +0100
Laurent Vivier <lvivier@redhat.com> wrote:
> +/**
> + * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
> + * @c: Execution context
> + * @conn: Connection pointer
> + * @flags: TCP flags: if not set, send segment only if ACK is due
> + *
> + * Return: negative error code on connection reset, 0 otherwise
> + */
> +int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
> +{
> + struct vu_dev *vdev = c->vdev;
> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> + const struct flowside *tapside = TAPFLOW(conn);
> + size_t l2len, l4len, optlen, hdrlen;
> + struct vu_virtq_element flags_elem[2];
> + struct tcp_payload_t *payload;
> + struct ipv6hdr *ip6h = NULL;
> + struct iovec flags_iov[2];
> + struct iphdr *iph = NULL;
> + struct ethhdr *eh;
> + uint32_t seq;
> + int elem_cnt;
> + int nb_ack;
> + int ret;
> +
> + hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
> +
> + vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
> +
> + elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
> + hdrlen + sizeof(struct tcp_syn_opts), NULL);
Oops, I made this crash, by starting a number of iperf3 client threads
on the host:
$ iperf3 -c localhost -p 6001 -Z -l 500 -w 256M -t 600 -P20
with matching server in the guest, then terminating QEMU while the test
is running.
Details (I saw it first, then I reproduced it under gdb):
accepted connection from PID 3115463
NDP: received RS, sending RA
DHCP: offer to discover
from 52:54:00:12:34:56
DHCP: ack to request
from 52:54:00:12:34:56
NDP: sending unsolicited RA, next in 212s
Client connection closed
Program received signal SIGSEGV, Segmentation fault.
0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
(gdb) list
133 *
134 * Return: the available ring index of the given virtqueue
135 */
136 static inline uint16_t vring_avail_idx(struct vu_virtq *vq)
137 {
138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
139
140 return vq->shadow_avail_idx;
141 }
142
(gdb) bt
#0 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
#1 vu_queue_empty (vq=vq@entry=0x555559343f10 <vdev_storage+1296>) at virtio.c:290
#2 vu_queue_pop (dev=dev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510) at virtio.c:505
#3 0x0000555555588c8c in vu_collect (vdev=vdev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510, max_elem=max_elem@entry=1,
size=size@entry=74, frame_size=frame_size@entry=0x0) at vu_common.c:86
#4 0x000055555557e00e in tcp_vu_send_flag (c=0x7ffffff6f7a0, conn=0x5555555bd2d0 <flowtab+2160>, flags=4) at tcp_vu.c:116
#5 0x0000555555578125 in tcp_send_flag (flags=4, conn=0x5555555bd2d0 <flowtab+2160>, c=0x7ffffff6f7a0) at tcp.c:1278
#6 tcp_rst_do (conn=<optimized out>, c=<optimized out>) at tcp.c:1293
#7 tcp_timer_handler (c=c@entry=0x7ffffff6f7a0, ref=..., ref@entry=...) at tcp.c:2266
#8 0x0000555555558f26 in main (argc=<optimized out>, argv=<optimized out>) at passt.c:342
(gdb) p *vq
$1 = {vring = {num = 256, desc = 0x0, avail = 0x0, used = 0x0, log_guest_addr = 4338774592, flags = 0}, last_avail_idx = 35133, shadow_avail_idx = 35133, used_idx = 35133, signalled_used = 0,
signalled_used_valid = false, notification = true, inuse = 0, call_fd = -1, kick_fd = -1, err_fd = -1, enable = 1, started = false, vra = {index = 0, flags = 0, desc_user_addr = 139660501995520,
used_user_addr = 139660502000192, avail_user_addr = 139660501999616, log_guest_addr = 4338774592}}
(gdb) p *vq->vring.avail
Cannot access memory at address 0x0
...so we're sending a RST segment to the guest, but the ring doesn't
exist anymore.
By the way, I still have the gdb session running, if you need something
else out of it.
Now, I guess we should eventually introduce a more comprehensive
handling of the case where the guest suddenly terminates (not specific
to vhost-user), but given that we have vu_cleanup() working as expected
in this case, I wonder if we shouldn't simply avoid calling
vring_avail_idx() (it has a single caller) by checking for !vring.avail
in the caller, or something like that.
We can also handle this as a follow-up patch, it's not something that
would stand in the way of a bisect, but I wouldn't push the series
without a fix immediately following it.
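Concretely, a minimal sketch of that check in the single caller,
vu_queue_empty() (abridged; the closing comparison is assumed from the
existing vring_avail_idx() usage, and the same guard appears in the
diff later in this thread):

	bool vu_queue_empty(struct vu_virtq *vq)
	{
		/* once the guest is gone, vu_cleanup() has unmapped
		 * the rings and vring.avail is NULL: report the queue
		 * as empty instead of dereferencing it in
		 * vring_avail_idx()
		 */
		if (!vq->vring.avail)
			return true;

		if (vq->shadow_avail_idx != vq->last_avail_idx)
			return false;

		return vring_avail_idx(vq) == vq->last_avail_idx;
	}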
--
Stefano
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-27 4:47 ` Stefano Brivio
@ 2024-11-27 9:09 ` Laurent Vivier
2024-11-27 9:45 ` Stefano Brivio
0 siblings, 1 reply; 26+ messages in thread
From: Laurent Vivier @ 2024-11-27 9:09 UTC (permalink / raw)
To: Stefano Brivio; +Cc: passt-dev
On 27/11/2024 05:47, Stefano Brivio wrote:
> On Fri, 22 Nov 2024 17:43:34 +0100
> Laurent Vivier <lvivier@redhat.com> wrote:
>
>> +/**
>> + * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
>> + * @c: Execution context
>> + * @conn: Connection pointer
>> + * @flags: TCP flags: if not set, send segment only if ACK is due
>> + *
>> + * Return: negative error code on connection reset, 0 otherwise
>> + */
>> +int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
>> +{
>> + struct vu_dev *vdev = c->vdev;
>> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
>> + const struct flowside *tapside = TAPFLOW(conn);
>> + size_t l2len, l4len, optlen, hdrlen;
>> + struct vu_virtq_element flags_elem[2];
>> + struct tcp_payload_t *payload;
>> + struct ipv6hdr *ip6h = NULL;
>> + struct iovec flags_iov[2];
>> + struct iphdr *iph = NULL;
>> + struct ethhdr *eh;
>> + uint32_t seq;
>> + int elem_cnt;
>> + int nb_ack;
>> + int ret;
>> +
>> + hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
>> +
>> + vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
>> +
>> + elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
>> + hdrlen + sizeof(struct tcp_syn_opts), NULL);
>
> Oops, I made this crash, by starting a number of iperf3 client threads
> on the host:
>
> $ iperf3 -c localhost -p 6001 -Z -l 500 -w 256M -t 600 -P20
>
> with matching server in the guest, then terminating QEMU while the test
> is running.
>
> Details (I saw it first, then I reproduced it under gdb):
>
> accepted connection from PID 3115463
> NDP: received RS, sending RA
> DHCP: offer to discover
> from 52:54:00:12:34:56
> DHCP: ack to request
> from 52:54:00:12:34:56
> NDP: sending unsolicited RA, next in 212s
> Client connection closed
>
> Program received signal SIGSEGV, Segmentation fault.
> 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
> 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
> (gdb) list
> 133 *
> 134 * Return: the available ring index of the given virtqueue
> 135 */
> 136 static inline uint16_t vring_avail_idx(struct vu_virtq *vq)
> 137 {
> 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
> 139
> 140 return vq->shadow_avail_idx;
> 141 }
> 142
> (gdb) bt
> #0 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
> #1 vu_queue_empty (vq=vq@entry=0x555559343f10 <vdev_storage+1296>) at virtio.c:290
> #2 vu_queue_pop (dev=dev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510) at virtio.c:505
> #3 0x0000555555588c8c in vu_collect (vdev=vdev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510, max_elem=max_elem@entry=1,
> size=size@entry=74, frame_size=frame_size@entry=0x0) at vu_common.c:86
> #4 0x000055555557e00e in tcp_vu_send_flag (c=0x7ffffff6f7a0, conn=0x5555555bd2d0 <flowtab+2160>, flags=4) at tcp_vu.c:116
> #5 0x0000555555578125 in tcp_send_flag (flags=4, conn=0x5555555bd2d0 <flowtab+2160>, c=0x7ffffff6f7a0) at tcp.c:1278
> #6 tcp_rst_do (conn=<optimized out>, c=<optimized out>) at tcp.c:1293
> #7 tcp_timer_handler (c=c@entry=0x7ffffff6f7a0, ref=..., ref@entry=...) at tcp.c:2266
> #8 0x0000555555558f26 in main (argc=<optimized out>, argv=<optimized out>) at passt.c:342
> (gdb) p *vq
> $1 = {vring = {num = 256, desc = 0x0, avail = 0x0, used = 0x0, log_guest_addr = 4338774592, flags = 0}, last_avail_idx = 35133, shadow_avail_idx = 35133, used_idx = 35133, signalled_used = 0,
> signalled_used_valid = false, notification = true, inuse = 0, call_fd = -1, kick_fd = -1, err_fd = -1, enable = 1, started = false, vra = {index = 0, flags = 0, desc_user_addr = 139660501995520,
> used_user_addr = 139660502000192, avail_user_addr = 139660501999616, log_guest_addr = 4338774592}}
> (gdb) p *vq->vring.avail
> Cannot access memory at address 0x0
>
> ...so we're sending a RST segment to the guest, but the ring doesn't
> exist anymore.
>
> By the way, I still have the gdb session running, if you need something
> else out of it.
>
> Now, I guess we should eventually introduce a more comprehensive
> handling of the case where the guest suddenly terminates (not specific
> to vhost-user), but given that we have vu_cleanup() working as expected
> in this case, I wonder if we shouldn't simply avoid calling
> vring_avail_idx() (it has a single caller) by checking for !vring.avail
> in the caller, or something like that.
>
Yes, I think it's the lines I removed during the reviews:
if (!vq->vring.avail)
return true;
Could you try to checkout virtio.c from v11?
Thanks,
Laurent
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-27 9:09 ` Laurent Vivier
@ 2024-11-27 9:45 ` Stefano Brivio
2024-11-27 9:48 ` Laurent Vivier
0 siblings, 1 reply; 26+ messages in thread
From: Stefano Brivio @ 2024-11-27 9:45 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
On Wed, 27 Nov 2024 10:09:53 +0100
Laurent Vivier <lvivier@redhat.com> wrote:
> On 27/11/2024 05:47, Stefano Brivio wrote:
> > On Fri, 22 Nov 2024 17:43:34 +0100
> > Laurent Vivier <lvivier@redhat.com> wrote:
> >
> >> +/**
> >> + * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
> >> + * @c: Execution context
> >> + * @conn: Connection pointer
> >> + * @flags: TCP flags: if not set, send segment only if ACK is due
> >> + *
> >> + * Return: negative error code on connection reset, 0 otherwise
> >> + */
> >> +int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
> >> +{
> >> + struct vu_dev *vdev = c->vdev;
> >> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> >> + const struct flowside *tapside = TAPFLOW(conn);
> >> + size_t l2len, l4len, optlen, hdrlen;
> >> + struct vu_virtq_element flags_elem[2];
> >> + struct tcp_payload_t *payload;
> >> + struct ipv6hdr *ip6h = NULL;
> >> + struct iovec flags_iov[2];
> >> + struct iphdr *iph = NULL;
> >> + struct ethhdr *eh;
> >> + uint32_t seq;
> >> + int elem_cnt;
> >> + int nb_ack;
> >> + int ret;
> >> +
> >> + hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
> >> +
> >> + vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
> >> +
> >> + elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
> >> + hdrlen + sizeof(struct tcp_syn_opts), NULL);
> >
> > Oops, I made this crash, by starting a number of iperf3 client threads
> > on the host:
> >
> > $ iperf3 -c localhost -p 6001 -Z -l 500 -w 256M -t 600 -P20
> >
> > with matching server in the guest, then terminating QEMU while the test
> > is running.
> >
> > Details (I saw it first, then I reproduced it under gdb):
> >
> > accepted connection from PID 3115463
> > NDP: received RS, sending RA
> > DHCP: offer to discover
> > from 52:54:00:12:34:56
> > DHCP: ack to request
> > from 52:54:00:12:34:56
> > NDP: sending unsolicited RA, next in 212s
> > Client connection closed
> >
> > Program received signal SIGSEGV, Segmentation fault.
> > 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
> > 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
> > (gdb) list
> > 133 *
> > 134 * Return: the available ring index of the given virtqueue
> > 135 */
> > 136 static inline uint16_t vring_avail_idx(struct vu_virtq *vq)
> > 137 {
> > 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
> > 139
> > 140 return vq->shadow_avail_idx;
> > 141 }
> > 142
> > (gdb) bt
> > #0 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
> > #1 vu_queue_empty (vq=vq@entry=0x555559343f10 <vdev_storage+1296>) at virtio.c:290
> > #2 vu_queue_pop (dev=dev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510) at virtio.c:505
> > #3 0x0000555555588c8c in vu_collect (vdev=vdev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510, max_elem=max_elem@entry=1,
> > size=size@entry=74, frame_size=frame_size@entry=0x0) at vu_common.c:86
> > #4 0x000055555557e00e in tcp_vu_send_flag (c=0x7ffffff6f7a0, conn=0x5555555bd2d0 <flowtab+2160>, flags=4) at tcp_vu.c:116
> > #5 0x0000555555578125 in tcp_send_flag (flags=4, conn=0x5555555bd2d0 <flowtab+2160>, c=0x7ffffff6f7a0) at tcp.c:1278
> > #6 tcp_rst_do (conn=<optimized out>, c=<optimized out>) at tcp.c:1293
> > #7 tcp_timer_handler (c=c@entry=0x7ffffff6f7a0, ref=..., ref@entry=...) at tcp.c:2266
> > #8 0x0000555555558f26 in main (argc=<optimized out>, argv=<optimized out>) at passt.c:342
> > (gdb) p *vq
> > $1 = {vring = {num = 256, desc = 0x0, avail = 0x0, used = 0x0, log_guest_addr = 4338774592, flags = 0}, last_avail_idx = 35133, shadow_avail_idx = 35133, used_idx = 35133, signalled_used = 0,
> > signalled_used_valid = false, notification = true, inuse = 0, call_fd = -1, kick_fd = -1, err_fd = -1, enable = 1, started = false, vra = {index = 0, flags = 0, desc_user_addr = 139660501995520,
> > used_user_addr = 139660502000192, avail_user_addr = 139660501999616, log_guest_addr = 4338774592}}
> > (gdb) p *vq->vring.avail
> > Cannot access memory at address 0x0
> >
> > ...so we're sending a RST segment to the guest, but the ring doesn't
> > exist anymore.
> >
> > By the way, I still have the gdb session running, if you need something
> > else out of it.
> >
> > Now, I guess we should eventually introduce a more comprehensive
> > handling of the case where the guest suddenly terminates (not specific
> > to vhost-user), but given that we have vu_cleanup() working as expected
> > in this case, I wonder if we shouldn't simply avoid calling
> > vring_avail_idx() (it has a single caller) by checking for !vring.avail
> > in the caller, or something like that.
> >
>
> Yes, I think it's the lines I removed during the reviews:
>
> if (!vq->vring.avail)
> return true;
Ah, right:
https://archives.passt.top/passt-dev/20241114163859.7eeafa38@elisabeth/
...so, at least in our case, it's more than "sanity checks" after all.
:) Well, I guess it depends on the definition.
> Could you try to checkout virtio.c from v11?
That would take a rather lengthy rebase, but I tried to reintroduce all
the checks you had:
--
diff --git a/virtio.c b/virtio.c
index 6a97435..0598ff4 100644
--- a/virtio.c
+++ b/virtio.c
@@ -284,6 +284,9 @@ static int virtqueue_read_next_desc(const struct vring_desc *desc,
*/
bool vu_queue_empty(struct vu_virtq *vq)
{
+ if (!vq->vring.avail)
+ return true;
+
if (vq->shadow_avail_idx != vq->last_avail_idx)
return false;
@@ -327,6 +330,9 @@ static bool vring_can_notify(const struct vu_dev *dev, struct vu_virtq *vq)
*/
void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq)
{
+ if (!vq->vring.avail)
+ return;
+
if (!vring_can_notify(dev, vq)) {
debug("vhost-user: virtqueue can skip notify...");
return;
@@ -502,6 +508,9 @@ int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq, struct vu_virtq_elemen
unsigned int head;
int ret;
+ if (!vq->vring.avail)
+ return -1;
+
if (vu_queue_empty(vq))
return -1;
@@ -591,6 +600,9 @@ void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
{
struct vring_used_elem uelem;
+ if (!vq->vring.avail)
+ return;
+
idx = (idx + vq->used_idx) % vq->vring.num;
uelem.id = htole32(index);
@@ -633,6 +645,9 @@ void vu_queue_flush(struct vu_virtq *vq, unsigned int count)
{
uint16_t old, new;
+ if (!vq->vring.avail)
+ return;
+
/* Make sure buffer is written before we update index. */
smp_wmb();
--
and it's all fine with those, I tried doing a few nasty things and
didn't observe any issue.
Any check I missed? Do you want to submit it as follow-up patch? I can
also do that. I'd rather (still) avoid a re-post of v14 if possible.
--
Stefano
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-27 9:45 ` Stefano Brivio
@ 2024-11-27 9:48 ` Laurent Vivier
2024-11-27 10:03 ` Stefano Brivio
0 siblings, 1 reply; 26+ messages in thread
From: Laurent Vivier @ 2024-11-27 9:48 UTC (permalink / raw)
To: Stefano Brivio; +Cc: passt-dev
On 27/11/2024 10:45, Stefano Brivio wrote:
> On Wed, 27 Nov 2024 10:09:53 +0100
> Laurent Vivier <lvivier@redhat.com> wrote:
>
>> On 27/11/2024 05:47, Stefano Brivio wrote:
>>> On Fri, 22 Nov 2024 17:43:34 +0100
>>> Laurent Vivier <lvivier@redhat.com> wrote:
>>>
>>>> +/**
>>>> + * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
>>>> + * @c: Execution context
>>>> + * @conn: Connection pointer
>>>> + * @flags: TCP flags: if not set, send segment only if ACK is due
>>>> + *
>>>> + * Return: negative error code on connection reset, 0 otherwise
>>>> + */
>>>> +int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
>>>> +{
>>>> + struct vu_dev *vdev = c->vdev;
>>>> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
>>>> + const struct flowside *tapside = TAPFLOW(conn);
>>>> + size_t l2len, l4len, optlen, hdrlen;
>>>> + struct vu_virtq_element flags_elem[2];
>>>> + struct tcp_payload_t *payload;
>>>> + struct ipv6hdr *ip6h = NULL;
>>>> + struct iovec flags_iov[2];
>>>> + struct iphdr *iph = NULL;
>>>> + struct ethhdr *eh;
>>>> + uint32_t seq;
>>>> + int elem_cnt;
>>>> + int nb_ack;
>>>> + int ret;
>>>> +
>>>> + hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
>>>> +
>>>> + vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
>>>> +
>>>> + elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
>>>> + hdrlen + sizeof(struct tcp_syn_opts), NULL);
>>>
>>> Oops, I made this crash, by starting a number of iperf3 client threads
>>> on the host:
>>>
>>> $ iperf3 -c localhost -p 6001 -Z -l 500 -w 256M -t 600 -P20
>>>
>>> with matching server in the guest, then terminating QEMU while the test
>>> is running.
>>>
>>> Details (I saw it first, then I reproduced it under gdb):
>>>
>>> accepted connection from PID 3115463
>>> NDP: received RS, sending RA
>>> DHCP: offer to discover
>>> from 52:54:00:12:34:56
>>> DHCP: ack to request
>>> from 52:54:00:12:34:56
>>> NDP: sending unsolicited RA, next in 212s
>>> Client connection closed
>>>
>>> Program received signal SIGSEGV, Segmentation fault.
>>> 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
>>> 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
>>> (gdb) list
>>> 133 *
>>> 134 * Return: the available ring index of the given virtqueue
>>> 135 */
>>> 136 static inline uint16_t vring_avail_idx(struct vu_virtq *vq)
>>> 137 {
>>> 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
>>> 139
>>> 140 return vq->shadow_avail_idx;
>>> 141 }
>>> 142
>>> (gdb) bt
>>> #0 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
>>> #1 vu_queue_empty (vq=vq@entry=0x555559343f10 <vdev_storage+1296>) at virtio.c:290
>>> #2 vu_queue_pop (dev=dev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510) at virtio.c:505
>>> #3 0x0000555555588c8c in vu_collect (vdev=vdev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510, max_elem=max_elem@entry=1,
>>> size=size@entry=74, frame_size=frame_size@entry=0x0) at vu_common.c:86
>>> #4 0x000055555557e00e in tcp_vu_send_flag (c=0x7ffffff6f7a0, conn=0x5555555bd2d0 <flowtab+2160>, flags=4) at tcp_vu.c:116
>>> #5 0x0000555555578125 in tcp_send_flag (flags=4, conn=0x5555555bd2d0 <flowtab+2160>, c=0x7ffffff6f7a0) at tcp.c:1278
>>> #6 tcp_rst_do (conn=<optimized out>, c=<optimized out>) at tcp.c:1293
>>> #7 tcp_timer_handler (c=c@entry=0x7ffffff6f7a0, ref=..., ref@entry=...) at tcp.c:2266
>>> #8 0x0000555555558f26 in main (argc=<optimized out>, argv=<optimized out>) at passt.c:342
>>> (gdb) p *vq
>>> $1 = {vring = {num = 256, desc = 0x0, avail = 0x0, used = 0x0, log_guest_addr = 4338774592, flags = 0}, last_avail_idx = 35133, shadow_avail_idx = 35133, used_idx = 35133, signalled_used = 0,
>>> signalled_used_valid = false, notification = true, inuse = 0, call_fd = -1, kick_fd = -1, err_fd = -1, enable = 1, started = false, vra = {index = 0, flags = 0, desc_user_addr = 139660501995520,
>>> used_user_addr = 139660502000192, avail_user_addr = 139660501999616, log_guest_addr = 4338774592}}
>>> (gdb) p *vq->vring.avail
>>> Cannot access memory at address 0x0
>>>
>>> ...so we're sending a RST segment to the guest, but the ring doesn't
>>> exist anymore.
>>>
>>> By the way, I still have the gdb session running, if you need something
>>> else out of it.
>>>
>>> Now, I guess we should eventually introduce a more comprehensive
>>> handling of the case where the guest suddenly terminates (not specific
>>> to vhost-user), but given that we have vu_cleanup() working as expected
>>> in this case, I wonder if we shouldn't simply avoid calling
>>> vring_avail_idx() (it has a single caller) by checking for !vring.avail
>>> in the caller, or something like that.
>>>
>>
>> Yes, I think it's the lines I removed during the reviews:
>>
>> if (!vq->vring.avail)
>> return true;
>
> Ah, right:
>
> https://archives.passt.top/passt-dev/20241114163859.7eeafa38@elisabeth/
>
> ...so, at least in our case, it's more than "sanity checks" after all.
> :) Well, I guess it depends on the definition.
>
>> Could you try to checkout virtio.c from v11?
>
> That would take a rather lengthy rebase, but I tried to reintroduce all
> the checks you had:
>
> --
> diff --git a/virtio.c b/virtio.c
> index 6a97435..0598ff4 100644
> --- a/virtio.c
> +++ b/virtio.c
> @@ -284,6 +284,9 @@ static int virtqueue_read_next_desc(const struct vring_desc *desc,
> */
> bool vu_queue_empty(struct vu_virtq *vq)
> {
> + if (!vq->vring.avail)
> + return true;
> +
> if (vq->shadow_avail_idx != vq->last_avail_idx)
> return false;
>
> @@ -327,6 +330,9 @@ static bool vring_can_notify(const struct vu_dev *dev, struct vu_virtq *vq)
> */
> void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq)
> {
> + if (!vq->vring.avail)
> + return;
> +
> if (!vring_can_notify(dev, vq)) {
> debug("vhost-user: virtqueue can skip notify...");
> return;
> @@ -502,6 +508,9 @@ int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq, struct vu_virtq_elemen
> unsigned int head;
> int ret;
>
> + if (!vq->vring.avail)
> + return -1;
> +
> if (vu_queue_empty(vq))
> return -1;
>
> @@ -591,6 +600,9 @@ void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
> {
> struct vring_used_elem uelem;
>
> + if (!vq->vring.avail)
> + return;
> +
> idx = (idx + vq->used_idx) % vq->vring.num;
>
> uelem.id = htole32(index);
> @@ -633,6 +645,9 @@ void vu_queue_flush(struct vu_virtq *vq, unsigned int count)
> {
> uint16_t old, new;
>
> + if (!vq->vring.avail)
> + return;
> +
> /* Make sure buffer is written before we update index. */
> smp_wmb();
>
> --
>
> and it's all fine with those, I tried doing a few nasty things and
> didn't observe any issue.
>
> Any check I missed? Do you want to submit it as follow-up patch? I can
> also do that. I'd rather (still) avoid a re-post of v14 if possible.
>
As you prefer. Let me know.
Thanks,
Laurent
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-27 9:48 ` Laurent Vivier
@ 2024-11-27 10:03 ` Stefano Brivio
2024-11-27 10:11 ` Laurent Vivier
0 siblings, 1 reply; 26+ messages in thread
From: Stefano Brivio @ 2024-11-27 10:03 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
On Wed, 27 Nov 2024 10:48:41 +0100
Laurent Vivier <lvivier@redhat.com> wrote:
> On 27/11/2024 10:45, Stefano Brivio wrote:
> > On Wed, 27 Nov 2024 10:09:53 +0100
> > Laurent Vivier <lvivier@redhat.com> wrote:
> >
> >> On 27/11/2024 05:47, Stefano Brivio wrote:
> >>> On Fri, 22 Nov 2024 17:43:34 +0100
> >>> Laurent Vivier <lvivier@redhat.com> wrote:
> >>>
> >>>> +/**
> >>>> + * tcp_vu_send_flag() - Send segment with flags to vhost-user (no payload)
> >>>> + * @c: Execution context
> >>>> + * @conn: Connection pointer
> >>>> + * @flags: TCP flags: if not set, send segment only if ACK is due
> >>>> + *
> >>>> + * Return: negative error code on connection reset, 0 otherwise
> >>>> + */
> >>>> +int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
> >>>> +{
> >>>> + struct vu_dev *vdev = c->vdev;
> >>>> + struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
> >>>> + const struct flowside *tapside = TAPFLOW(conn);
> >>>> + size_t l2len, l4len, optlen, hdrlen;
> >>>> + struct vu_virtq_element flags_elem[2];
> >>>> + struct tcp_payload_t *payload;
> >>>> + struct ipv6hdr *ip6h = NULL;
> >>>> + struct iovec flags_iov[2];
> >>>> + struct iphdr *iph = NULL;
> >>>> + struct ethhdr *eh;
> >>>> + uint32_t seq;
> >>>> + int elem_cnt;
> >>>> + int nb_ack;
> >>>> + int ret;
> >>>> +
> >>>> + hdrlen = tcp_vu_hdrlen(CONN_V6(conn));
> >>>> +
> >>>> + vu_set_element(&flags_elem[0], NULL, &flags_iov[0]);
> >>>> +
> >>>> + elem_cnt = vu_collect(vdev, vq, &flags_elem[0], 1,
> >>>> + hdrlen + sizeof(struct tcp_syn_opts), NULL);
> >>>
> >>> Oops, I made this crash, by starting a number of iperf3 client threads
> >>> on the host:
> >>>
> >>> $ iperf3 -c localhost -p 6001 -Z -l 500 -w 256M -t 600 -P20
> >>>
> >>> with matching server in the guest, then terminating QEMU while the test
> >>> is running.
> >>>
> >>> Details (I saw it first, then I reproduced it under gdb):
> >>>
> >>> accepted connection from PID 3115463
> >>> NDP: received RS, sending RA
> >>> DHCP: offer to discover
> >>> from 52:54:00:12:34:56
> >>> DHCP: ack to request
> >>> from 52:54:00:12:34:56
> >>> NDP: sending unsolicited RA, next in 212s
> >>> Client connection closed
> >>>
> >>> Program received signal SIGSEGV, Segmentation fault.
> >>> 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
> >>> 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
> >>> (gdb) list
> >>> 133 *
> >>> 134 * Return: the available ring index of the given virtqueue
> >>> 135 */
> >>> 136 static inline uint16_t vring_avail_idx(struct vu_virtq *vq)
> >>> 137 {
> >>> 138 vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
> >>> 139
> >>> 140 return vq->shadow_avail_idx;
> >>> 141 }
> >>> 142
> >>> (gdb) bt
> >>> #0 0x00005555555884f5 in vring_avail_idx (vq=0x555559343f10 <vdev_storage+1296>) at virtio.c:138
> >>> #1 vu_queue_empty (vq=vq@entry=0x555559343f10 <vdev_storage+1296>) at virtio.c:290
> >>> #2 vu_queue_pop (dev=dev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510) at virtio.c:505
> >>> #3 0x0000555555588c8c in vu_collect (vdev=vdev@entry=0x555559343a00 <vdev_storage>, vq=vq@entry=0x555559343f10 <vdev_storage+1296>, elem=elem@entry=0x7ffffff6f510, max_elem=max_elem@entry=1,
> >>> size=size@entry=74, frame_size=frame_size@entry=0x0) at vu_common.c:86
> >>> #4 0x000055555557e00e in tcp_vu_send_flag (c=0x7ffffff6f7a0, conn=0x5555555bd2d0 <flowtab+2160>, flags=4) at tcp_vu.c:116
> >>> #5 0x0000555555578125 in tcp_send_flag (flags=4, conn=0x5555555bd2d0 <flowtab+2160>, c=0x7ffffff6f7a0) at tcp.c:1278
> >>> #6 tcp_rst_do (conn=<optimized out>, c=<optimized out>) at tcp.c:1293
> >>> #7 tcp_timer_handler (c=c@entry=0x7ffffff6f7a0, ref=..., ref@entry=...) at tcp.c:2266
> >>> #8 0x0000555555558f26 in main (argc=<optimized out>, argv=<optimized out>) at passt.c:342
> >>> (gdb) p *vq
> >>> $1 = {vring = {num = 256, desc = 0x0, avail = 0x0, used = 0x0, log_guest_addr = 4338774592, flags = 0}, last_avail_idx = 35133, shadow_avail_idx = 35133, used_idx = 35133, signalled_used = 0,
> >>> signalled_used_valid = false, notification = true, inuse = 0, call_fd = -1, kick_fd = -1, err_fd = -1, enable = 1, started = false, vra = {index = 0, flags = 0, desc_user_addr = 139660501995520,
> >>> used_user_addr = 139660502000192, avail_user_addr = 139660501999616, log_guest_addr = 4338774592}}
> >>> (gdb) p *vq->vring.avail
> >>> Cannot access memory at address 0x0
> >>>
> >>> ...so we're sending a RST segment to the guest, but the ring doesn't
> >>> exist anymore.
> >>>
> >>> By the way, I still have the gdb session running, if you need something
> >>> else out of it.
> >>>
> >>> Now, I guess we should eventually introduce a more comprehensive
> >>> handling of the case where the guest suddenly terminates (not specific
> >>> to vhost-user), but given that we have vu_cleanup() working as expected
> >>> in this case, I wonder if we shouldn't simply avoid calling
> >>> vring_avail_idx() (it has a single caller) by checking for !vring.avail
> >>> in the caller, or something like that.
> >>>
> >>
> >> Yes, I think it's the lines I removed during the reviews:
> >>
> >> if (!vq->vring.avail)
> >> return true;
> >
> > Ah, right:
> >
> > https://archives.passt.top/passt-dev/20241114163859.7eeafa38@elisabeth/
> >
> > ...so, at least in our case, it's more than "sanity checks" after all.
> > :) Well, I guess it depends on the definition.
> >
> >> Could you try to checkout virtio.c from v11?
> >
> > That would take a rather lengthy rebase, but I tried to reintroduce all
> > the checks you had:
> >
> > --
> > diff --git a/virtio.c b/virtio.c
> > index 6a97435..0598ff4 100644
> > --- a/virtio.c
> > +++ b/virtio.c
> > @@ -284,6 +284,9 @@ static int virtqueue_read_next_desc(const struct vring_desc *desc,
> > */
> > bool vu_queue_empty(struct vu_virtq *vq)
> > {
> > + if (!vq->vring.avail)
> > + return true;
> > +
> > if (vq->shadow_avail_idx != vq->last_avail_idx)
> > return false;
> >
> > @@ -327,6 +330,9 @@ static bool vring_can_notify(const struct vu_dev *dev, struct vu_virtq *vq)
> > */
> > void vu_queue_notify(const struct vu_dev *dev, struct vu_virtq *vq)
> > {
> > + if (!vq->vring.avail)
> > + return;
> > +
> > if (!vring_can_notify(dev, vq)) {
> > debug("vhost-user: virtqueue can skip notify...");
> > return;
> > @@ -502,6 +508,9 @@ int vu_queue_pop(struct vu_dev *dev, struct vu_virtq *vq, struct vu_virtq_elemen
> > unsigned int head;
> > int ret;
> >
> > + if (!vq->vring.avail)
> > + return -1;
> > +
> > if (vu_queue_empty(vq))
> > return -1;
> >
> > @@ -591,6 +600,9 @@ void vu_queue_fill_by_index(struct vu_virtq *vq, unsigned int index,
> > {
> > struct vring_used_elem uelem;
> >
> > + if (!vq->vring.avail)
> > + return;
> > +
> > idx = (idx + vq->used_idx) % vq->vring.num;
> >
> > uelem.id = htole32(index);
> > @@ -633,6 +645,9 @@ void vu_queue_flush(struct vu_virtq *vq, unsigned int count)
> > {
> > uint16_t old, new;
> >
> > + if (!vq->vring.avail)
> > + return;
> > +
> > /* Make sure buffer is written before we update index. */
> > smp_wmb();
> >
> > --
> >
> > and it's all fine with those, I tried doing a few nasty things and
> > didn't observe any issue.
> >
> > Any check I missed? Do you want to submit it as follow-up patch? I can
> > also do that. I'd rather (still) avoid a re-post of v14 if possible.
>
> As you prefer. Let me know.
It would save me some time if you could... it should be based on v14 as
it is.
I haven't had time yet to take care of the gcc warnings on 32-bit and
the build failure on musl.
--
Stefano
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-27 10:03 ` Stefano Brivio
@ 2024-11-27 10:11 ` Laurent Vivier
2024-11-27 10:14 ` Stefano Brivio
0 siblings, 1 reply; 26+ messages in thread
From: Laurent Vivier @ 2024-11-27 10:11 UTC (permalink / raw)
To: Stefano Brivio; +Cc: passt-dev
On 27/11/2024 11:03, Stefano Brivio wrote:
> On Wed, 27 Nov 2024 10:48:41 +0100
> Laurent Vivier <lvivier@redhat.com> wrote:
>
>> On 27/11/2024 10:45, Stefano Brivio wrote:
>>> [...]
>>>
>>> and it's all fine with those, I tried doing a few nasty things and
>>> didn't observe any issue.
>>>
>>> Any check I missed? Do you want to submit it as follow-up patch? I can
>>> also do that. I'd rather (still) avoid a re-post of v14 if possible.
>>
>> As you prefer. Let me know.
>
> It would save me some time if you could... it should be based on v14 as
> it is.
I can.
>
> I didn't have time to take care of the gcc warnings on 32-bit and
> of the build failure on musl yet.
>
I will take care of those too.
Do you want them before the merge?
Thanks,
Laurent
^ permalink raw reply [flat|nested] 26+ messages in thread
* Re: [PATCH v14 7/9] vhost-user: add vhost-user
2024-11-27 10:11 ` Laurent Vivier
@ 2024-11-27 10:14 ` Stefano Brivio
0 siblings, 0 replies; 26+ messages in thread
From: Stefano Brivio @ 2024-11-27 10:14 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev
On Wed, 27 Nov 2024 11:11:33 +0100
Laurent Vivier <lvivier@redhat.com> wrote:
> On 27/11/2024 11:03, Stefano Brivio wrote:
> > On Wed, 27 Nov 2024 10:48:41 +0100
> > Laurent Vivier <lvivier@redhat.com> wrote:
> >
> >> On 27/11/2024 10:45, Stefano Brivio wrote:
> >>> [...]
> >>>
> >>> and it's all fine with those, I tried doing a few nasty things and
> >>> didn't observe any issue.
> >>>
> >>> Any check I missed? Do you want to submit it as follow-up patch? I can
> >>> also do that. I'd rather (still) avoid a re-post of v14 if possible.
> >>
> >> As you prefer. Let me know.
> >
> > It would save me some time if you could... it should be based on v14 as
> > it is.
>
> I can.
>
> >
> > I didn't have time to take care of the gcc warnings on 32-bit and
> > of the build failure on musl yet.
> >
> I will take care of those too.
>
> Do you want them before the merge?
Yes, thanks, before the merge would be great.
--
Stefano
^ permalink raw reply [flat|nested] 26+ messages in thread
* [PATCH v14 8/9] test: Add tests for passt in vhost-user mode
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
` (6 preceding siblings ...)
2024-11-22 16:43 ` [PATCH v14 7/9] vhost-user: add vhost-user Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-22 16:43 ` [PATCH v14 9/9] tcp: Move tcp_l2_buf_fill_headers() to tcp_buf.c Laurent Vivier
2024-11-27 16:21 ` [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Stefano Brivio
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: Stefano Brivio, Laurent Vivier, David Gibson
From: Stefano Brivio <sbrivio@redhat.com>
Run functional and performance tests for vhost-user mode as well. For
functional tests, we add passt_vu and passt_vu_in_ns as symbolic links
to their non-vhost-user counterparts, as no differences are intended
but we want to distinguish them in test logs.
For performance tests, instead, we add separate perf/passt_vu_tcp and
perf/passt_vu_udp files, as we need longer test duration, as well as
higher UDP sending bandwidths and larger TCP windows, to actually get
the highest throughput vhost-user mode offers.
For valgrind tests, vhost-user mode needs two extra system calls:
statx and readlink. Add them as EXTRA_SYSCALLS for the valgrind
target.
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
---
Makefile | 3 +-
test/lib/perf_report | 15 +++
test/lib/setup | 77 ++++++++++++---
test/lib/setup_ugly | 2 +-
test/passt_vu | 1 +
test/passt_vu_in_ns | 1 +
test/perf/passt_vu_tcp | 211 +++++++++++++++++++++++++++++++++++++++++
test/perf/passt_vu_udp | 159 +++++++++++++++++++++++++++++++
test/run | 25 +++++
test/two_guests_vu | 1 +
10 files changed, 479 insertions(+), 16 deletions(-)
create mode 120000 test/passt_vu
create mode 120000 test/passt_vu_in_ns
create mode 100644 test/perf/passt_vu_tcp
create mode 100644 test/perf/passt_vu_udp
create mode 120000 test/two_guests_vu
diff --git a/Makefile b/Makefile
index faa5c23346ac..cb7448079de5 100644
--- a/Makefile
+++ b/Makefile
@@ -101,7 +101,8 @@ qrap: $(QRAP_SRCS) passt.h
valgrind: EXTRA_SYSCALLS += rt_sigprocmask rt_sigtimedwait rt_sigaction \
rt_sigreturn getpid gettid kill clock_gettime mmap \
- mmap2 munmap open unlink gettimeofday futex
+ mmap2 munmap open unlink gettimeofday futex statx \
+ readlink
valgrind: FLAGS += -g -DVALGRIND
valgrind: all
diff --git a/test/lib/perf_report b/test/lib/perf_report
index d1ef50bfe0d5..c4ec817bcd1e 100755
--- a/test/lib/perf_report
+++ b/test/lib/perf_report
@@ -49,6 +49,21 @@ td:empty { visibility: hidden; }
__passt_tcp_LINE__ __passt_udp_LINE__
</table>
+</li><li><p>passt with vhost-user support</p>
+<table class="passt" width="70%">
+ <tr>
+ <th/>
+ <th id="perf_passt_vu_tcp" colspan="__passt_vu_tcp_cols__">TCP, __passt_vu_tcp_threads__ at __passt_vu_tcp_freq__ GHz</th>
+ <th id="perf_passt_vu_udp" colspan="__passt_vu_udp_cols__">UDP, __passt_vu_udp_threads__ at __passt_vu_udp_freq__ GHz</th>
+ </tr>
+ <tr>
+ <td align="right">MTU:</td>
+ __passt_vu_tcp_header__
+ __passt_vu_udp_header__
+ </tr>
+ __passt_vu_tcp_LINE__ __passt_vu_udp_LINE__
+</table>
+
<style type="text/CSS">
table.pasta_local td { border: 0px solid; padding: 6px; line-height: 1; }
table.pasta_local td { text-align: right; }
diff --git a/test/lib/setup b/test/lib/setup
index 5338393ce35c..580825f1f9a7 100755
--- a/test/lib/setup
+++ b/test/lib/setup
@@ -15,8 +15,7 @@
INITRAMFS="${BASEPATH}/mbuto.img"
VCPUS="$( [ $(nproc) -ge 8 ] && echo 6 || echo $(( $(nproc) / 2 + 1 )) )"
-__mem_kib="$(sed -n 's/MemTotal:[ ]*\([0-9]*\) kB/\1/p' /proc/meminfo)"
-VMEM="$((${__mem_kib} / 1024 / 4))"
+MEM_KIB="$(sed -n 's/MemTotal:[ ]*\([0-9]*\) kB/\1/p' /proc/meminfo)"
QEMU_ARCH="$(uname -m)"
[ "${QEMU_ARCH}" = "i686" ] && QEMU_ARCH=i386
@@ -46,6 +45,7 @@ setup_passt() {
[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt.pcap"
[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
+ [ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
context_run passt "make clean"
context_run passt "make valgrind"
@@ -54,16 +54,29 @@ setup_passt() {
# pidfile isn't created until passt is listening
wait_for [ -f "${STATESETUP}/passt.pid" ]
+ __vmem="$((${MEM_KIB} / 1024 / 4))"
+ if [ ${VHOST_USER} -eq 1 ]; then
+ __vmem="$(((${__vmem} + 500) / 1000))G"
+ __qemu_netdev=" \
+ -chardev socket,id=c,path=${STATESETUP}/passt.socket \
+ -netdev vhost-user,id=v,chardev=c \
+ -device virtio-net,netdev=v \
+ -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
+ -numa node,memdev=m"
+ else
+ __qemu_netdev="-device virtio-net-pci,netdev=s \
+ -netdev stream,id=s,server=off,addr.type=unix,addr.path=${STATESETUP}/passt.socket"
+ fi
+
GUEST_CID=94557
context_run_bg qemu 'qemu-system-'"${QEMU_ARCH}" \
' -machine accel=kvm' \
- ' -m '${VMEM}' -cpu host -smp '${VCPUS} \
+ ' -m '${__vmem}' -cpu host -smp '${VCPUS} \
' -kernel '"${KERNEL}" \
' -initrd '${INITRAMFS}' -nographic -serial stdio' \
' -nodefaults' \
' -append "console=ttyS0 mitigations=off apparmor=0" ' \
- ' -device virtio-net-pci,netdev=s0 ' \
- " -netdev stream,id=s0,server=off,addr.type=unix,addr.path=${STATESETUP}/passt.socket " \
+ " ${__qemu_netdev}" \
" -pidfile ${STATESETUP}/qemu.pid" \
" -device vhost-vsock-pci,guest-cid=$GUEST_CID"
@@ -142,6 +155,7 @@ setup_passt_in_ns() {
[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt_in_pasta.pcap"
[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
+ [ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
if [ ${VALGRIND} -eq 1 ]; then
context_run passt "make clean"
@@ -154,17 +168,30 @@ setup_passt_in_ns() {
fi
wait_for [ -f "${STATESETUP}/passt.pid" ]
+ __vmem="$((${MEM_KIB} / 1024 / 4))"
+ if [ ${VHOST_USER} -eq 1 ]; then
+ __vmem="$(((${__vmem} + 500) / 1000))G"
+ __qemu_netdev=" \
+ -chardev socket,id=c,path=${STATESETUP}/passt.socket \
+ -netdev vhost-user,id=v,chardev=c \
+ -device virtio-net,netdev=v \
+ -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
+ -numa node,memdev=m"
+ else
+ __qemu_netdev="-device virtio-net-pci,netdev=s \
+ -netdev stream,id=s,server=off,addr.type=unix,addr.path=${STATESETUP}/passt.socket"
+ fi
+
GUEST_CID=94557
context_run_bg qemu 'qemu-system-'"${QEMU_ARCH}" \
' -machine accel=kvm' \
' -M accel=kvm:tcg' \
- ' -m '${VMEM}' -cpu host -smp '${VCPUS} \
+ ' -m '${__vmem}' -cpu host -smp '${VCPUS} \
' -kernel '"${KERNEL}" \
' -initrd '${INITRAMFS}' -nographic -serial stdio' \
' -nodefaults' \
' -append "console=ttyS0 mitigations=off apparmor=0" ' \
- ' -device virtio-net-pci,netdev=s0 ' \
- " -netdev stream,id=s0,server=off,addr.type=unix,addr.path=${STATESETUP}/passt.socket " \
+ " ${__qemu_netdev}" \
" -pidfile ${STATESETUP}/qemu.pid" \
" -device vhost-vsock-pci,guest-cid=$GUEST_CID"
@@ -214,6 +241,7 @@ setup_two_guests() {
[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt_1.pcap"
[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
+ [ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
context_run_bg passt_1 "./passt -s ${STATESETUP}/passt_1.socket -P ${STATESETUP}/passt_1.pid -f ${__opts} -t 10001 -u 10001"
wait_for [ -f "${STATESETUP}/passt_1.pid" ]
@@ -222,33 +250,54 @@ setup_two_guests() {
[ ${PCAP} -eq 1 ] && __opts="${__opts} -p ${LOGDIR}/passt_2.pcap"
[ ${DEBUG} -eq 1 ] && __opts="${__opts} -d"
[ ${TRACE} -eq 1 ] && __opts="${__opts} --trace"
+ [ ${VHOST_USER} -eq 1 ] && __opts="${__opts} --vhost-user"
context_run_bg passt_2 "./passt -s ${STATESETUP}/passt_2.socket -P ${STATESETUP}/passt_2.pid -f ${__opts} -t 10004 -u 10004"
wait_for [ -f "${STATESETUP}/passt_2.pid" ]
+ __vmem="$((${MEM_KIB} / 1024 / 4))"
+ if [ ${VHOST_USER} -eq 1 ]; then
+ __vmem="$(((${__vmem} + 500) / 1000))G"
+ __qemu_netdev1=" \
+ -chardev socket,id=c,path=${STATESETUP}/passt_1.socket \
+ -netdev vhost-user,id=v,chardev=c \
+ -device virtio-net,netdev=v \
+ -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
+ -numa node,memdev=m"
+ __qemu_netdev2=" \
+ -chardev socket,id=c,path=${STATESETUP}/passt_2.socket \
+ -netdev vhost-user,id=v,chardev=c \
+ -device virtio-net,netdev=v \
+ -object memory-backend-memfd,id=m,share=on,size=${__vmem} \
+ -numa node,memdev=m"
+ else
+ __qemu_netdev1="-device virtio-net-pci,netdev=s \
+ -netdev stream,id=s,server=off,addr.type=unix,addr.path=${STATESETUP}/passt_1.socket"
+ __qemu_netdev2="-device virtio-net-pci,netdev=s \
+ -netdev stream,id=s,server=off,addr.type=unix,addr.path=${STATESETUP}/passt_2.socket"
+ fi
+
GUEST_1_CID=94557
context_run_bg qemu_1 'qemu-system-'"${QEMU_ARCH}" \
' -M accel=kvm:tcg' \
- ' -m '${VMEM}' -cpu host -smp '${VCPUS} \
+ ' -m '${__vmem}' -cpu host -smp '${VCPUS} \
' -kernel '"${KERNEL}" \
' -initrd '${INITRAMFS}' -nographic -serial stdio' \
' -nodefaults' \
' -append "console=ttyS0 mitigations=off apparmor=0" ' \
- ' -device virtio-net-pci,netdev=s0 ' \
- " -netdev stream,id=s0,server=off,addr.type=unix,addr.path=${STATESETUP}/passt_1.socket " \
+ " ${__qemu_netdev1}" \
" -pidfile ${STATESETUP}/qemu_1.pid" \
" -device vhost-vsock-pci,guest-cid=$GUEST_1_CID"
GUEST_2_CID=94558
context_run_bg qemu_2 'qemu-system-'"${QEMU_ARCH}" \
' -M accel=kvm:tcg' \
- ' -m '${VMEM}' -cpu host -smp '${VCPUS} \
+ ' -m '${__vmem}' -cpu host -smp '${VCPUS} \
' -kernel '"${KERNEL}" \
' -initrd '${INITRAMFS}' -nographic -serial stdio' \
' -nodefaults' \
' -append "console=ttyS0 mitigations=off apparmor=0" ' \
- ' -device virtio-net-pci,netdev=s0 ' \
- " -netdev stream,id=s0,server=off,addr.type=unix,addr.path=${STATESETUP}/passt_2.socket " \
+ " ${__qemu_netdev2}" \
" -pidfile ${STATESETUP}/qemu_2.pid" \
" -device vhost-vsock-pci,guest-cid=$GUEST_2_CID"
diff --git a/test/lib/setup_ugly b/test/lib/setup_ugly
index 4b2a0774de1d..2802cc3bb43b 100755
--- a/test/lib/setup_ugly
+++ b/test/lib/setup_ugly
@@ -33,7 +33,7 @@ setup_memory() {
pane_or_context_run guest 'qemu-system-$(uname -m)' \
' -machine accel=kvm' \
- ' -m '${VMEM}' -cpu host -smp '${VCPUS} \
+ ' -m '$((${MEM_KIB} / 1024 / 4))' -cpu host -smp '${VCPUS} \
' -kernel ' "/boot/vmlinuz-$(uname -r)" \
' -initrd '${INITRAMFS_MEM}' -nographic -serial stdio' \
' -nodefaults' \
diff --git a/test/passt_vu b/test/passt_vu
new file mode 120000
index 000000000000..22f1840d1ad6
--- /dev/null
+++ b/test/passt_vu
@@ -0,0 +1 @@
+passt
\ No newline at end of file
diff --git a/test/passt_vu_in_ns b/test/passt_vu_in_ns
new file mode 120000
index 000000000000..3ff479e0436b
--- /dev/null
+++ b/test/passt_vu_in_ns
@@ -0,0 +1 @@
+passt_in_ns
\ No newline at end of file
diff --git a/test/perf/passt_vu_tcp b/test/perf/passt_vu_tcp
new file mode 100644
index 000000000000..b43400804e64
--- /dev/null
+++ b/test/perf/passt_vu_tcp
@@ -0,0 +1,211 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# PASST - Plug A Simple Socket Transport
+# for qemu/UNIX domain socket mode
+#
+# PASTA - Pack A Subtle Tap Abstraction
+# for network namespace/tap device mode
+#
+# test/perf/passt_vu_tcp - Check TCP performance in passt vhost-user mode
+#
+# Copyright (c) 2021 Red Hat GmbH
+# Author: Stefano Brivio <sbrivio@redhat.com>
+
+gtools /sbin/sysctl ip jq nproc seq sleep iperf3 tcp_rr tcp_crr # From neper
+nstools /sbin/sysctl ip jq nproc seq sleep iperf3 tcp_rr tcp_crr
+htools bc head sed seq
+
+set MAP_NS4 192.0.2.2
+set MAP_NS6 2001:db8:9a55::2
+
+test passt: throughput and latency
+
+guest /sbin/sysctl -w net.core.rmem_max=536870912
+guest /sbin/sysctl -w net.core.wmem_max=536870912
+guest /sbin/sysctl -w net.core.rmem_default=33554432
+guest /sbin/sysctl -w net.core.wmem_default=33554432
+guest /sbin/sysctl -w net.ipv4.tcp_rmem="4096 131072 268435456"
+guest /sbin/sysctl -w net.ipv4.tcp_wmem="4096 131072 268435456"
+guest /sbin/sysctl -w net.ipv4.tcp_timestamps=0
+
+ns /sbin/sysctl -w net.ipv4.tcp_rmem="4096 524288 134217728"
+ns /sbin/sysctl -w net.ipv4.tcp_wmem="4096 524288 134217728"
+ns /sbin/sysctl -w net.ipv4.tcp_timestamps=0
+
+gout IFNAME ip -j link show | jq -rM '.[] | select(.link_type == "ether").ifname'
+
+hout FREQ_PROCFS (echo "scale=1"; sed -n 's/cpu MHz.*: \([0-9]*\)\..*$/(\1+10^2\/2)\/10^3/p' /proc/cpuinfo) | bc -l | head -n1
+hout FREQ_CPUFREQ (echo "scale=1"; printf '( %i + 10^5 / 2 ) / 10^6\n' $(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq) ) | bc -l
+hout FREQ [ -n "__FREQ_CPUFREQ__" ] && echo __FREQ_CPUFREQ__ || echo __FREQ_PROCFS__
+
+set THREADS 4
+set TIME 5
+set OMIT 0.1
+set OPTS -Z -P __THREADS__ -l 1M -O__OMIT__ -N
+
+info Throughput in Gbps, latency in µs, __THREADS__ threads at __FREQ__ GHz
+report passt_vu tcp __THREADS__ __FREQ__
+
+th MTU 256B 576B 1280B 1500B 9000B 65520B
+
+
+tr TCP throughput over IPv6: guest to host
+iperf3s ns 10002
+
+bw -
+bw -
+guest ip link set dev __IFNAME__ mtu 1280
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -w 16M
+bw __BW__ 1.2 1.5
+guest ip link set dev __IFNAME__ mtu 1500
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -w 32M
+bw __BW__ 1.6 1.8
+guest ip link set dev __IFNAME__ mtu 9000
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -w 64M
+bw __BW__ 4.0 5.0
+guest ip link set dev __IFNAME__ mtu 65520
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -w 64M
+bw __BW__ 7.0 8.0
+
+iperf3k ns
+
+tl TCP RR latency over IPv6: guest to host
+lat -
+lat -
+lat -
+lat -
+lat -
+nsb tcp_rr --nolog -6
+gout LAT tcp_rr --nolog -l1 -6 -c -H __MAP_NS6__ | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+tl TCP CRR latency over IPv6: guest to host
+lat -
+lat -
+lat -
+lat -
+lat -
+nsb tcp_crr --nolog -6
+gout LAT tcp_crr --nolog -l1 -6 -c -H __MAP_NS6__ | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 500 400
+
+tr TCP throughput over IPv4: guest to host
+iperf3s ns 10002
+
+guest ip link set dev __IFNAME__ mtu 256
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -w 2M
+bw __BW__ 0.2 0.3
+guest ip link set dev __IFNAME__ mtu 576
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -w 4M
+bw __BW__ 0.5 0.8
+guest ip link set dev __IFNAME__ mtu 1280
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -w 8M
+bw __BW__ 1.2 1.5
+guest ip link set dev __IFNAME__ mtu 1500
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -w 16M
+bw __BW__ 1.6 1.8
+guest ip link set dev __IFNAME__ mtu 9000
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -w 64M
+bw __BW__ 4.0 5.0
+guest ip link set dev __IFNAME__ mtu 65520
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -w 64M
+bw __BW__ 7.0 8.0
+
+iperf3k ns
+
+# Reducing MTU below 1280 deconfigures IPv6, get our address back
+guest dhclient -6 -x
+guest dhclient -6 __IFNAME__
+
+tl TCP RR latency over IPv4: guest to host
+lat -
+lat -
+lat -
+lat -
+lat -
+nsb tcp_rr --nolog -4
+gout LAT tcp_rr --nolog -l1 -4 -c -H __MAP_NS4__ | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+tl TCP CRR latency over IPv4: guest to host
+lat -
+lat -
+lat -
+lat -
+lat -
+nsb tcp_crr --nolog -4
+gout LAT tcp_crr --nolog -l1 -4 -c -H __MAP_NS4__ | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 500 400
+
+tr TCP throughput over IPv6: host to guest
+iperf3s guest 10001
+
+bw -
+bw -
+bw -
+bw -
+bw -
+iperf3 BW ns ::1 10001 __TIME__ __OPTS__ -w 32M
+bw __BW__ 6.0 6.8
+
+iperf3k guest
+
+tl TCP RR latency over IPv6: host to guest
+lat -
+lat -
+lat -
+lat -
+lat -
+guestb tcp_rr --nolog -P 10001 -C 10011 -6
+sleep 1
+nsout LAT tcp_rr --nolog -l1 -P 10001 -C 10011 -6 -c -H ::1 | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+tl TCP CRR latency over IPv6: host to guest
+lat -
+lat -
+lat -
+lat -
+lat -
+guestb tcp_crr --nolog -P 10001 -C 10011 -6
+sleep 1
+nsout LAT tcp_crr --nolog -l1 -P 10001 -C 10011 -6 -c -H ::1 | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 500 350
+
+
+tr TCP throughput over IPv4: host to guest
+iperf3s guest 10001
+
+bw -
+bw -
+bw -
+bw -
+bw -
+iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__ -w 32M
+bw __BW__ 6.0 6.8
+
+iperf3k guest
+
+tl TCP RR latency over IPv4: host to guest
+lat -
+lat -
+lat -
+lat -
+lat -
+guestb tcp_rr --nolog -P 10001 -C 10011 -4
+sleep 1
+nsout LAT tcp_rr --nolog -l1 -P 10001 -C 10011 -4 -c -H 127.0.0.1 | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+tl TCP CRR latency over IPv4: host to guest
+lat -
+lat -
+lat -
+lat -
+lat -
+guestb tcp_crr --nolog -P 10001 -C 10011 -4
+sleep 1
+nsout LAT tcp_crr --nolog -l1 -P 10001 -C 10011 -4 -c -H 127.0.0.1 | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 500 300
+
+te
diff --git a/test/perf/passt_vu_udp b/test/perf/passt_vu_udp
new file mode 100644
index 000000000000..943ac11b4a51
--- /dev/null
+++ b/test/perf/passt_vu_udp
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# PASST - Plug A Simple Socket Transport
+# for qemu/UNIX domain socket mode
+#
+# PASTA - Pack A Subtle Tap Abstraction
+# for network namespace/tap device mode
+#
+# test/perf/passt_vu_udp - Check UDP performance in passt vhost-user mode
+#
+# Copyright (c) 2021 Red Hat GmbH
+# Author: Stefano Brivio <sbrivio@redhat.com>
+
+gtools /sbin/sysctl ip jq nproc sleep iperf3 udp_rr # From neper
+nstools ip jq sleep iperf3 udp_rr
+htools bc head sed
+
+set MAP_NS4 192.0.2.2
+set MAP_NS6 2001:db8:9a55::2
+
+test passt: throughput and latency
+
+guest /sbin/sysctl -w net.core.rmem_max=16777216
+guest /sbin/sysctl -w net.core.wmem_max=16777216
+guest /sbin/sysctl -w net.core.rmem_default=16777216
+guest /sbin/sysctl -w net.core.wmem_default=16777216
+
+hout FREQ_PROCFS (echo "scale=1"; sed -n 's/cpu MHz.*: \([0-9]*\)\..*$/(\1+10^2\/2)\/10^3/p' /proc/cpuinfo) | bc -l | head -n1
+hout FREQ_CPUFREQ (echo "scale=1"; printf '( %i + 10^5 / 2 ) / 10^6\n' $(cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq) ) | bc -l
+hout FREQ [ -n "__FREQ_CPUFREQ__" ] && echo __FREQ_CPUFREQ__ || echo __FREQ_PROCFS__
+
+set THREADS 2
+set TIME 1
+set OPTS -u -P __THREADS__ --pacing-timer 1000
+
+info Throughput in Gbps, latency in µs, __THREADS__ threads at __FREQ__ GHz
+
+report passt_vu udp __THREADS__ __FREQ__
+
+th pktlen 256B 576B 1280B 1500B 9000B 65520B
+
+tr UDP throughput over IPv6: guest to host
+iperf3s ns 10002
+# (datagram size) = (packet size) - 48: 40 bytes of IPv6 header, 8 of UDP header
+
+bw -
+bw -
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -b 3G -l 1232
+bw __BW__ 0.8 1.2
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -b 4G -l 1452
+bw __BW__ 1.0 1.5
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -b 10G -l 8952
+bw __BW__ 4.0 5.0
+iperf3 BW guest __MAP_NS6__ 10002 __TIME__ __OPTS__ -b 20G -l 64372
+bw __BW__ 4.0 5.0
+
+iperf3k ns
+
+tl UDP RR latency over IPv6: guest to host
+lat -
+lat -
+lat -
+lat -
+lat -
+nsb udp_rr --nolog -6
+gout LAT udp_rr --nolog -6 -c -H __MAP_NS6__ | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+
+tr UDP throughput over IPv4: guest to host
+iperf3s ns 10002
+# (datagram size) = (packet size) - 28: 20 bytes of IPv4 header, 8 of UDP header
+
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -b 1G -l 228
+bw __BW__ 0.0 0.0
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -b 2G -l 548
+bw __BW__ 0.4 0.6
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -b 3G -l 1252
+bw __BW__ 0.8 1.2
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -b 4G -l 1472
+bw __BW__ 1.0 1.5
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -b 10G -l 8972
+bw __BW__ 4.0 5.0
+iperf3 BW guest __MAP_NS4__ 10002 __TIME__ __OPTS__ -b 20G -l 65492
+bw __BW__ 4.0 5.0
+
+iperf3k ns
+
+tl UDP RR latency over IPv4: guest to host
+lat -
+lat -
+lat -
+lat -
+lat -
+nsb udp_rr --nolog -4
+gout LAT udp_rr --nolog -4 -c -H __MAP_NS4__ | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+
+tr UDP throughput over IPv6: host to guest
+iperf3s guest 10001
+# (datagram size) = (packet size) - 48: 40 bytes of IPv6 header, 8 of UDP header
+
+bw -
+bw -
+iperf3 BW ns ::1 10001 __TIME__ __OPTS__ -b 3G -l 1232
+bw __BW__ 0.8 1.2
+iperf3 BW ns ::1 10001 __TIME__ __OPTS__ -b 4G -l 1452
+bw __BW__ 1.0 1.5
+iperf3 BW ns ::1 10001 __TIME__ __OPTS__ -b 10G -l 8952
+bw __BW__ 3.0 4.0
+iperf3 BW ns ::1 10001 __TIME__ __OPTS__ -b 20G -l 64372
+bw __BW__ 3.0 4.0
+
+iperf3k guest
+
+tl UDP RR latency over IPv6: host to guest
+lat -
+lat -
+lat -
+lat -
+lat -
+guestb udp_rr --nolog -P 10001 -C 10011 -6
+sleep 1
+nsout LAT udp_rr --nolog -P 10001 -C 10011 -6 -c -H ::1 | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+
+tr UDP throughput over IPv4: host to guest
+iperf3s guest 10001
+# (datagram size) = (packet size) - 28: 20 bytes of IPv4 header, 8 of UDP header
+
+iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__ -b 1G -l 228
+bw __BW__ 0.0 0.0
+iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__ -b 2G -l 548
+bw __BW__ 0.4 0.6
+iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__ -b 3G -l 1252
+bw __BW__ 0.8 1.2
+iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__ -b 4G -l 1472
+bw __BW__ 1.0 1.5
+iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__ -b 10G -l 8972
+bw __BW__ 3.0 4.0
+iperf3 BW ns 127.0.0.1 10001 __TIME__ __OPTS__ -b 20G -l 65492
+bw __BW__ 3.0 4.0
+
+iperf3k guest
+
+tl UDP RR latency over IPv4: host to guest
+lat -
+lat -
+lat -
+lat -
+lat -
+guestb udp_rr --nolog -P 10001 -C 10011 -4
+sleep 1
+nsout LAT udp_rr --nolog -P 10001 -C 10011 -4 -c -H 127.0.0.1 | sed -n 's/^throughput=\(.*\)/\1/p'
+lat __LAT__ 200 150
+
+te
diff --git a/test/run b/test/run
index 547a729b3fbe..f188d8eaf2e0 100755
--- a/test/run
+++ b/test/run
@@ -93,6 +93,7 @@ run() {
test memory/passt
teardown memory
+ VHOST_USER=0
setup passt
test passt/ndp
test passt/dhcp
@@ -115,7 +116,22 @@ run() {
test two_guests/basic
teardown two_guests
+ VHOST_USER=1
+ setup passt_in_ns
+ test passt_vu/ndp
+ test passt_vu_in_ns/dhcp
+ test passt_vu_in_ns/icmp
+ test passt_vu_in_ns/tcp
+ test passt_vu_in_ns/udp
+ test passt_vu_in_ns/shutdown
+ teardown passt_in_ns
+
+ setup two_guests
+ test two_guests_vu/basic
+ teardown two_guests
+
VALGRIND=0
+ VHOST_USER=0
setup passt_in_ns
test passt/ndp
test passt_in_ns/dhcp
@@ -126,6 +142,15 @@ run() {
test passt_in_ns/shutdown
teardown passt_in_ns
+ VHOST_USER=1
+ setup passt_in_ns
+ test passt_vu/ndp
+ test passt_vu_in_ns/dhcp
+ test perf/passt_vu_tcp
+ test perf/passt_vu_udp
+ test passt_vu_in_ns/shutdown
+ teardown passt_in_ns
+
# TODO: Make those faster by at least pre-installing gcc and make on
# non-x86 images, then re-enable.
skip_distro() {
diff --git a/test/two_guests_vu b/test/two_guests_vu
new file mode 120000
index 000000000000..a8648fc7c9fb
--- /dev/null
+++ b/test/two_guests_vu
@@ -0,0 +1 @@
+two_guests
\ No newline at end of file
--
2.47.0
^ permalink raw reply related [flat|nested] 26+ messages in thread
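
For reference, the vhost-user branch of the setup above reduces to a
QEMU invocation along these lines; the socket path, memory size and
boot arguments are placeholders, and the size corresponds to a quarter
of the host's memory rounded to the nearest GiB, as computed in
test/lib/setup. vhost-user requires the back-end to map guest memory,
hence the shared memfd backend matching the -m size:

	qemu-system-x86_64 \
		-machine accel=kvm -cpu host -m 4G \
		-chardev socket,id=c,path=/tmp/passt.socket \
		-netdev vhost-user,id=v,chardev=c \
		-device virtio-net,netdev=v \
		-object memory-backend-memfd,id=m,share=on,size=4G \
		-numa node,memdev=m \
		-kernel vmlinuz -initrd initrd.img -nographic
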
* [PATCH v14 9/9] tcp: Move tcp_l2_buf_fill_headers() to tcp_buf.c
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
` (7 preceding siblings ...)
2024-11-22 16:43 ` [PATCH v14 8/9] test: Add tests for passt in vhost-user mode Laurent Vivier
@ 2024-11-22 16:43 ` Laurent Vivier
2024-11-27 16:21 ` [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Stefano Brivio
9 siblings, 0 replies; 26+ messages in thread
From: Laurent Vivier @ 2024-11-22 16:43 UTC (permalink / raw)
To: passt-dev; +Cc: David Gibson, Laurent Vivier
From: David Gibson <david@gibson.dropbear.id.au>
This function only has callers in tcp_buf.c. More importantly, it's
inherently tied to the "buf" path, because it uses internal knowledge of
how we lay out the various headers across our locally allocated buffers.
Therefore, move it to tcp_buf.c.
Slightly reformat the prototypes while we're at it.
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
---
tcp.c | 57 +++++++-------------------------------------------
tcp_buf.c | 39 ++++++++++++++++++++++++++++++----
tcp_internal.h | 22 ++++++++-----------
tcp_vu.c | 16 ++++++--------
4 files changed, 58 insertions(+), 76 deletions(-)
diff --git a/tcp.c b/tcp.c
index 2b547876d58a..c95dcaf97721 100644
--- a/tcp.c
+++ b/tcp.c
@@ -976,14 +976,11 @@ static void tcp_fill_header(struct tcphdr *th,
* @check: Checksum, if already known
* @seq: Sequence number for this segment
* @no_tcp_csum: Do not set TCP checksum
- *
- * Return: The IPv4 payload length, host order
*/
-size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
- struct tap_hdr *taph,
- struct iphdr *iph, struct tcp_payload_t *bp,
- size_t dlen, const uint16_t *check,
- uint32_t seq, bool no_tcp_csum)
+void tcp_fill_headers4(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph, struct iphdr *iph,
+ struct tcp_payload_t *bp, size_t dlen,
+ const uint16_t *check, uint32_t seq, bool no_tcp_csum)
{
const struct flowside *tapside = TAPFLOW(conn);
const struct in_addr *src4 = inany_v4(&tapside->oaddr);
@@ -1014,8 +1011,6 @@ size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
}
tap_hdr_update(taph, l3len + sizeof(struct ethhdr));
-
- return l4len;
}
/**
@@ -1028,13 +1023,11 @@ size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
* @check: Checksum, if already known
* @seq: Sequence number for this segment
* @no_tcp_csum: Do not set TCP checksum
- *
- * Return: The IPv6 payload length, host order
*/
-size_t tcp_fill_headers6(const struct tcp_tap_conn *conn,
- struct tap_hdr *taph,
- struct ipv6hdr *ip6h, struct tcp_payload_t *bp,
- size_t dlen, uint32_t seq, bool no_tcp_csum)
+void tcp_fill_headers6(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph, struct ipv6hdr *ip6h,
+ struct tcp_payload_t *bp, size_t dlen,
+ uint32_t seq, bool no_tcp_csum)
{
const struct flowside *tapside = TAPFLOW(conn);
size_t l4len = dlen + sizeof(bp->th);
@@ -1065,40 +1058,6 @@ size_t tcp_fill_headers6(const struct tcp_tap_conn *conn,
}
tap_hdr_update(taph, l4len + sizeof(*ip6h) + sizeof(struct ethhdr));
-
- return l4len;
-}
-
-/**
- * tcp_l2_buf_fill_headers() - Fill 802.3, IP, TCP headers in pre-cooked buffers
- * @conn: Connection pointer
- * @iov: Pointer to an array of iovec of TCP pre-cooked buffers
- * @dlen: TCP payload length
- * @check: Checksum, if already known
- * @seq: Sequence number for this segment
- * @no_tcp_csum: Do not set TCP checksum
- *
- * Return: IP payload length, host order
- */
-size_t tcp_l2_buf_fill_headers(const struct tcp_tap_conn *conn,
- struct iovec *iov, size_t dlen,
- const uint16_t *check, uint32_t seq,
- bool no_tcp_csum)
-{
- const struct flowside *tapside = TAPFLOW(conn);
- const struct in_addr *a4 = inany_v4(&tapside->oaddr);
-
- if (a4) {
- return tcp_fill_headers4(conn, iov[TCP_IOV_TAP].iov_base,
- iov[TCP_IOV_IP].iov_base,
- iov[TCP_IOV_PAYLOAD].iov_base, dlen,
- check, seq, no_tcp_csum);
- }
-
- return tcp_fill_headers6(conn, iov[TCP_IOV_TAP].iov_base,
- iov[TCP_IOV_IP].iov_base,
- iov[TCP_IOV_PAYLOAD].iov_base, dlen,
- seq, no_tcp_csum);
}
/**
diff --git a/tcp_buf.c b/tcp_buf.c
index d29c1a90bad0..0946cd5b2965 100644
--- a/tcp_buf.c
+++ b/tcp_buf.c
@@ -147,6 +147,36 @@ void tcp_payload_flush(const struct ctx *c)
tcp_payload_used = 0;
}
+/**
+ * tcp_l2_buf_fill_headers() - Fill 802.3, IP, TCP headers in pre-cooked buffers
+ * @conn: Connection pointer
+ * @iov: Pointer to an array of iovec of TCP pre-cooked buffers
+ * @dlen: TCP payload length
+ * @check: Checksum, if already known
+ * @seq: Sequence number for this segment
+ * @no_tcp_csum: Do not set TCP checksum
+ */
+static void tcp_l2_buf_fill_headers(const struct tcp_tap_conn *conn,
+ struct iovec *iov, size_t dlen,
+ const uint16_t *check, uint32_t seq,
+ bool no_tcp_csum)
+{
+ const struct flowside *tapside = TAPFLOW(conn);
+ const struct in_addr *a4 = inany_v4(&tapside->oaddr);
+
+ if (a4) {
+ tcp_fill_headers4(conn, iov[TCP_IOV_TAP].iov_base,
+ iov[TCP_IOV_IP].iov_base,
+ iov[TCP_IOV_PAYLOAD].iov_base, dlen,
+ check, seq, no_tcp_csum);
+ } else {
+ tcp_fill_headers6(conn, iov[TCP_IOV_TAP].iov_base,
+ iov[TCP_IOV_IP].iov_base,
+ iov[TCP_IOV_PAYLOAD].iov_base, dlen,
+ seq, no_tcp_csum);
+ }
+}
+
/**
* tcp_buf_send_flag() - Send segment with flags to tap (no payload)
* @c: Execution context
@@ -181,8 +211,10 @@ int tcp_buf_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
return ret;
tcp_payload_used++;
- l4len = tcp_l2_buf_fill_headers(conn, iov, optlen, NULL, seq, false);
+ l4len = optlen + sizeof(struct tcphdr);
iov[TCP_IOV_PAYLOAD].iov_len = l4len;
+ tcp_l2_buf_fill_headers(conn, iov, optlen, NULL, seq, false);
+
if (flags & DUP_ACK) {
struct iovec *dup_iov = tcp_l2_iov[tcp_payload_used++];
@@ -215,7 +247,6 @@ static void tcp_data_to_tap(const struct ctx *c, struct tcp_tap_conn *conn,
struct tcp_payload_t *payload;
const uint16_t *check = NULL;
struct iovec *iov;
- size_t l4len;
conn->seq_to_tap = seq + dlen;
tcp_frame_conns[tcp_payload_used] = conn;
@@ -238,8 +269,8 @@ static void tcp_data_to_tap(const struct ctx *c, struct tcp_tap_conn *conn,
payload->th.th_x2 = 0;
payload->th.th_flags = 0;
payload->th.ack = 1;
- l4len = tcp_l2_buf_fill_headers(conn, iov, dlen, check, seq, false);
- iov[TCP_IOV_PAYLOAD].iov_len = l4len;
+ iov[TCP_IOV_PAYLOAD].iov_len = dlen + sizeof(struct tcphdr);
+ tcp_l2_buf_fill_headers(conn, iov, dlen, check, seq, false);
if (++tcp_payload_used > TCP_FRAMES_MEM - 1)
tcp_payload_flush(c);
}
diff --git a/tcp_internal.h b/tcp_internal.h
index 8625eed894d5..d7b125fb1d54 100644
--- a/tcp_internal.h
+++ b/tcp_internal.h
@@ -168,19 +168,15 @@ void tcp_update_check_tcp4(const struct iphdr *iph,
void tcp_update_check_tcp6(const struct ipv6hdr *ip6h,
const struct iovec *iov, int iov_cnt,
size_t l4offset);
-size_t tcp_fill_headers4(const struct tcp_tap_conn *conn,
- struct tap_hdr *taph,
- struct iphdr *iph, struct tcp_payload_t *bp,
- size_t dlen, const uint16_t *check,
- uint32_t seq, bool no_tcp_csum);
-size_t tcp_fill_headers6(const struct tcp_tap_conn *conn,
- struct tap_hdr *taph,
- struct ipv6hdr *ip6h, struct tcp_payload_t *bp,
- size_t dlen, uint32_t seq, bool no_tcp_csum);
-size_t tcp_l2_buf_fill_headers(const struct tcp_tap_conn *conn,
- struct iovec *iov, size_t dlen,
- const uint16_t *check, uint32_t seq,
- bool no_tcp_csum);
+void tcp_fill_headers4(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph, struct iphdr *iph,
+ struct tcp_payload_t *bp, size_t dlen,
+ const uint16_t *check, uint32_t seq, bool no_tcp_csum);
+void tcp_fill_headers6(const struct tcp_tap_conn *conn,
+ struct tap_hdr *taph, struct ipv6hdr *ip6h,
+ struct tcp_payload_t *bp, size_t dlen,
+ uint32_t seq, bool no_tcp_csum);
+
int tcp_update_seqack_wnd(const struct ctx *c, struct tcp_tap_conn *conn,
bool force_seq, struct tcp_info_linux *tinfo);
int tcp_prepare_flags(const struct ctx *c, struct tcp_tap_conn *conn,
diff --git a/tcp_vu.c b/tcp_vu.c
index be5027a1e921..05e2d1df2f2a 100644
--- a/tcp_vu.c
+++ b/tcp_vu.c
@@ -97,7 +97,7 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
struct vu_dev *vdev = c->vdev;
struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
const struct flowside *tapside = TAPFLOW(conn);
- size_t l2len, l4len, optlen, hdrlen;
+ size_t optlen, hdrlen;
struct vu_virtq_element flags_elem[2];
struct tcp_payload_t *payload;
struct ipv6hdr *ip6h = NULL;
@@ -156,19 +156,15 @@ int tcp_vu_send_flag(const struct ctx *c, struct tcp_tap_conn *conn, int flags)
return ret;
}
+ flags_elem[0].in_sg[0].iov_len = hdrlen + optlen;
+
if (CONN_V4(conn)) {
- l4len = tcp_fill_headers4(conn, NULL, iph, payload, optlen,
- NULL, seq, true);
- l2len = sizeof(*iph);
+ tcp_fill_headers4(conn, NULL, iph, payload, optlen, NULL, seq,
+ true);
} else {
- l4len = tcp_fill_headers6(conn, NULL, ip6h, payload, optlen,
- seq, true);
- l2len = sizeof(*ip6h);
+ tcp_fill_headers6(conn, NULL, ip6h, payload, optlen, seq, true);
}
- l2len += l4len + sizeof(struct ethhdr);
- flags_elem[0].in_sg[0].iov_len = l2len +
- sizeof(struct virtio_net_hdr_mrg_rxbuf);
if (*c->pcap) {
tcp_vu_update_check(tapside, &flags_elem[0].in_sg[0], 1);
pcap_iov(&flags_elem[0].in_sg[0], 1,
--
2.47.0
^ permalink raw reply related [flat|nested] 26+ messages in thread
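
For reference, the calling convention after this change, as it appears
in tcp_buf.c above: callers derive the L4 length themselves and set the
payload iovec length before filling the headers, instead of taking a
returned l4len. A sketch using the names from the patch:

	/* The TCP payload length is known to the caller, so the iovec
	 * length no longer depends on the fill function's return value.
	 */
	iov[TCP_IOV_PAYLOAD].iov_len = dlen + sizeof(struct tcphdr);
	tcp_l2_buf_fill_headers(conn, iov, dlen, check, seq, false);
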
* Re: [PATCH v14 0/9] Add vhost-user support to passt. (part 3)
2024-11-22 16:43 [PATCH v14 0/9] Add vhost-user support to passt. (part 3) Laurent Vivier
` (8 preceding siblings ...)
2024-11-22 16:43 ` [PATCH v14 9/9] tcp: Move tcp_l2_buf_fill_headers() to tcp_buf.c Laurent Vivier
@ 2024-11-27 16:21 ` Stefano Brivio
2024-11-27 16:40 ` Laurent Vivier
9 siblings, 1 reply; 26+ messages in thread
From: Stefano Brivio @ 2024-11-27 16:21 UTC (permalink / raw)
To: Laurent Vivier; +Cc: passt-dev, David Gibson
On Fri, 22 Nov 2024 17:43:27 +0100
Laurent Vivier <lvivier@redhat.com> wrote:
> This series of patches adds vhost-user support to passt
> and then allows passt to connect to QEMU network backend using
> virtqueue rather than a socket.
░█▀█░█▀█░█▀█░█░░░▀█▀░█▀▀░█▀▄░░░
░█▀█░█▀▀░█▀▀░█░░░░█░░█▀▀░█░█░░░
░▀░▀░▀░░░▀░░░▀▀▀░▀▀▀░▀▀▀░▀▀░░▀░
--
Stefano
^ permalink raw reply [flat|nested] 26+ messages in thread