public inbox for passt-dev@passt.top
From: David Gibson <david@gibson.dropbear.id.au>
To: Laurent Vivier <lvivier@redhat.com>
Cc: passt-dev@passt.top
Subject: Re: [PATCH 18/24] vhost-user: introduce virtio API
Date: Tue, 6 Feb 2024 14:51:31 +1100
Message-ID: <ZcGsw-VYtplylVC2@zatzit>
In-Reply-To: <20240202141151.3762941-19-lvivier@redhat.com>

On Fri, Feb 02, 2024 at 03:11:45PM +0100, Laurent Vivier wrote:
> Add virtio.c and virtio.h that define the functions needed
> to manage virtqueues.
> 
> Signed-off-by: Laurent Vivier <lvivier@redhat.com>

When importing a batch of code from outside, I think we need to choose
between two extremes:

  1) Treat this as a "vendored" dependency.  Keep the imported code
     byte-for-byte identical to the original source, possibly with
     some integration glue in separate files.

  2) Fully assimilate: treat this as our own code, inspired by the
     original source.  Rewrite as much as we need to match our own
     conventions.

Currently, this is somewhere in between: we have some changes for the
passt tree (e.g. tab indents), but other things retain qemu style
(e.g. CamelCase, typedefs, and braces around single line clauses).
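
To illustrate with a made-up fragment (not taken from either tree),
the same declaration in the two styles:

	/* qemu style, as imported */
	typedef struct VuVirtq {
		VuRing vring;
	} VuVirtq;

	/* fully assimilated passt style would be more like */
	struct vu_virtq {
		struct vu_ring vring;
	};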

> ---
>  Makefile |   4 +-
>  util.h   |  11 ++
>  virtio.c | 484 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  virtio.h | 121 ++++++++++++++
>  4 files changed, 618 insertions(+), 2 deletions(-)
>  create mode 100644 virtio.c
>  create mode 100644 virtio.h
> 
> diff --git a/Makefile b/Makefile
> index bf370b6ec2e6..ae1daa6b2b50 100644
> --- a/Makefile
> +++ b/Makefile
> @@ -47,7 +47,7 @@ FLAGS += -DDUAL_STACK_SOCKETS=$(DUAL_STACK_SOCKETS)
>  PASST_SRCS = arch.c arp.c checksum.c conf.c dhcp.c dhcpv6.c flow.c icmp.c \
>  	igmp.c isolation.c lineread.c log.c mld.c ndp.c netlink.c packet.c \
>  	passt.c pasta.c pcap.c pif.c port_fwd.c tap.c tcp.c tcp_splice.c \
> -	tcp_buf.c udp.c util.c iov.c ip.c
> +	tcp_buf.c udp.c util.c iov.c ip.c virtio.c
>  QRAP_SRCS = qrap.c
>  SRCS = $(PASST_SRCS) $(QRAP_SRCS)
>  
> @@ -57,7 +57,7 @@ PASST_HEADERS = arch.h arp.h checksum.h conf.h dhcp.h dhcpv6.h flow.h \
>  	flow_table.h icmp.h inany.h isolation.h lineread.h log.h ndp.h \
>  	netlink.h packet.h passt.h pasta.h pcap.h pif.h port_fwd.h siphash.h \
>  	tap.h tcp.h tcp_conn.h tcp_splice.h tcp_buf.h tcp_internal.h udp.h \
> -	util.h iov.h ip.h
> +	util.h iov.h ip.h virtio.h
>  HEADERS = $(PASST_HEADERS) seccomp.h
>  
>  C := \#include <linux/tcp.h>\nstruct tcp_info x = { .tcpi_snd_wnd = 0 };
> diff --git a/util.h b/util.h
> index f7c3dfee9972..a80024e3b797 100644
> --- a/util.h
> +++ b/util.h
> @@ -43,6 +43,9 @@
>  #define ROUND_DOWN(x, y)	((x) & ~((y) - 1))
>  #define ROUND_UP(x, y)		(((x) + (y) - 1) & ~((y) - 1))
>  
> +#define ALIGN_DOWN(n, m)	((n) / (m) * (m))
> +#define ALIGN_UP(n, m)		ALIGN_DOWN((n) + (m) - 1, (m))

It would be nice to move these earlier in the series and use them for
patch 3.
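
Unlike ROUND_DOWN()/ROUND_UP() just above, these work for any
alignment, not only powers of two.  For instance (illustrative values
only):

	ALIGN_DOWN(13, 8) == 8,   ALIGN_UP(13, 8) == 16
	ALIGN_DOWN(12, 6) == 12,  ALIGN_UP(13, 6) == 18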

>  #define MAX_FROM_BITS(n)	(((1U << (n)) - 1))
>  
>  #define BIT(n)			(1UL << (n))
> @@ -110,6 +113,14 @@
>  #define	htonl_constant(x)	(__bswap_constant_32(x))
>  #endif
>  
> +#define  barrier()		do { __asm__ __volatile__("" ::: "memory"); } while (0)
> +#define smp_mb()		do { barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); } while (0)
> +#define smp_mb_release()	do { barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); } while (0)
> +#define smp_mb_acquire()	do { barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); } while (0)
> +
> +#define smp_wmb()	smp_mb_release()
> +#define smp_rmb()	smp_mb_acquire()
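
For what it's worth, the pairing these are meant for on a split
virtqueue is the usual one: the producer writes a ring entry, issues
smp_wmb(), then publishes the new index; the consumer reads the
index, issues smp_rmb(), then reads the entry.  A rough sketch
(publish_entry()/read_entry() are made-up helpers, not in this
patch):

	/* producer */
	publish_entry(vq, elem);	/* write the used ring slot */
	smp_wmb();			/* entry visible before index */
	vq->vring.used->idx = htole16(new_idx);

	/* consumer */
	idx = le16toh(vq->vring.used->idx);
	smp_rmb();			/* read index before entries */
	read_entry(vq, idx);
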
> +
>  #define NS_FN_STACK_SIZE	(RLIMIT_STACK_VAL * 1024 / 8)
>  int do_clone(int (*fn)(void *), char *stack_area, size_t stack_size, int flags,
>  	     void *arg);
> diff --git a/virtio.c b/virtio.c
> new file mode 100644
> index 000000000000..1edd4155eec2
> --- /dev/null
> +++ b/virtio.c
> @@ -0,0 +1,484 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +
> +/* some parts copied from QEMU subprojects/libvhost-user/libvhost-user.c */
> +
> +#include <stddef.h>
> +#include <endian.h>
> +#include <string.h>
> +#include <errno.h>
> +#include <sys/eventfd.h>
> +#include <sys/socket.h>
> +
> +#include "util.h"
> +#include "virtio.h"
> +
> +#define VIRTQUEUE_MAX_SIZE 1024
> +
> +/* Translate guest physical address to our virtual address.  */
> +static void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
> +{
> +	unsigned int i;
> +
> +	if (*plen == 0) {
> +		return NULL;
> +	}
> +
> +	/* Find matching memory region.  */
> +	for (i = 0; i < dev->nregions; i++) {
> +		VuDevRegion *r = &dev->regions[i];
> +
> +		if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
> +			if ((guest_addr + *plen) > (r->gpa + r->size)) {
> +				*plen = r->gpa + r->size - guest_addr;
> +			}
> +			return (void *)(guest_addr - (uintptr_t)r->gpa +
> +					(uintptr_t)r->mmap_addr + r->mmap_offset);
> +		}
> +	}
> +
> +	return NULL;
> +}
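
Not a problem, just noting for other readers: *plen is in/out here,
so a range that crosses the end of a region is clamped and the caller
has to loop for the rest.  E.g. (made-up addresses), with a region at
gpa 0x1000 of size 0x1800:

	uint64_t len = 0x2000;
	void *va = vu_gpa_to_va(dev, &len, 0x1000);
	/* va maps the start of the range; len is now 0x1800 and the
	 * remaining 0x800 bytes need a second lookup */
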
> +
> +static inline uint16_t vring_avail_flags(VuVirtq *vq)
> +{
> +	return le16toh(vq->vring.avail->flags);
> +}
> +
> +static inline uint16_t vring_avail_idx(VuVirtq *vq)
> +{
> +	vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);
> +
> +	return vq->shadow_avail_idx;
> +}
> +
> +static inline uint16_t vring_avail_ring(VuVirtq *vq, int i)
> +{
> +	return le16toh(vq->vring.avail->ring[i]);
> +}
> +
> +static inline uint16_t vring_get_used_event(VuVirtq *vq)
> +{
> +	return vring_avail_ring(vq, vq->vring.num);
> +}
> +
> +static bool virtqueue_get_head(VuDev *dev, VuVirtq *vq,
> +		   unsigned int idx, unsigned int *head)
> +{
> +	/* Grab the next descriptor number they're advertising, and increment
> +	 * the index we've seen. */
> +	*head = vring_avail_ring(vq, idx % vq->vring.num);
> +
> +	/* If their number is silly, that's a fatal mistake. */
> +	if (*head >= vq->vring.num) {
> +		vu_panic(dev, "Guest says index %u is available", *head);
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
> +static int
> +virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
> +			     uint64_t addr, size_t len)
> +{
> +	struct vring_desc *ori_desc;
> +	uint64_t read_len;
> +
> +	if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
> +		return -1;
> +	}
> +
> +	if (len == 0) {
> +		return -1;
> +	}
> +
> +	while (len) {
> +		read_len = len;
> +		ori_desc = vu_gpa_to_va(dev, &read_len, addr);
> +		if (!ori_desc) {
> +			return -1;
> +		}
> +
> +		memcpy(desc, ori_desc, read_len);
> +		len -= read_len;
> +		addr += read_len;
> +		desc += read_len;

Hrm... this is copied as-is from qemu, but it looks wrong.  Why would
we be advancing the descriptor pointer by a number of descriptor
entries equal to the number of bytes in this chunk?
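
I'd guess (untested, just from reading) that the intent was to
advance by bytes rather than entries, something like:

	memcpy(desc, ori_desc, read_len);
	len -= read_len;
	addr += read_len;
	desc = (struct vring_desc *)((char *)desc + read_len);

It only works as written when the whole table is mapped in a single
region, so the loop runs exactly once and the bogus advance is never
actually used.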

> +	}
> +
> +	return 0;
> +}
> +
> +enum {
> +	VIRTQUEUE_READ_DESC_ERROR = -1,
> +	VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
> +	VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
> +};
> +
> +static int
> +virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
> +			 int i, unsigned int max, unsigned int *next)
> +{
> +	/* If this descriptor says it doesn't chain, we're done. */
> +	if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) {
> +		return VIRTQUEUE_READ_DESC_DONE;
> +	}
> +
> +	/* Check they're not leading us off end of descriptors. */
> +	*next = le16toh(desc[i].next);
> +	/* Make sure compiler knows to grab that: we don't want it changing! */
> +	smp_wmb();
> +
> +	if (*next >= max) {
> +		vu_panic(dev, "Desc next is %u", *next);
> +		return VIRTQUEUE_READ_DESC_ERROR;
> +	}
> +
> +	return VIRTQUEUE_READ_DESC_MORE;
> +}
> +
> +bool vu_queue_empty(VuDev *dev, VuVirtq *vq)
> +{
> +	if (dev->broken ||
> +		!vq->vring.avail) {
> +		return true;
> +	}
> +
> +	if (vq->shadow_avail_idx != vq->last_avail_idx) {
> +		return false;
> +	}
> +
> +	return vring_avail_idx(vq) == vq->last_avail_idx;
> +}
> +
> +static bool vring_notify(VuDev *dev, VuVirtq *vq)
> +{
> +	uint16_t old, new;
> +	bool v;
> +
> +	/* We need to expose used array entries before checking used event. */
> +	smp_mb();
> +
> +	/* Always notify when queue is empty (when the feature is acknowledged) */
> +	if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
> +		!vq->inuse && vu_queue_empty(dev, vq)) {
> +		return true;
> +	}
> +
> +	if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
> +		return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
> +	}
> +
> +	v = vq->signalled_used_valid;
> +	vq->signalled_used_valid = true;
> +	old = vq->signalled_used;
> +	new = vq->signalled_used = vq->used_idx;
> +	return !v || vring_need_event(vring_get_used_event(vq), new, old);
> +}
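
For anyone reading along: vring_need_event() is the helper from the
kernel's <linux/virtio_ring.h>, which (from memory) boils down to:

	static inline int vring_need_event(uint16_t event_idx,
					   uint16_t new, uint16_t old)
	{
		return (uint16_t)(new - event_idx - 1) <
		       (uint16_t)(new - old);
	}

i.e. notify only if the index the guest asked to be woken at falls
within the batch of entries published since the last notification,
with wraparound handled by the 16-bit arithmetic.
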
> +
> +void vu_queue_notify(VuDev *dev, VuVirtq *vq)
> +{
> +	if (dev->broken || !vq->vring.avail) {
> +		return;
> +	}
> +
> +	if (!vring_notify(dev, vq)) {
> +		debug("skipped notify...");
> +		return;
> +	}
> +
> +	if (eventfd_write(vq->call_fd, 1) < 0) {
> +		vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
> +	}
> +}
> +
> +static inline void vring_set_avail_event(VuVirtq *vq, uint16_t val)
> +{
> +	uint16_t val_le = htole16(val);
> +
> +	if (!vq->notification) {
> +		return;
> +	}
> +
> +	memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t));
> +}
> +
> +static bool virtqueue_map_desc(VuDev *dev,
> +			       unsigned int *p_num_sg, struct iovec *iov,
> +			       unsigned int max_num_sg,
> +			       uint64_t pa, size_t sz)
> +{
> +	unsigned num_sg = *p_num_sg;
> +
> +	ASSERT(num_sg <= max_num_sg);
> +
> +	if (!sz) {
> +		vu_panic(dev, "virtio: zero sized buffers are not allowed");
> +		return false;
> +	}
> +
> +	while (sz) {
> +		uint64_t len = sz;
> +
> +		if (num_sg == max_num_sg) {
> +			vu_panic(dev, "virtio: too many descriptors in indirect table");
> +			return false;
> +		}
> +
> +		iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
> +		if (iov[num_sg].iov_base == NULL) {
> +			vu_panic(dev, "virtio: invalid address for buffers");
> +			return false;
> +		}
> +		iov[num_sg].iov_len = len;
> +		num_sg++;
> +		sz -= len;
> +		pa += len;
> +	}
> +
> +	*p_num_sg = num_sg;
> +	return true;
> +}
> +
> +static void * virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num, unsigned char *buffer)
> +{
> +	VuVirtqElement *elem;
> +	size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
> +	size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
> +	size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
> +
> +	if (out_sg_end > 65536)
> +		return NULL;
> +
> +	elem = (void *)buffer;
> +	elem->out_num = out_num;
> +	elem->in_num = in_num;
> +	elem->in_sg = (struct iovec *)((uintptr_t)elem + in_sg_ofs);
> +	elem->out_sg = (struct iovec *)((uintptr_t)elem + out_sg_ofs);
> +	return elem;
> +}
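
So, if I follow, the caller's buffer ends up laid out as (sketch):

	| element header (sz) | pad | in_sg[in_num] | out_sg[out_num] |
	^ elem                ^ ALIGN_UP(sz, __alignof__(struct iovec))

with the 65536 cap presumably sized to match the fixed buffers the
callers pass in.
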
> +
> +static void *
> +vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz, unsigned char *buffer)
> +{
> +	struct vring_desc *desc = vq->vring.desc;
> +	uint64_t desc_addr, read_len;
> +	unsigned int desc_len;
> +	unsigned int max = vq->vring.num;
> +	unsigned int i = idx;
> +	VuVirtqElement *elem;
> +	unsigned int out_num = 0, in_num = 0;
> +	struct iovec iov[VIRTQUEUE_MAX_SIZE];
> +	struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
> +	int rc;
> +
> +	if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
> +		if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
> +			vu_panic(dev, "Invalid size for indirect buffer table");
> +			return NULL;
> +		}
> +
> +		/* loop over the indirect descriptor table */
> +		desc_addr = le64toh(desc[i].addr);
> +		desc_len = le32toh(desc[i].len);
> +		max = desc_len / sizeof(struct vring_desc);
> +		read_len = desc_len;
> +		desc = vu_gpa_to_va(dev, &read_len, desc_addr);
> +		if (desc && read_len != desc_len) {
> +			/* Failed to use zero copy */
> +			desc = NULL;
> +			if (!virtqueue_read_indirect_desc(dev, desc_buf, desc_addr, desc_len)) {
> +				desc = desc_buf;
> +			}
> +		}
> +		if (!desc) {
> +			vu_panic(dev, "Invalid indirect buffer table");
> +			return NULL;
> +		}
> +		i = 0;
> +	}
> +
> +	/* Collect all the descriptors */
> +	do {
> +		if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
> +			if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
> +						VIRTQUEUE_MAX_SIZE - out_num,
> +						le64toh(desc[i].addr),
> +						le32toh(desc[i].len))) {
> +				return NULL;
> +			}
> +		} else {
> +			if (in_num) {
> +				vu_panic(dev, "Incorrect order for descriptors");
> +				return NULL;
> +			}
> +			if (!virtqueue_map_desc(dev, &out_num, iov,
> +						VIRTQUEUE_MAX_SIZE,
> +						le64toh(desc[i].addr),
> +						le32toh(desc[i].len))) {
> +				return NULL;
> +			}
> +		}
> +
> +		/* If we've got too many, that implies a descriptor loop. */
> +		if ((in_num + out_num) > max) {
> +			vu_panic(dev, "Looped descriptor");
> +			return NULL;
> +		}
> +		rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
> +	} while (rc == VIRTQUEUE_READ_DESC_MORE);
> +
> +	if (rc == VIRTQUEUE_READ_DESC_ERROR) {
> +		vu_panic(dev, "read descriptor error");
> +		return NULL;
> +	}
> +
> +	/* Now copy what we have collected and mapped */
> +	elem = virtqueue_alloc_element(sz, out_num, in_num, buffer);
> +	if (!elem) {
> +		return NULL;
> +	}
> +	elem->index = idx;
> +	for (i = 0; i < out_num; i++) {
> +		elem->out_sg[i] = iov[i];
> +	}
> +	for (i = 0; i < in_num; i++) {
> +		elem->in_sg[i] = iov[out_num + i];
> +	}
> +
> +	return elem;
> +}
> +
> +void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz, unsigned char *buffer)
> +{
> +	unsigned int head;
> +	VuVirtqElement *elem;
> +
> +	if (dev->broken || !vq->vring.avail) {
> +		return NULL;
> +	}
> +
> +	if (vu_queue_empty(dev, vq)) {
> +		return NULL;
> +	}
> +	/*
> +	 * Needed after virtio_queue_empty(), see comment in
> +	 * virtqueue_num_heads().
> +	 */
> +	smp_rmb();
> +
> +	if (vq->inuse >= vq->vring.num) {
> +		vu_panic(dev, "Virtqueue size exceeded");
> +		return NULL;
> +	}
> +
> +	if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
> +		return NULL;
> +	}
> +
> +	if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
> +		vring_set_avail_event(vq, vq->last_avail_idx);
> +	}
> +
> +	elem = vu_queue_map_desc(dev, vq, head, sz, buffer);
> +
> +	if (!elem) {
> +		return NULL;
> +	}
> +
> +	vq->inuse++;
> +
> +	return elem;
> +}
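
As a usage sketch (made-up device loop; 'written' stands for however
many bytes the device placed in the in_sg buffers):

	unsigned char buf[65536];
	VuVirtqElement *elem;

	while ((elem = vu_queue_pop(dev, vq, sizeof(*elem), buf))) {
		/* read out elem->out_sg[], fill elem->in_sg[] */
		vu_queue_push(dev, vq, elem, written);
	}
	vu_queue_notify(dev, vq);
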
> +
> +void vu_queue_detach_element(VuDev *dev, VuVirtq *vq,
> +			     unsigned int index, size_t len)
> +{
> +	(void)dev;
> +	(void)index;
> +	(void)len;
> +
> +	vq->inuse--;
> +	/* unmap, when DMA support is added */
> +}
> +
> +void vu_queue_unpop(VuDev *dev, VuVirtq *vq, unsigned int index, size_t len)
> +{
> +	vq->last_avail_idx--;
> +	vu_queue_detach_element(dev, vq, index, len);
> +}
> +
> +bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
> +{
> +	(void)dev;
> +	if (num > vq->inuse) {
> +		return false;
> +	}
> +	vq->last_avail_idx -= num;
> +	vq->inuse -= num;
> +	return true;
> +}
> +
> +static inline void vring_used_write(VuVirtq *vq,
> +				    struct vring_used_elem *uelem, int i)
> +{
> +	struct vring_used *used = vq->vring.used;
> +
> +	used->ring[i] = *uelem;
> +}
> +
> +void vu_queue_fill_by_index(VuDev *dev, VuVirtq *vq, unsigned int index,
> +			  unsigned int len, unsigned int idx)
> +{
> +	struct vring_used_elem uelem;
> +
> +	if (dev->broken || !vq->vring.avail)
> +		return;
> +
> +	idx = (idx + vq->used_idx) % vq->vring.num;
> +
> +	uelem.id = htole32(index);
> +	uelem.len = htole32(len);
> +	vring_used_write(vq, &uelem, idx);
> +}
> +
> +void vu_queue_fill(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
> +		   unsigned int len, unsigned int idx)
> +{
> +	vu_queue_fill_by_index(dev, vq, elem->index, len, idx);
> +}
> +
> +static inline void vring_used_idx_set(VuVirtq *vq, uint16_t val)
> +{
> +	vq->vring.used->idx = htole16(val);
> +
> +	vq->used_idx = val;
> +}
> +
> +void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
> +{
> +	uint16_t old, new;
> +
> +	if (dev->broken ||
> +		!vq->vring.avail) {
> +		return;
> +	}
> +
> +	/* Make sure buffer is written before we update index. */
> +	smp_wmb();
> +
> +	old = vq->used_idx;
> +	new = old + count;
> +	vring_used_idx_set(vq, new);
> +	vq->inuse -= count;
> +	if ((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old)) {
> +		vq->signalled_used_valid = false;
> +	}
> +}
> +
> +void vu_queue_push(VuDev *dev, VuVirtq *vq,
> +		   VuVirtqElement *elem, unsigned int len)
> +{
> +	vu_queue_fill(dev, vq, elem, len, 0);
> +	vu_queue_flush(dev, vq, 1);
> +}
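
One nice property of the fill/flush split, by the way: completions
can be batched under a single index update and, at most, one
notification (sketch, with made-up elem[]/len[] arrays):

	for (i = 0; i < n; i++)
		vu_queue_fill(dev, vq, elem[i], len[i], i);
	vu_queue_flush(dev, vq, n);
	vu_queue_notify(dev, vq);
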
> +
> diff --git a/virtio.h b/virtio.h
> new file mode 100644
> index 000000000000..e334355b0f30
> --- /dev/null
> +++ b/virtio.h
> @@ -0,0 +1,121 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +//
> +/* some parts copied from QEMU subprojects/libvhost-user/libvhost-user.h */
> +
> +#ifndef VIRTIO_H
> +#define VIRTIO_H
> +
> +#include <stdbool.h>
> +#include <linux/vhost_types.h>
> +
> +#define VIRTQUEUE_MAX_SIZE 1024
> +
> +#define vu_panic(vdev, ...)		\
> +	do {				\
> +		(vdev)->broken = true;	\
> +		err( __VA_ARGS__ );	\
> +	} while (0)
> +
> +typedef struct VuRing {
> +	unsigned int num;
> +	struct vring_desc *desc;
> +	struct vring_avail *avail;
> +	struct vring_used *used;
> +	uint64_t log_guest_addr;
> +	uint32_t flags;
> +} VuRing;
> +
> +typedef struct VuVirtq {
> +	VuRing vring;
> +
> +	/* Next head to pop */
> +	uint16_t last_avail_idx;
> +
> +	/* Last avail_idx read from VQ. */
> +	uint16_t shadow_avail_idx;
> +
> +	uint16_t used_idx;
> +
> +	/* Last used index value we have signalled on */
> +	uint16_t signalled_used;
> +
> +	/* Whether signalled_used is valid */
> +	bool signalled_used_valid;
> +
> +	bool notification;
> +
> +	unsigned int inuse;
> +
> +	int call_fd;
> +	int kick_fd;
> +	int err_fd;
> +	unsigned int enable;
> +	bool started;
> +
> +	/* Guest addresses of our ring */
> +	struct vhost_vring_addr vra;
> +} VuVirtq;
> +
> +typedef struct VuDevRegion {
> +	uint64_t gpa;
> +	uint64_t size;
> +	uint64_t qva;
> +	uint64_t mmap_offset;
> +	uint64_t mmap_addr;
> +} VuDevRegion;
> +
> +#define VHOST_USER_MAX_QUEUES 2
> +
> +/*
> + * Set a reasonable maximum number of ram slots, which will be supported by
> + * any architecture.
> + */
> +#define VHOST_USER_MAX_RAM_SLOTS 32
> +
> +typedef struct VuDev {
> +	uint32_t nregions;
> +	VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
> +	VuVirtq vq[VHOST_USER_MAX_QUEUES];
> +	uint64_t features;
> +	uint64_t protocol_features;
> +	bool broken;
> +	int hdrlen;
> +} VuDev;
> +
> +typedef struct VuVirtqElement {
> +	unsigned int index;
> +	unsigned int out_num;
> +	unsigned int in_num;
> +	struct iovec *in_sg;
> +	struct iovec *out_sg;
> +} VuVirtqElement;
> +
> +static inline bool has_feature(uint64_t features, unsigned int fbit)
> +{
> +	return !!(features & (1ULL << fbit));
> +}
> +
> +static inline bool vu_has_feature(VuDev *vdev, unsigned int fbit)
> +{
> +	return has_feature(vdev->features, fbit);
> +}
> +
> +static inline bool vu_has_protocol_feature(VuDev *vdev, unsigned int fbit)
> +{
> +	return has_feature(vdev->protocol_features, fbit);
> +}
> +
> +bool vu_queue_empty(VuDev *dev, VuVirtq *vq);
> +void vu_queue_notify(VuDev *dev, VuVirtq *vq);
> +void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz, unsigned char *buffer);
> +void vu_queue_detach_element(VuDev *dev, VuVirtq *vq, unsigned int index, size_t len);
> +void vu_queue_unpop(VuDev *dev, VuVirtq *vq, unsigned int index, size_t len);
> +bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);
> +
> +void vu_queue_fill_by_index(VuDev *dev, VuVirtq *vq, unsigned int index,
> +			    unsigned int len, unsigned int idx);
> +void vu_queue_fill(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, unsigned int len,
> +		   unsigned int idx);
> +void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count);
> +void vu_queue_push(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem, unsigned int len);
> +#endif /* VIRTIO_H */

-- 
David Gibson			| I'll have my music baroque, and my code
david AT gibson.dropbear.id.au	| minimalist, thank you.  NOT _the_ _other_
				| _way_ _around_!
http://www.ozlabs.org/~dgibson

Thread overview: 83+ messages
2024-02-02 14:11 [PATCH 00/24] Add vhost-user support to passt Laurent Vivier
2024-02-02 14:11 ` [PATCH 01/24] iov: add some functions to manage iovec Laurent Vivier
2024-02-05  5:57   ` David Gibson
2024-02-06 14:28     ` Laurent Vivier
2024-02-07  1:01       ` David Gibson
2024-02-07 10:00         ` Laurent Vivier
2024-02-06 16:10   ` Stefano Brivio
2024-02-07 14:02     ` Laurent Vivier
2024-02-07 14:57       ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 02/24] pcap: add pcap_iov() Laurent Vivier
2024-02-05  6:25   ` David Gibson
2024-02-06 16:10   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 03/24] checksum: align buffers Laurent Vivier
2024-02-05  6:02   ` David Gibson
2024-02-07  9:01     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 04/24] checksum: add csum_iov() Laurent Vivier
2024-02-05  6:07   ` David Gibson
2024-02-07  9:02   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 05/24] util: move IP stuff from util.[ch] to ip.[ch] Laurent Vivier
2024-02-05  6:13   ` David Gibson
2024-02-07  9:03     ` Stefano Brivio
2024-02-08  0:04       ` David Gibson
2024-02-02 14:11 ` [PATCH 06/24] ip: move duplicate IPv4 checksum function to ip.h Laurent Vivier
2024-02-05  6:16   ` David Gibson
2024-02-07 10:40   ` Stefano Brivio
2024-02-07 23:43     ` David Gibson
2024-02-02 14:11 ` [PATCH 07/24] ip: introduce functions to compute the header part checksum for TCP/UDP Laurent Vivier
2024-02-05  6:20   ` David Gibson
2024-02-07 10:41   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 08/24] tcp: extract buffer management from tcp_send_flag() Laurent Vivier
2024-02-06  0:24   ` David Gibson
2024-02-08 16:57   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 09/24] tcp: extract buffer management from tcp_conn_tap_mss() Laurent Vivier
2024-02-06  0:47   ` David Gibson
2024-02-08 16:59   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 10/24] tcp: rename functions that manage buffers Laurent Vivier
2024-02-06  1:48   ` David Gibson
2024-02-08 17:10     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 11/24] tcp: move buffers management functions to their own file Laurent Vivier
2024-02-02 14:11 ` [PATCH 12/24] tap: make tap_update_mac() generic Laurent Vivier
2024-02-06  1:49   ` David Gibson
2024-02-08 17:10     ` Stefano Brivio
2024-02-09  5:02       ` David Gibson
2024-02-02 14:11 ` [PATCH 13/24] tap: export pool_flush()/tapX_handler()/packet_add() Laurent Vivier
2024-02-02 14:29   ` Laurent Vivier
2024-02-06  1:52   ` David Gibson
2024-02-11 23:15   ` Stefano Brivio
2024-02-12  2:22     ` David Gibson
2024-02-02 14:11 ` [PATCH 14/24] udp: move udpX_l2_buf_t and udpX_l2_mh_sock out of udp_update_hdrX() Laurent Vivier
2024-02-06  1:59   ` David Gibson
2024-02-11 23:16   ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 15/24] udp: rename udp_sock_handler() to udp_buf_sock_handler() Laurent Vivier
2024-02-06  2:14   ` David Gibson
2024-02-11 23:17     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 16/24] packet: replace struct desc by struct iovec Laurent Vivier
2024-02-06  2:25   ` David Gibson
2024-02-11 23:18     ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 17/24] vhost-user: compare mode MODE_PASTA and not MODE_PASST Laurent Vivier
2024-02-06  2:29   ` David Gibson
2024-02-02 14:11 ` [PATCH 18/24] vhost-user: introduce virtio API Laurent Vivier
2024-02-06  3:51   ` David Gibson [this message]
2024-02-11 23:18     ` Stefano Brivio
2024-02-12  2:26       ` David Gibson
2024-02-02 14:11 ` [PATCH 19/24] vhost-user: introduce vhost-user API Laurent Vivier
2024-02-07  2:13   ` David Gibson
2024-02-02 14:11 ` [PATCH 20/24] vhost-user: add vhost-user Laurent Vivier
2024-02-07  2:40   ` David Gibson
2024-02-11 23:19     ` Stefano Brivio
2024-02-12  2:47       ` David Gibson
2024-02-13 15:22         ` Stefano Brivio
2024-02-14  2:05           ` David Gibson
2024-02-11 23:19   ` Stefano Brivio
2024-02-12  2:49     ` David Gibson
2024-02-12 10:02       ` Laurent Vivier
2024-02-12 16:56         ` Stefano Brivio
2024-02-02 14:11 ` [PATCH 21/24] vhost-user: use guest buffer directly in vu_handle_tx() Laurent Vivier
2024-02-09  4:26   ` David Gibson
2024-02-02 14:11 ` [PATCH 22/24] tcp: vhost-user RX nocopy Laurent Vivier
2024-02-09  4:57   ` David Gibson
2024-02-02 14:11 ` [PATCH 23/24] udp: " Laurent Vivier
2024-02-09  5:00   ` David Gibson
2024-02-02 14:11 ` [PATCH 24/24] vhost-user: remove tap_send_frames_vu() Laurent Vivier
2024-02-09  5:01   ` David Gibson
