// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright Red Hat
* Author: Laurent Vivier <lvivier@redhat.com>
*
 * vu_common.c - vhost-user common UDP and TCP functions
*/

#include <unistd.h>
#include <sys/uio.h>
#include <sys/eventfd.h>
#include <linux/virtio_net.h>

#include "util.h"
#include "passt.h"
#include "tap.h"
#include "vhost_user.h"
#include "pcap.h"
#include "vu_common.h"

/**
 * vu_packet_check_range() - Check if a given memory zone is contained in
 * a mapped guest memory region
 * @buf: Array of the available memory regions
 * @offset: Offset of the data range in the packet descriptor
 * @len: Length of the desired data range
 * @start: Start of the packet descriptor
 *
 * Return: 0 if the zone is in a mapped memory region, -1 otherwise
 */
int vu_packet_check_range(void *buf, size_t offset, size_t len,
const char *start)
{
struct vu_dev_region *dev_region;
for (dev_region = buf; dev_region->mmap_addr; dev_region++) {
/* NOLINTNEXTLINE(performance-no-int-to-ptr) */
char *m = (char *)dev_region->mmap_addr;
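
		/* The range is valid only if it lies entirely within this
		 * region: it must start at or after the mapped address and
		 * end within mmap_offset + size bytes of it
		 */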
if (m <= start &&
start + offset + len <= m + dev_region->mmap_offset +
dev_region->size)
return 0;
}
return -1;
}

/**
 * vu_init_elem() - Initialize an array of virtqueue elements with one
 * iovec in each
 * @elem: Array of virtqueue elements to initialize
 * @iov: Array of iovecs to assign to the virtqueue elements
 * @elem_cnt: Number of virtqueue elements
 */
void vu_init_elem(struct vu_virtq_element *elem, struct iovec *iov, int elem_cnt)
{
int i;
for (i = 0; i < elem_cnt; i++)
vu_set_element(&elem[i], NULL, &iov[i]);
}

/**
 * vu_collect() - Collect virtio buffers from a given virtqueue
 * @vdev: vhost-user device
 * @vq: Virtqueue to collect from
 * @elem: Array of virtqueue elements; each element must be initialized
 * with one iovec entry in the in_sg array
 * @max_elem: Number of virtqueue elements in the array
 * @size: Maximum size of the data in the frame
 * @frame_size: The total size of the frame (output)
 *
 * Return: number of elements used to contain the frame
 */
int vu_collect(struct vu_dev *vdev, struct vu_virtq *vq,
struct vu_virtq_element *elem, int max_elem,
size_t size, size_t *frame_size)
{
size_t current_size = 0;
int elem_cnt = 0;
while (current_size < size && elem_cnt < max_elem) {
struct iovec *iov;
int ret;
ret = vu_queue_pop(vdev, vq, &elem[elem_cnt]);
if (ret < 0)
break;
if (elem[elem_cnt].in_num < 1) {
warn("virtio-net receive queue contains no in buffers");
vu_queue_detach_element(vq);
break;
}
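
		/* Only the first entry of the scatter-gather list is used;
		 * cap its length so the frame never exceeds @size
		 */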
iov = &elem[elem_cnt].in_sg[0];
if (iov->iov_len > size - current_size)
iov->iov_len = size - current_size;
current_size += iov->iov_len;
elem_cnt++;
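
		/* Without VIRTIO_NET_F_MRG_RXBUF a frame cannot span
		 * several buffers, so stop after the first element
		 */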
if (!vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
break;
}
if (frame_size)
*frame_size = current_size;
return elem_cnt;
}

/**
 * vu_set_vnethdr() - Set the virtio-net header
 * @vdev: vhost-user device
 * @vnethdr: Address of the header to set
 * @num_buffers: Number of guest buffers of the frame
 */
void vu_set_vnethdr(const struct vu_dev *vdev,
struct virtio_net_hdr_mrg_rxbuf *vnethdr,
int num_buffers)
{
vnethdr->hdr = VU_HEADER;
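
	/* num_buffers is only meaningful when VIRTIO_NET_F_MRG_RXBUF
	 * has been negotiated
	 */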
if (vu_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vnethdr->num_buffers = htole16(num_buffers);
}

/**
 * vu_flush() - Flush all the collected buffers to the vhost-user interface
 * @vdev: vhost-user device
 * @vq: vhost-user virtqueue
 * @elem: Array of virtqueue elements to send back to the virtqueue
 * @elem_cnt: Number of entries in the array
 */
void vu_flush(const struct vu_dev *vdev, struct vu_virtq *vq,
struct vu_virtq_element *elem, int elem_cnt)
{
int i;
for (i = 0; i < elem_cnt; i++)
vu_queue_fill(vq, &elem[i], elem[i].in_sg[0].iov_len, i);
vu_queue_flush(vq, elem_cnt);
vu_queue_notify(vdev, vq);
}

/**
 * vu_handle_tx() - Receive data from the TX virtqueue
 * @vdev: vhost-user device
 * @index: Index of the virtqueue
 * @now: Current timestamp
 */
static void vu_handle_tx(struct vu_dev *vdev, int index,
const struct timespec *now)
{
struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
struct vu_virtq *vq = &vdev->vq[index];
int hdrlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
int out_sg_count;
int count;
if (!VHOST_USER_IS_QUEUE_TX(index)) {
debug("vhost-user: index %d is not a TX queue", index);
return;
}
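
	/* Make sure the packet pools are empty before adding this batch */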
tap_flush_pools();
count = 0;
out_sg_count = 0;
while (count < VIRTQUEUE_MAX_SIZE) {
int ret;
vu_set_element(&elem[count], &out_sg[out_sg_count], NULL);
ret = vu_queue_pop(vdev, vq, &elem[count]);
if (ret < 0)
break;
out_sg_count += elem[count].out_num;
if (elem[count].out_num < 1) {
warn("virtio-net transmit queue contains no out buffers");
break;
}
ASSERT(elem[count].out_num == 1);
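
		/* Skip the virtio-net header and add the Ethernet frame
		 * to the tap packet pool
		 */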
tap_add_packet(vdev->context,
elem[count].out_sg[0].iov_len - hdrlen,
(char *)elem[count].out_sg[0].iov_base + hdrlen);
count++;
}
tap_handler(vdev->context, now);
if (count) {
int i;
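
		/* Return the used elements to the guest: no data is
		 * written back, so the length is zero
		 */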
for (i = 0; i < count; i++)
vu_queue_fill(vq, &elem[i], 0, i);
vu_queue_flush(vq, count);
vu_queue_notify(vdev, vq);
}
}

/**
 * vu_kick_cb() - Called on a kick event to start receiving data
 * @vdev: vhost-user device
 * @ref: epoll reference information
 * @now: Current timestamp
 */
void vu_kick_cb(struct vu_dev *vdev, union epoll_ref ref,
const struct timespec *now)
{
eventfd_t kick_data;
ssize_t rc;
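
	/* Drain the kick counter; its value is only used for debugging */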
rc = eventfd_read(ref.fd, &kick_data);
if (rc == -1)
die_perror("vhost-user kick eventfd_read()");
debug("vhost-user: ot kick_data: %016"PRIx64" idx:%d",
kick_data, ref.queue);
if (VHOST_USER_IS_QUEUE_TX(ref.queue))
vu_handle_tx(vdev, ref.queue, now);
}

/**
 * vu_send_single() - Send a buffer to the front-end using the RX virtqueue
 * @c: Execution context
 * @buf: Address of the buffer
 * @size: Size of the buffer
 *
 * Return: number of bytes sent, -1 if there is an error
 */
int vu_send_single(const struct ctx *c, const void *buf, size_t size)
{
struct vu_dev *vdev = c->vdev;
struct vu_virtq *vq = &vdev->vq[VHOST_USER_RX_QUEUE];
struct vu_virtq_element elem[VIRTQUEUE_MAX_SIZE];
struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
size_t total;
int elem_cnt;
int i;
debug("vu_send_single size %zu", size);
if (!vu_queue_enabled(vq) || !vu_queue_started(vq)) {
err("Got packet, but RX virtqueue not usable yet");
return 0;
}
vu_init_elem(elem, in_sg, VIRTQUEUE_MAX_SIZE);
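
	/* Reserve room for the virtio-net header that precedes the
	 * payload in the first buffer
	 */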
size += sizeof(struct virtio_net_hdr_mrg_rxbuf);
elem_cnt = vu_collect(vdev, vq, elem, VIRTQUEUE_MAX_SIZE, size, &total);
if (total < size) {
debug("vu_send_single: no space to send the data "
"elem_cnt %d size %zd", elem_cnt, total);
goto err;
}
vu_set_vnethdr(vdev, in_sg[0].iov_base, elem_cnt);
total -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
/* copy data from the buffer to the iovec */
iov_from_buf(in_sg, elem_cnt, sizeof(struct virtio_net_hdr_mrg_rxbuf),
buf, total);
if (*c->pcap) {
pcap_iov(in_sg, elem_cnt,
sizeof(struct virtio_net_hdr_mrg_rxbuf));
}
vu_flush(vdev, vq, elem, elem_cnt);
debug("vhost-user sent %zu", total);
return total;
err:
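	/* Give back to the virtqueue every element popped by vu_collect() */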
for (i = 0; i < elem_cnt; i++)
vu_queue_detach_element(vq);
return -1;
}