2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
/* Per-queue descriptor ring sizes, selected by NIC class: smaller rings
   for virtio, larger for 10GE/40GE devices. NOTE(review): the 40GE rx
   value (4096-128) presumably works around a device ring-size limit —
   confirm against the PMD before changing. */
16 #define DPDK_NB_RX_DESC_DEFAULT 512
17 #define DPDK_NB_TX_DESC_DEFAULT 512
18 #define DPDK_NB_RX_DESC_VIRTIO 256
19 #define DPDK_NB_TX_DESC_VIRTIO 256
20 #define DPDK_NB_RX_DESC_10GE 2048
21 #define DPDK_NB_TX_DESC_10GE 2048
22 #define DPDK_NB_RX_DESC_40GE (4096-128)
23 #define DPDK_NB_TX_DESC_40GE 2048
/* X-macro tables of DPDK EAL command-line arguments, expanded by the
   startup-config parser elsewhere in this file. (Continuation lines are
   elided from this view.) */
25 /* These args appear by themselves */
26 #define foreach_eal_double_hyphen_predicate_arg \
34 #define foreach_eal_single_hyphen_mandatory_arg \
/* These args are preceded by "-" and followed by a single string */
38 #define foreach_eal_single_hyphen_arg \
40 _(mem-alloc-request, m) \
43 /* These args are preceded by "--" and followed by a single string */
44 #define foreach_eal_double_hyphen_arg \
/*
 * dpdk_rx_burst — pull up to VLIB_FRAME_SIZE packets from one rx queue of
 * device <xd> into xd->rx_vectors[queue_id], dispatching on device type
 * (physical ethernet PMD, vhost-user, or KNI).
 *
 * NOTE(review): this view of the function is incomplete (interior lines
 * are elided); comments describe only the visible code. The declarations
 * of n_buffers / n_left / bytes / i / offset are among the elided lines.
 */
52 dpdk_rx_burst ( dpdk_main_t * dm, dpdk_device_t * xd, u16 queue_id)
58 n_left = VLIB_FRAME_SIZE;
61 if (PREDICT_TRUE(xd->dev_type == VNET_DPDK_DEV_ETH))
/* Physical NIC: keep calling rte_eth_rx_burst, appending each chunk to
   the rx vector, until the frame is full or the device yields a short
   chunk (see heuristic below). */
65 n_this_chunk = rte_eth_rx_burst (xd->device_index, queue_id,
66 xd->rx_vectors[queue_id] + n_buffers, n_left);
67 n_buffers += n_this_chunk;
68 n_left -= n_this_chunk;
/* Empirically, DPDK r1.8 produces vectors w/ 32 or fewer elts; a chunk
   shorter than 32 is taken to mean the rx ring is drained. */
70 /* Empirically, DPDK r1.8 produces vectors w/ 32 or fewer elts */
71 if (n_this_chunk < 32)
75 else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
77 vlib_main_t * vm = vlib_get_main();
78 vlib_buffer_main_t * bm = vm->buffer_main;
79 unsigned socket_id = rte_socket_id();
/* DPDK >= 2.2 supports multiqueue vhost: each queue pair occupies
   VIRTIO_QNUM consecutive virtqueue slots, so compute the slot offset.
   (The #else branch for older DPDK is elided from this view.) */
82 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
83 offset = queue_id * VIRTIO_QNUM;
85 struct vhost_virtqueue *vq =
86 xd->vu_vhost_dev.virtqueue[offset + VIRTIO_TXQ];
/* Bail out early if the guest's TX virtqueue is not enabled or the
   interface is not running (early-return bodies elided). */
88 if (PREDICT_FALSE(!vq->enabled))
91 if (PREDICT_FALSE(!xd->vu_is_running))
/* Guest TX is our RX: dequeue guest-transmitted packets into mbufs
   allocated from this socket's pktmbuf pool. */
95 n_buffers = rte_vhost_dequeue_burst(&xd->vu_vhost_dev, offset + VIRTIO_TXQ,
96 bm->pktmbuf_pools[socket_id],
97 xd->rx_vectors[queue_id], VLIB_FRAME_SIZE);
/* Sum dequeued bytes for the per-vring statistics updated below. */
100 struct rte_mbuf **pkts = xd->rx_vectors[queue_id];
101 for (i = 0; i < n_buffers; i++) {
102 struct rte_mbuf *buff = pkts[i];
103 bytes += rte_pktmbuf_data_len(buff);
106 f64 now = vlib_time_now (vm);
108 dpdk_vu_vring *vring = NULL;
/* Interrupt the guest on the TX vring when packets are pending and
   either the coalescing deadline has passed or the pending count
   exceeds the configured coalesce-frames threshold. */
109 /* send pending interrupts if needed */
110 if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_TXQ)) {
111 vring = &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
112 vring->n_since_last_int += n_buffers;
114 if ((vring->n_since_last_int && (vring->int_deadline < now))
115 || (vring->n_since_last_int > dm->vhost_coalesce_frames))
116 dpdk_vhost_user_send_interrupt(vm, xd, offset + VIRTIO_TXQ);
/* Account packets/bytes on the RX vring and send any overdue
   interrupt there as well. */
119 vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
120 vring->packets += n_buffers;
121 vring->bytes += bytes;
123 if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_RXQ)) {
124 if (vring->n_since_last_int && (vring->int_deadline < now))
125 dpdk_vhost_user_send_interrupt(vm, xd, offset + VIRTIO_RXQ);
/* KNI (kernel NIC interface): single burst, then service any pending
   control requests from the kernel side. */
129 #ifdef RTE_LIBRTE_KNI
130 else if (xd->dev_type == VNET_DPDK_DEV_KNI)
132 n_buffers = rte_kni_rx_burst(xd->kni, xd->rx_vectors[queue_id], VLIB_FRAME_SIZE);
133 rte_kni_handle_request(xd->kni);
/*
 * dpdk_update_counters — snapshot a PMD device's hardware stats and fold
 * the deltas since the last snapshot into vnet's per-interface simple
 * counters (rx-no-buf, rx-miss, rx-error), then refresh extended stats.
 *
 * NOTE(review): this view of the function is incomplete (interior lines,
 * including the declaration of `len` and the closing scope, are elided);
 * comments describe only the visible code.
 */
146 dpdk_update_counters (dpdk_device_t * xd, f64 now)
148 vlib_simple_counter_main_t * cm;
149 vnet_main_t * vnm = vnet_get_main();
150 u32 my_cpu = os_get_cpu_number();
151 u64 rxerrors, last_rxerrors;
/* Non-PMD device types (vhost-user, KNI) have no rte_eth stats. */
154 /* only update counters for PMD interfaces */
155 if (xd->dev_type != VNET_DPDK_DEV_ETH)
159 * DAW-FIXME: VMXNET3 device stop/start doesn't work,
160 * therefore fake the stop in the dpdk driver by
161 * silently dropping all of the incoming pkts instead of
162 * stopping the driver / hardware.
/* admin_up uses 0xff as the "really up" sentinel for the VMXNET3
   workaround above; skip stats collection otherwise (body elided). */
164 if (xd->admin_up != 0xff)
/* Preserve the previous snapshot in last_stats, then fetch fresh
   hardware counters so deltas can be computed below. */
166 xd->time_last_stats_update = now ? now : xd->time_last_stats_update;
167 memcpy (&xd->last_stats, &xd->stats, sizeof (xd->last_stats));
168 rte_eth_stats_get (xd->device_index, &xd->stats);
/* rx_nombuf delta -> RX_NO_BUF counter. */
170 /* maybe bump interface rx no buffer counter */
171 if (PREDICT_FALSE (xd->stats.rx_nombuf != xd->last_stats.rx_nombuf))
173 cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
174 VNET_INTERFACE_COUNTER_RX_NO_BUF);
176 vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
177 xd->stats.rx_nombuf -
178 xd->last_stats.rx_nombuf);
/* imissed delta -> RX_MISS counter. */
181 /* missed pkt counter */
182 if (PREDICT_FALSE (xd->stats.imissed != xd->last_stats.imissed))
184 cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
185 VNET_INTERFACE_COUNTER_RX_MISS);
187 vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
189 xd->last_stats.imissed);
/* DPDK >= 2.2 folded ibadcrc/ibadlen into ierrors; older releases
   report them separately, so sum all three there. */
191 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
192 rxerrors = xd->stats.ierrors;
193 last_rxerrors = xd->last_stats.ierrors;
195 rxerrors = xd->stats.ibadcrc
196 + xd->stats.ibadlen + xd->stats.ierrors;
197 last_rxerrors = xd->last_stats.ibadcrc
198 + xd->last_stats.ibadlen + xd->last_stats.ierrors;
/* rx-error delta -> RX_ERROR counter. */
201 if (PREDICT_FALSE (rxerrors != last_rxerrors))
203 cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
204 VNET_INTERFACE_COUNTER_RX_ERROR);
206 vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
207 rxerrors - last_rxerrors);
/* Extended stats: first call with NULL probes the count, then the
   xstats vector is grown to fit and filled in place. */
211 if ((len = rte_eth_xstats_get(xd->device_index, NULL, 0)) > 0)
213 vec_validate(xd->xstats, len - 1);
214 len = rte_eth_xstats_get(xd->device_index, xd->xstats, vec_len(xd->xstats));
215 ASSERT(vec_len(xd->xstats) == len);
216 _vec_len(xd->xstats) = len;