/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* RX/TX descriptor ring sizes, selected per NIC class: faster NICs get
 * deeper rings (more buffering under bursty load, at the cost of mbuf
 * memory), while virtio uses smaller rings. */
16 #define DPDK_NB_RX_DESC_DEFAULT 512
17 #define DPDK_NB_TX_DESC_DEFAULT 512
18 #define DPDK_NB_RX_DESC_VIRTIO 256
19 #define DPDK_NB_TX_DESC_VIRTIO 256
20 #define DPDK_NB_RX_DESC_10GE 2048
21 #define DPDK_NB_TX_DESC_10GE 2048
/* NOTE(review): 40GE RX is deliberately 4096-128, not a power of two —
 * presumably a PMD-specific ring-size constraint; confirm against the
 * i40e driver before changing. */
22 #define DPDK_NB_RX_DESC_40GE (4096-128)
23 #define DPDK_NB_TX_DESC_40GE 2048
24 #define DPDK_NB_RX_DESC_ENIC (4096+1024)
/* X-macro lists describing the DPDK EAL command-line arguments the plugin
 * understands; each _(name, ...) entry is expanded by the config parser.
 * NOTE(review): the continuation lines of these macros are elided in this
 * fragment — only the #define headers are visible here. */
/* These args appear by themselves (no value follows) */
26 /* These args appear by themselves */
27 #define foreach_eal_double_hyphen_predicate_arg \
/* Single-hyphen args that must always be supplied to the EAL */
35 #define foreach_eal_single_hyphen_mandatory_arg \
/* Single-hyphen args that take a value, e.g. "-m <mem>" */
39 #define foreach_eal_single_hyphen_arg \
41 _(mem-alloc-request, m) \
/* These args are preceded by "--" and followed by a single string value */
44 /* These args are preceeded by "--" and followed by a single string */
45 #define foreach_eal_double_hyphen_arg \
/*
 * dpdk_rx_burst - pull a burst of received packets for one device queue.
 *
 * Fills xd->rx_vectors[queue_id] with up to VLIB_FRAME_SIZE rte_mbuf
 * pointers, dispatching on xd->dev_type: physical ethernet PMDs
 * (rte_eth_rx_burst), vhost-user (rte_vhost_dequeue_burst) and, when
 * compiled in, KNI (rte_kni_rx_burst).
 *
 * NOTE(review): this fragment has interior lines elided (loop braces,
 * return statements, #else/#endif arms) — read alongside the full file.
 */
52 dpdk_rx_burst ( dpdk_main_t * dm, dpdk_device_t * xd, u16 queue_id)
/* Budget: never collect more than one vlib frame's worth of packets. */
58 n_left = VLIB_FRAME_SIZE;
61 if (PREDICT_TRUE(xd->dev_type == VNET_DPDK_DEV_ETH))
/* Append each chunk after the packets already gathered (n_buffers). */
65 n_this_chunk = rte_eth_rx_burst (xd->device_index, queue_id,
66 xd->rx_vectors[queue_id] + n_buffers, n_left);
67 n_buffers += n_this_chunk;
68 n_left -= n_this_chunk;
/* Empirically, DPDK r1.8 produces vectors w/ 32 or fewer elts, so a
 * short chunk means the ring is (almost) drained — stop polling. */
70 /* Empirically, DPDK r1.8 produces vectors w/ 32 or fewer elts */
71 if (n_this_chunk < 32)
75 else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
77 vlib_main_t * vm = vlib_get_main();
78 vlib_buffer_main_t * bm = vm->buffer_main;
/* mbufs for dequeued packets come from this NUMA node's pktmbuf pool. */
79 unsigned socket_id = rte_socket_id();
/* DPDK >= 2.2 interleaves RX/TX vrings; map queue_id to the vring pair. */
82 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
83 offset = queue_id * VIRTIO_QNUM;
/* Guest->host traffic arrives on the guest TX queue (VIRTIO_TXQ). */
85 struct vhost_virtqueue *vq =
86 xd->vu_vhost_dev.virtqueue[offset + VIRTIO_TXQ];
/* Bail out early if the queue isn't enabled or the device isn't up. */
88 if (PREDICT_FALSE(!vq->enabled))
91 if (PREDICT_FALSE(!xd->vu_is_running))
95 struct rte_mbuf **pkts = xd->rx_vectors[queue_id];
97 n_this_chunk = rte_vhost_dequeue_burst(&xd->vu_vhost_dev,
99 bm->pktmbuf_pools[socket_id],
102 n_buffers += n_this_chunk;
103 n_left -= n_this_chunk;
104 if (n_this_chunk == 0)
/* Tally byte count of everything dequeued, for the vring counters. */
108 int i; u32 bytes = 0;
109 for (i = 0; i < n_buffers; i++) {
110 struct rte_mbuf *buff = pkts[i];
111 bytes += rte_pktmbuf_data_len(buff);
114 f64 now = vlib_time_now (vm);
116 dpdk_vu_vring *vring = NULL;
/* Send pending guest interrupts if needed: either the coalescing
 * deadline passed or the frame-count threshold was exceeded. */
117 /* send pending interrupts if needed */
118 if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_TXQ)) {
119 vring = &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
120 vring->n_since_last_int += n_buffers;
122 if ((vring->n_since_last_int && (vring->int_deadline < now))
123 || (vring->n_since_last_int > dm->conf->vhost_coalesce_frames))
124 dpdk_vhost_user_send_interrupt(vm, xd, offset + VIRTIO_TXQ);
/* Update per-vring receive statistics on the RX side. */
127 vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
128 vring->packets += n_buffers;
129 vring->bytes += bytes;
131 if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_RXQ)) {
132 if (vring->n_since_last_int && (vring->int_deadline < now))
133 dpdk_vhost_user_send_interrupt(vm, xd, offset + VIRTIO_RXQ);
/* KNI path: single burst, then service pending KNI control requests. */
137 #ifdef RTE_LIBRTE_KNI
138 else if (xd->dev_type == VNET_DPDK_DEV_KNI)
140 n_buffers = rte_kni_rx_burst(xd->kni, xd->rx_vectors[queue_id], VLIB_FRAME_SIZE);
141 rte_kni_handle_request(xd->kni);
/*
 * dpdk_get_xstats - refresh the device's extended-statistics vectors.
 *
 * First probes the PMD for the xstats count (rte_eth_xstats_get with a
 * NULL table returns the required size), sizes xd->xstats and
 * xd->last_cleared_xstats to match, then fetches the actual values.
 *
 * NOTE(review): interior lines (braces, the <=0 error path) are elided
 * in this fragment.
 */
154 dpdk_get_xstats (dpdk_device_t * xd)
/* Probe call: NULL/0 asks the PMD how many xstats entries it exposes. */
157 if ((len = rte_eth_xstats_get(xd->device_index, NULL, 0)) > 0)
/* Grow both vectors to hold len entries (vec_validate is 0-indexed). */
159 vec_validate(xd->xstats, len - 1);
160 vec_validate(xd->last_cleared_xstats, len - 1);
162 len = rte_eth_xstats_get(xd->device_index, xd->xstats, vec_len(xd->xstats));
164 ASSERT(vec_len(xd->xstats) == len);
165 ASSERT(vec_len(xd->last_cleared_xstats) == len);
/* Trim the vectors' logical length to what the PMD actually returned. */
167 _vec_len(xd->xstats) = len;
168 _vec_len(xd->last_cleared_xstats) = len;
175 dpdk_update_counters (dpdk_device_t * xd, f64 now)
177 vlib_simple_counter_main_t * cm;
178 vnet_main_t * vnm = vnet_get_main();
179 u32 my_cpu = os_get_cpu_number();
180 u64 rxerrors, last_rxerrors;
182 /* only update counters for PMD interfaces */
183 if (xd->dev_type != VNET_DPDK_DEV_ETH)
187 * DAW-FIXME: VMXNET3 device stop/start doesn't work,
188 * therefore fake the stop in the dpdk driver by
189 * silently dropping all of the incoming pkts instead of
190 * stopping the driver / hardware.
192 if (xd->admin_up != 0xff)
194 xd->time_last_stats_update = now ? now : xd->time_last_stats_update;
195 clib_memcpy (&xd->last_stats, &xd->stats, sizeof (xd->last_stats));
196 rte_eth_stats_get (xd->device_index, &xd->stats);
198 /* maybe bump interface rx no buffer counter */
199 if (PREDICT_FALSE (xd->stats.rx_nombuf != xd->last_stats.rx_nombuf))
201 cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
202 VNET_INTERFACE_COUNTER_RX_NO_BUF);
204 vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
205 xd->stats.rx_nombuf -
206 xd->last_stats.rx_nombuf);
209 /* missed pkt counter */
210 if (PREDICT_FALSE (xd->stats.imissed != xd->last_stats.imissed))
212 cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
213 VNET_INTERFACE_COUNTER_RX_MISS);
215 vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
217 xd->last_stats.imissed);
219 #if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
220 rxerrors = xd->stats.ierrors;
221 last_rxerrors = xd->last_stats.ierrors;
223 rxerrors = xd->stats.ibadcrc
224 + xd->stats.ibadlen + xd->stats.ierrors;
225 last_rxerrors = xd->last_stats.ibadcrc
226 + xd->last_stats.ibadlen + xd->last_stats.ierrors;
229 if (PREDICT_FALSE (rxerrors != last_rxerrors))
231 cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
232 VNET_INTERFACE_COUNTER_RX_ERROR);
234 vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
235 rxerrors - last_rxerrors);