/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vppinfra/file.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>

/* DPDK TX offload to vnet hw interface caps mappings */
static struct
{
  u64 offload;
  vnet_hw_if_caps_t caps;
} tx_off_caps_map[] = {
  { DEV_TX_OFFLOAD_IPV4_CKSUM, VNET_HW_IF_CAP_TX_IP4_CKSUM },
  { DEV_TX_OFFLOAD_TCP_CKSUM, VNET_HW_IF_CAP_TX_TCP_CKSUM },
  { DEV_TX_OFFLOAD_UDP_CKSUM, VNET_HW_IF_CAP_TX_UDP_CKSUM },
  { DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, VNET_HW_IF_CAP_TX_IP4_OUTER_CKSUM },
  { DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, VNET_HW_IF_CAP_TX_UDP_OUTER_CKSUM },
  { DEV_TX_OFFLOAD_TCP_TSO, VNET_HW_IF_CAP_TCP_GSO },
  { DEV_TX_OFFLOAD_VXLAN_TNL_TSO, VNET_HW_IF_CAP_VXLAN_TNL_GSO }
};
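
/*
 * Log a failed DPDK call and accumulate it on the device's error vector;
 * callers such as dpdk_device_setup () inspect that vector afterwards to
 * decide whether the operation as a whole failed.
 */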
void
dpdk_device_error (dpdk_device_t * xd, char *str, int rv)
{
  dpdk_log_err ("Interface %U error %d: %s",
		format_dpdk_device_name, xd->port_id, rv, rte_strerror (rv));
  xd->errors = clib_error_return (xd->errors, "%s[port:%d, errno:%d]: %s",
				  str, xd->port_id, rv, rte_strerror (rv));
}
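
/*
 * (Re)configure a device and all of its RX/TX queues. Runs on the main
 * thread only (see the ASSERT below); an already admin-up device is
 * stopped first and restarted at the end, and any failure is recorded
 * via dpdk_device_error () and reflected in the interface error flags.
 */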
void
dpdk_device_setup (dpdk_device_t * xd)
{
  vlib_main_t *vm = vlib_get_main ();
  vnet_main_t *vnm = vnet_get_main ();
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->sw_if_index);
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
  vnet_hw_if_caps_change_t caps = {};
  struct rte_eth_dev_info dev_info;
  struct rte_eth_conf conf = {};
  u64 rxo, txo;
  u16 mtu;
  int rv;
  int j;

  ASSERT (vlib_get_thread_index () == 0);

  clib_error_free (xd->errors);
  sw->flags &= ~VNET_SW_INTERFACE_FLAG_ERROR;

  if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
    {
      vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
      dpdk_device_stop (xd);
    }

  rte_eth_dev_info_get (xd->port_id, &dev_info);

  dpdk_log_debug ("[%u] configuring device %U", xd->port_id,
		  format_dpdk_rte_device, dev_info.device);

  /* create rx and tx offload wishlist */
  rxo = DEV_RX_OFFLOAD_IPV4_CKSUM;
  txo = 0;

  if (xd->conf.enable_tcp_udp_checksum)
    rxo |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;

  if (xd->conf.disable_tx_checksum_offload == 0 &&
      xd->conf.enable_outer_checksum_offload)
    txo |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;

  if (xd->conf.disable_tx_checksum_offload == 0)
    txo |= DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM |
	   DEV_TX_OFFLOAD_UDP_CKSUM;

  if (xd->conf.disable_multi_seg == 0)
    {
      txo |= DEV_TX_OFFLOAD_MULTI_SEGS;
      rxo |= DEV_RX_OFFLOAD_SCATTER;
#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
      rxo |= DEV_RX_OFFLOAD_JUMBO_FRAME;
#endif
    }
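
  /* Note: DPDK 21.11 removed DEV_RX_OFFLOAD_JUMBO_FRAME; on newer releases
     the RX path is sized from conf.rxmode.mtu instead (set further below) */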
  if (xd->conf.enable_lro)
    rxo |= DEV_RX_OFFLOAD_TCP_LRO;

  /* per-device offload config */
  if (xd->conf.enable_tso)
    txo |= DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |
	   DEV_TX_OFFLOAD_VXLAN_TNL_TSO;

  if (xd->conf.disable_rx_scatter)
    rxo &= ~DEV_RX_OFFLOAD_SCATTER;

  /* mask unsupported offloads */
  rxo &= dev_info.rx_offload_capa;
  txo &= dev_info.tx_offload_capa;
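
  /* from here on rxo/txo only contain offloads the PMD can actually do;
     they feed the device config below and, once the device is configured,
     the per-buffer flags and interface capabilities */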
  dpdk_log_debug ("[%u] Supported RX offloads: %U", xd->port_id,
		  format_dpdk_rx_offload_caps, dev_info.rx_offload_capa);
  dpdk_log_debug ("[%u] Configured RX offloads: %U", xd->port_id,
		  format_dpdk_rx_offload_caps, rxo);
  dpdk_log_debug ("[%u] Supported TX offloads: %U", xd->port_id,
		  format_dpdk_tx_offload_caps, dev_info.tx_offload_capa);
  dpdk_log_debug ("[%u] Configured TX offloads: %U", xd->port_id,
		  format_dpdk_tx_offload_caps, txo);

  /* Enable flow director when flows exist */
  if (xd->supported_flow_actions &&
      (xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0)
    conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;

  /* finalize configuration */
  conf.rxmode.offloads = rxo;
  conf.txmode.offloads = txo;
  if (rxo & DEV_RX_OFFLOAD_TCP_LRO)
    conf.rxmode.max_lro_pkt_size = xd->conf.max_lro_pkt_size;

  if (xd->conf.enable_lsc_int)
    conf.intr_conf.lsc = 1;
  if (xd->conf.enable_rxq_int)
    conf.intr_conf.rxq = 1;
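
  /* the rxq interrupt request is speculative: if the PMD rejects the
     config, rte_eth_dev_configure below is retried once with it cleared */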
  conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
  if (xd->conf.n_rx_queues > 1)
    {
      if (xd->conf.disable_rss == 0)
	{
	  conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
	  conf.rx_adv_conf.rss_conf.rss_hf = xd->conf.rss_hf;
	}
    }

#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
  if (rxo & DEV_RX_OFFLOAD_JUMBO_FRAME)
    conf.rxmode.max_rx_pkt_len =
      clib_min (ETHERNET_MAX_PACKET_BYTES, dev_info.max_rx_pktlen);
#else
  dpdk_log_debug ("[%u] min_mtu: %u, max_mtu: %u, min_rx_bufsize: %u, "
		  "max_rx_pktlen: %u, max_lro_pkt_size: %u",
		  xd->port_id, dev_info.min_mtu, dev_info.max_mtu,
		  dev_info.min_rx_bufsize, dev_info.max_rx_pktlen,
		  dev_info.max_lro_pkt_size);

  mtu = xd->conf.disable_multi_seg ? 2000 : ETHERNET_MAX_PACKET_BYTES;
  conf.rxmode.mtu = clib_min (mtu, dev_info.max_rx_pktlen);
#endif

retry:
  rv = rte_eth_dev_configure (xd->port_id, xd->conf.n_rx_queues,
			      xd->conf.n_tx_queues, &conf);

  if (rv < 0 && conf.intr_conf.rxq)
    {
      conf.intr_conf.rxq = 0;
      goto retry;
    }

  if (rv < 0)
    {
      dpdk_device_error (xd, "rte_eth_dev_configure", rv);
      goto error;
    }

  rte_eth_dev_get_mtu (xd->port_id, &mtu);
  dpdk_log_debug ("[%u] device default mtu %u", xd->port_id, mtu);
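
  /* reconcile the interface MTU with the device: shrink the interface if
     the device cannot go as high, otherwise program the device to match */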
  hi->max_supported_packet_bytes = mtu;
  if (hi->max_packet_bytes > mtu)
    {
      vnet_hw_interface_set_mtu (vnm, xd->hw_if_index, mtu);
    }
  else
    {
      rte_eth_dev_set_mtu (xd->port_id, hi->max_packet_bytes);
      dpdk_log_debug ("[%u] port mtu set to %u", xd->port_id,
		      hi->max_packet_bytes);
    }

  vec_validate_aligned (xd->tx_queues, xd->conf.n_tx_queues - 1,
			CLIB_CACHE_LINE_BYTES);
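  /* queues are set up on the device's own NUMA socket first, with a
     SOCKET_ID_ANY fallback; a per-queue lock is only needed when tx
     queues are shared between threads */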
  for (j = 0; j < xd->conf.n_tx_queues; j++)
    {
      rv = rte_eth_tx_queue_setup (xd->port_id, j, xd->conf.n_tx_desc,
				   xd->cpu_socket, 0);

      /* retry with any other CPU socket */
      if (rv < 0)
	rv = rte_eth_tx_queue_setup (xd->port_id, j, xd->conf.n_tx_desc,
				     SOCKET_ID_ANY, 0);

      if (rv < 0)
	dpdk_device_error (xd, "rte_eth_tx_queue_setup", rv);

      if (xd->conf.n_tx_queues < tm->n_vlib_mains)
	clib_spinlock_init (&vec_elt (xd->tx_queues, j).lock);
    }

  vec_validate_aligned (xd->rx_queues, xd->conf.n_rx_queues - 1,
			CLIB_CACHE_LINE_BYTES);
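
  /* every rx queue gets the default buffer pool (and matching DPDK
     mempool) of the NUMA node its assigned polling thread runs on */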
  for (j = 0; j < xd->conf.n_rx_queues; j++)
    {
      dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, j);
      u8 bpidx = vlib_buffer_pool_get_default_for_numa (
	vm, vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index));
      vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, bpidx);
      struct rte_mempool *mp = dpdk_mempool_by_buffer_pool_index[bpidx];

      rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->conf.n_rx_desc,
				   xd->cpu_socket, 0, mp);

      /* retry with any other CPU socket */
      if (rv < 0)
	rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->conf.n_rx_desc,
				     SOCKET_ID_ANY, 0, mp);

      rxq->buffer_pool_index = bp->index;

      if (rv < 0)
	dpdk_device_error (xd, "rte_eth_rx_queue_setup", rv);
    }

  if (vec_len (xd->errors))
    goto error;

  xd->buffer_flags =
    (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_EXT_HDR_VALID);

  if ((rxo & (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) ==
      (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
    xd->buffer_flags |=
      (VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);

  dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_RX_IP4_CKSUM,
			rxo & DEV_RX_OFFLOAD_IPV4_CKSUM);
  dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_MAYBE_MULTISEG,
			rxo & DEV_RX_OFFLOAD_SCATTER);
  dpdk_device_flag_set (
    xd, DPDK_DEVICE_FLAG_TX_OFFLOAD,
    (txo & (DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) ==
      (DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM));

  /* unconditionally set mac filtering cap */
  caps.val = caps.mask = VNET_HW_IF_CAP_MAC_FILTER;

  ethernet_set_flags (vnm, xd->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);

  for (int i = 0; i < ARRAY_LEN (tx_off_caps_map); i++)
    {
      __typeof__ (tx_off_caps_map[0]) *v = tx_off_caps_map + i;
      caps.mask |= v->caps;
      if ((v->offload & txo) == v->offload)
	caps.val |= v->caps;
    }

  vnet_hw_if_change_caps (vnm, xd->hw_if_index, &caps);
  xd->enabled_rx_off = rxo;
  xd->enabled_tx_off = txo;

  if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
    dpdk_device_start (xd);

  if (vec_len (xd->errors))
    goto error;

  return;

error:
  xd->flags |= DPDK_DEVICE_FLAG_PMD_INIT_FAIL;
  sw->flags |= VNET_SW_INTERFACE_FLAG_ERROR;
}
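
/*
 * clib_file read callback for a queue's interrupt eventfd: drain the
 * counter and, unless the queue was switched back to polling mode, mark
 * the queue pending and re-arm the device interrupt.
 */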
static clib_error_t *
dpdk_rx_read_ready (clib_file_t *uf)
{
  vnet_main_t *vnm = vnet_get_main ();
  dpdk_main_t *dm = &dpdk_main;
  u32 qidx = uf->private_data;
  vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, qidx);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rxq->dev_instance);

  u64 b;
  CLIB_UNUSED (ssize_t size) = read (uf->file_descriptor, &b, sizeof (b));
  if (rxq->mode != VNET_HW_IF_RX_MODE_POLLING)
    {
      vnet_hw_if_rx_queue_set_int_pending (vnm, uf->private_data);
      rte_eth_dev_rx_intr_enable (xd->port_id, rxq->queue_id);
    }

  return 0;
}
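
/*
 * Probe the PMD for rx interrupt support and, when available, register
 * each queue's interrupt eventfd with the clib file poller so input
 * nodes can sleep until traffic arrives.
 */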
static void
dpdk_setup_interrupts (dpdk_device_t *xd)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
  int int_mode = 0;
  if (!hi)
    return;

  if (!xd->conf.enable_rxq_int)
    return;

  /* Probe for interrupt support */
  if (rte_eth_dev_rx_intr_enable (xd->port_id, 0))
    {
      dpdk_log_info ("Probe for interrupt mode for device %U. Failed.\n",
		     format_dpdk_device_name, xd->port_id);
    }
  else
    {
      xd->flags |= DPDK_DEVICE_FLAG_INT_SUPPORTED;
      if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
	rte_eth_dev_rx_intr_disable (xd->port_id, 0);
      dpdk_log_info ("Probe for interrupt mode for device %U. Success.\n",
		     format_dpdk_device_name, xd->port_id);
    }
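
  /* support was probed on rx queue 0 only; per-queue eventfds are wired
     up below, and any queue without a usable fd disables the feature */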
  if (xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED)
    {
      int_mode = 1;
      for (int q = 0; q < xd->conf.n_rx_queues; q++)
	{
	  dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
	  clib_file_t f = { 0 };
	  rxq->efd = rte_eth_dev_rx_intr_ctl_q_get_fd (xd->port_id, q);
	  if (rxq->efd < 0)
	    {
	      xd->flags &= ~DPDK_DEVICE_FLAG_INT_SUPPORTED;
	      int_mode = 0;
	      break;
	    }
	  f.read_function = dpdk_rx_read_ready;
	  f.flags = UNIX_FILE_EVENT_EDGE_TRIGGERED;
	  f.file_descriptor = rxq->efd;
	  f.private_data = rxq->queue_index;
	  f.description =
	    format (0, "%U queue %u", format_dpdk_device_name, xd->port_id, q);
	  rxq->clib_file_index = clib_file_add (&file_main, &f);
	  vnet_hw_if_set_rx_queue_file_index (vnm, rxq->queue_index,
					      rxq->clib_file_index);
	  if (xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE)
	    {
	      clib_file_main_t *fm = &file_main;
	      clib_file_t *f =
		pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
	      fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
	    }
	}
    }
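
  /* advertise interrupt mode only if every queue produced a usable fd */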
  if (int_mode)
    vnet_hw_if_set_caps (vnm, hi->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
  else
    vnet_hw_if_unset_caps (vnm, hi->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
  vnet_hw_if_update_runtime_data (vnm, xd->hw_if_index);
}
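
/*
 * Start a previously configured device and re-apply its runtime state:
 * interrupt wiring, default MAC address, promiscuous and allmulticast
 * mode. Skipped entirely when PMD init failed.
 */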
void
dpdk_device_start (dpdk_device_t * xd)
{
  int rv;

  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return;

  rv = rte_eth_dev_start (xd->port_id);

  if (rv)
    {
      dpdk_device_error (xd, "rte_eth_dev_start", rv);
      return;
    }

  dpdk_log_debug ("[%u] RX burst function: %U", xd->port_id,
		  format_dpdk_burst_fn, xd, VLIB_RX);
  dpdk_log_debug ("[%u] TX burst function: %U", xd->port_id,
		  format_dpdk_burst_fn, xd, VLIB_TX);

  dpdk_setup_interrupts (xd);

  if (xd->default_mac_address)
    rv = rte_eth_dev_default_mac_addr_set (xd->port_id,
					   (void *) xd->default_mac_address);

  if (rv)
    dpdk_device_error (xd, "rte_eth_dev_default_mac_addr_set", rv);

  if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
    rte_eth_promiscuous_enable (xd->port_id);
  else
    rte_eth_promiscuous_disable (xd->port_id);

  rte_eth_allmulticast_enable (xd->port_id);

  dpdk_log_info ("Interface %U started",
		 format_dpdk_device_name, xd->port_id);
}
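
/*
 * Counterpart of dpdk_device_start (); also clears the cached link
 * state so stale link info is not reported while the port is down.
 */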
void
dpdk_device_stop (dpdk_device_t * xd)
{
  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return;

  rte_eth_allmulticast_disable (xd->port_id);
  rte_eth_dev_stop (xd->port_id);
  clib_memset (&xd->link, 0, sizeof (struct rte_eth_link));

  dpdk_log_info ("Interface %U stopped",
		 format_dpdk_device_name, xd->port_id);
}

void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
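
/*
 * Link-state-change callback, registered with DPDK for
 * RTE_ETH_EVENT_INTR_LSC events; it only logs the new link state.
 */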
always_inline int
dpdk_port_state_callback_inline (dpdk_portid_t port_id,
				 enum rte_eth_event_type type, void *param)
{
  struct rte_eth_link link;

  RTE_SET_USED (param);
  if (type != RTE_ETH_EVENT_INTR_LSC)
    {
      dpdk_log_info ("Unknown event %d received for port %d", type, port_id);
      return -1;
    }

  rte_eth_link_get_nowait (port_id, &link);
  u8 link_up = link.link_status;
  if (link_up)
    dpdk_log_info ("Port %d Link Up - speed %u Mbps - %s",
		   port_id, (unsigned) link.link_speed,
		   (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
		   "full-duplex" : "half-duplex");
  else
    dpdk_log_info ("Port %d Link Down\n\n", port_id);

  return 0;
}

int
dpdk_port_state_callback (dpdk_portid_t port_id,
			  enum rte_eth_event_type type,
			  void *param,
			  void *ret_param __attribute__ ((unused)))
{
  return dpdk_port_state_callback_inline (port_id, type, param);
}

/* If this device is PCI return pointer to info, otherwise NULL */
struct rte_pci_device *
dpdk_get_pci_device (const struct rte_eth_dev_info *info)
{
  const struct rte_bus *bus;

  bus = rte_bus_find_by_device (info->device);
  if (bus && !strcmp (bus->name, "pci"))
    return RTE_DEV_TO_PCI (info->device);
  else
    return NULL;
}

/* If this device is VMBUS return pointer to info, otherwise NULL */
struct rte_vmbus_device *
dpdk_get_vmbus_device (const struct rte_eth_dev_info *info)
{
  const struct rte_bus *bus;

  bus = rte_bus_find_by_device (info->device);
  if (bus && !strcmp (bus->name, "vmbus"))
    return container_of (info->device, struct rte_vmbus_device, device);
  else
    return NULL;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */