/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vnet/vnet.h>
17 #include <vppinfra/vec.h>
18 #include <vppinfra/format.h>
19 #include <vppinfra/file.h>
20 #include <vlib/unix/unix.h>
23 #include <vnet/ip/ip.h>
24 #include <vnet/ethernet/ethernet.h>
25 #include <vnet/ethernet/arp_packet.h>
26 #include <vnet/interface/rx_queue_funcs.h>
27 #include <dpdk/buffer.h>
28 #include <dpdk/device/dpdk.h>
29 #include <dpdk/device/dpdk_priv.h>
30 #include <vppinfra/error.h>
32 /* DPDK TX offload to vnet hw interface caps mapppings */
36 vnet_hw_if_caps_t caps;
37 } tx_off_caps_map[] = {
38 { DEV_TX_OFFLOAD_IPV4_CKSUM, VNET_HW_IF_CAP_TX_IP4_CKSUM },
39 { DEV_TX_OFFLOAD_TCP_CKSUM, VNET_HW_IF_CAP_TX_TCP_CKSUM },
40 { DEV_TX_OFFLOAD_UDP_CKSUM, VNET_HW_IF_CAP_TX_UDP_CKSUM },
41 { DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, VNET_HW_IF_CAP_TX_IP4_OUTER_CKSUM },
42 { DEV_TX_OFFLOAD_OUTER_UDP_CKSUM, VNET_HW_IF_CAP_TX_UDP_OUTER_CKSUM },
43 { DEV_TX_OFFLOAD_TCP_TSO, VNET_HW_IF_CAP_TCP_GSO },
44 { DEV_TX_OFFLOAD_VXLAN_TNL_TSO, VNET_HW_IF_CAP_VXLAN_TNL_GSO }
48 dpdk_device_error (dpdk_device_t * xd, char *str, int rv)
50 dpdk_log_err ("Interface %U error %d: %s",
51 format_dpdk_device_name, xd->port_id, rv, rte_strerror (rv));
52 xd->errors = clib_error_return (xd->errors, "%s[port:%d, errno:%d]: %s",
53 str, xd->port_id, rv, rte_strerror (rv));
57 dpdk_device_setup (dpdk_device_t * xd)
59 vlib_main_t *vm = vlib_get_main ();
60 vnet_main_t *vnm = vnet_get_main ();
61 vlib_thread_main_t *tm = vlib_get_thread_main ();
62 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->sw_if_index);
63 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
64 vnet_hw_if_caps_change_t caps = {};
65 struct rte_eth_dev_info dev_info;
66 struct rte_eth_conf conf = {};
72 ASSERT (vlib_get_thread_index () == 0);
74 clib_error_free (xd->errors);
75 sw->flags &= ~VNET_SW_INTERFACE_FLAG_ERROR;
77 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
79 vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
80 dpdk_device_stop (xd);
83 rte_eth_dev_info_get (xd->port_id, &dev_info);
85 dpdk_log_debug ("[%u] configuring device %U", xd->port_id,
86 format_dpdk_rte_device, dev_info.device);
88 /* create rx and tx offload wishlist */
89 rxo = DEV_RX_OFFLOAD_IPV4_CKSUM;
92 if (xd->conf.enable_tcp_udp_checksum)
93 rxo |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
95 if (xd->conf.disable_tx_checksum_offload == 0 &&
96 xd->conf.enable_outer_checksum_offload)
97 txo |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
99 if (xd->conf.disable_tx_checksum_offload == 0)
100 txo |= DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM |
101 DEV_TX_OFFLOAD_UDP_CKSUM;
103 if (xd->conf.disable_multi_seg == 0)
105 txo |= DEV_TX_OFFLOAD_MULTI_SEGS;
106 rxo |= DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_SCATTER;
109 if (xd->conf.enable_lro)
110 rxo |= DEV_RX_OFFLOAD_TCP_LRO;
112 /* per-device offload config */
113 if (xd->conf.enable_tso)
114 txo |= DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |
115 DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
117 if (xd->conf.disable_rx_scatter)
118 rxo &= ~DEV_RX_OFFLOAD_SCATTER;
120 /* mask unsupported offloads */
121 rxo &= dev_info.rx_offload_capa;
122 txo &= dev_info.tx_offload_capa;
124 dpdk_log_debug ("[%u] Supported RX offloads: %U", xd->port_id,
125 format_dpdk_rx_offload_caps, dev_info.rx_offload_capa);
126 dpdk_log_debug ("[%u] Configured RX offloads: %U", xd->port_id,
127 format_dpdk_rx_offload_caps, rxo);
128 dpdk_log_debug ("[%u] Supported TX offloads: %U", xd->port_id,
129 format_dpdk_tx_offload_caps, dev_info.tx_offload_capa);
130 dpdk_log_debug ("[%u] Configured TX offloads: %U", xd->port_id,
131 format_dpdk_tx_offload_caps, txo);
133 /* Enable flow director when flows exist */
134 if (xd->supported_flow_actions &&
135 (xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0)
136 conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
138 /* finalize configuration */
139 conf.rxmode.offloads = rxo;
140 conf.txmode.offloads = txo;
141 if (rxo & DEV_RX_OFFLOAD_TCP_LRO)
142 conf.rxmode.max_lro_pkt_size = xd->conf.max_lro_pkt_size;
144 if (xd->conf.enable_lsc_int)
145 conf.intr_conf.lsc = 1;
146 if (xd->conf.enable_rxq_int)
147 conf.intr_conf.rxq = 1;
149 conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
150 if (xd->conf.n_rx_queues > 1)
152 if (xd->conf.disable_rss == 0)
154 conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
155 conf.rx_adv_conf.rss_conf.rss_hf = xd->conf.rss_hf;
159 if (rxo & DEV_RX_OFFLOAD_JUMBO_FRAME)
160 conf.rxmode.max_rx_pkt_len =
161 clib_min (ETHERNET_MAX_PACKET_BYTES, dev_info.max_rx_pktlen);
163 rv = rte_eth_dev_configure (xd->port_id, xd->conf.n_rx_queues,
164 xd->conf.n_tx_queues, &conf);
168 dpdk_device_error (xd, "rte_eth_dev_configure", rv);
172 rte_eth_dev_get_mtu (xd->port_id, &mtu);
173 dpdk_log_debug ("[%u] device default mtu %u", xd->port_id, mtu);
175 hi->max_supported_packet_bytes = mtu;
176 if (hi->max_packet_bytes > mtu)
178 vnet_hw_interface_set_mtu (vnm, xd->hw_if_index, mtu);
182 rte_eth_dev_set_mtu (xd->port_id, hi->max_packet_bytes);
183 dpdk_log_debug ("[%u] port mtu set to %u", xd->port_id,
184 hi->max_packet_bytes);
187 vec_validate_aligned (xd->tx_queues, xd->conf.n_tx_queues - 1,
188 CLIB_CACHE_LINE_BYTES);
189 for (j = 0; j < xd->conf.n_tx_queues; j++)
191 rv = rte_eth_tx_queue_setup (xd->port_id, j, xd->conf.n_tx_desc,
194 /* retry with any other CPU socket */
196 rv = rte_eth_tx_queue_setup (xd->port_id, j, xd->conf.n_tx_desc,
199 dpdk_device_error (xd, "rte_eth_tx_queue_setup", rv);
201 if (xd->conf.n_tx_queues < tm->n_vlib_mains)
202 clib_spinlock_init (&vec_elt (xd->tx_queues, j).lock);
205 vec_validate_aligned (xd->rx_queues, xd->conf.n_rx_queues - 1,
206 CLIB_CACHE_LINE_BYTES);
208 for (j = 0; j < xd->conf.n_rx_queues; j++)
210 dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, j);
211 u8 bpidx = vlib_buffer_pool_get_default_for_numa (
212 vm, vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index));
213 vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, bpidx);
214 struct rte_mempool *mp = dpdk_mempool_by_buffer_pool_index[bpidx];
216 rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->conf.n_rx_desc,
217 xd->cpu_socket, 0, mp);
219 /* retry with any other CPU socket */
221 rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->conf.n_rx_desc,
222 SOCKET_ID_ANY, 0, mp);
224 rxq->buffer_pool_index = bp->index;
227 dpdk_device_error (xd, "rte_eth_rx_queue_setup", rv);
230 if (vec_len (xd->errors))
234 (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_EXT_HDR_VALID);
236 if ((rxo & (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) ==
237 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
239 (VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
241 dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_RX_IP4_CKSUM,
242 rxo & DEV_RX_OFFLOAD_IPV4_CKSUM);
243 dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_MAYBE_MULTISEG,
244 rxo & DEV_RX_OFFLOAD_SCATTER);
245 dpdk_device_flag_set (
246 xd, DPDK_DEVICE_FLAG_TX_OFFLOAD,
247 (txo & (DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) ==
248 (DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM));
250 /* unconditionally set mac filtering cap */
251 caps.val = caps.mask = VNET_HW_IF_CAP_MAC_FILTER;
253 ethernet_set_flags (vnm, xd->hw_if_index,
254 ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
256 for (int i = 0; i < ARRAY_LEN (tx_off_caps_map); i++)
258 __typeof__ (tx_off_caps_map[0]) *v = tx_off_caps_map + i;
259 caps.mask |= v->caps;
260 if ((v->offload & txo) == v->offload)
264 vnet_hw_if_change_caps (vnm, xd->hw_if_index, &caps);
265 xd->enabled_rx_off = rxo;
266 xd->enabled_tx_off = txo;
268 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
269 dpdk_device_start (xd);
271 if (vec_len (xd->errors))
277 xd->flags |= DPDK_DEVICE_FLAG_PMD_INIT_FAIL;
278 sw->flags |= VNET_SW_INTERFACE_FLAG_ERROR;
281 static clib_error_t *
282 dpdk_rx_read_ready (clib_file_t *uf)
284 vnet_main_t *vnm = vnet_get_main ();
285 dpdk_main_t *dm = &dpdk_main;
286 u32 qidx = uf->private_data;
287 vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, qidx);
288 dpdk_device_t *xd = vec_elt_at_index (dm->devices, rxq->dev_instance);
291 CLIB_UNUSED (ssize_t size) = read (uf->file_descriptor, &b, sizeof (b));
292 if (rxq->mode != VNET_HW_IF_RX_MODE_POLLING)
294 vnet_hw_if_rx_queue_set_int_pending (vnm, uf->private_data);
295 rte_eth_dev_rx_intr_enable (xd->port_id, rxq->queue_id);
302 dpdk_setup_interrupts (dpdk_device_t *xd)
304 vnet_main_t *vnm = vnet_get_main ();
305 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
310 if (!xd->conf.enable_rxq_int)
313 /* Probe for interrupt support */
314 if (rte_eth_dev_rx_intr_enable (xd->port_id, 0))
316 dpdk_log_info ("probe for interrupt mode for device %U. Failed.\n",
317 format_dpdk_device_name, xd->port_id);
321 xd->flags |= DPDK_DEVICE_FLAG_INT_SUPPORTED;
322 if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
323 rte_eth_dev_rx_intr_disable (xd->port_id, 0);
324 dpdk_log_info ("Probe for interrupt mode for device %U. Success.\n",
325 format_dpdk_device_name, xd->port_id);
328 if (xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED)
331 for (int q = 0; q < xd->conf.n_rx_queues; q++)
333 dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
334 clib_file_t f = { 0 };
335 rxq->efd = rte_eth_dev_rx_intr_ctl_q_get_fd (xd->port_id, q);
338 xd->flags &= ~DPDK_DEVICE_FLAG_INT_SUPPORTED;
342 f.read_function = dpdk_rx_read_ready;
343 f.flags = UNIX_FILE_EVENT_EDGE_TRIGGERED;
344 f.file_descriptor = rxq->efd;
345 f.private_data = rxq->queue_index;
347 format (0, "%U queue %u", format_dpdk_device_name, xd->port_id, q);
348 rxq->clib_file_index = clib_file_add (&file_main, &f);
349 vnet_hw_if_set_rx_queue_file_index (vnm, rxq->queue_index,
350 rxq->clib_file_index);
351 if (xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE)
353 clib_file_main_t *fm = &file_main;
355 pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
356 fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
362 vnet_hw_if_set_caps (vnm, hi->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
364 vnet_hw_if_unset_caps (vnm, hi->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
365 vnet_hw_if_update_runtime_data (vnm, xd->hw_if_index);
369 dpdk_device_start (dpdk_device_t * xd)
373 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
376 rv = rte_eth_dev_start (xd->port_id);
380 dpdk_device_error (xd, "rte_eth_dev_start", rv);
384 dpdk_log_debug ("[%u] RX burst function: %U", xd->port_id,
385 format_dpdk_burst_fn, xd, VLIB_RX);
386 dpdk_log_debug ("[%u] TX burst function: %U", xd->port_id,
387 format_dpdk_burst_fn, xd, VLIB_TX);
389 dpdk_setup_interrupts (xd);
391 if (xd->default_mac_address)
392 rv = rte_eth_dev_default_mac_addr_set (xd->port_id,
393 (void *) xd->default_mac_address);
396 dpdk_device_error (xd, "rte_eth_dev_default_mac_addr_set", rv);
398 if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
399 rte_eth_promiscuous_enable (xd->port_id);
401 rte_eth_promiscuous_disable (xd->port_id);
403 rte_eth_allmulticast_enable (xd->port_id);
405 dpdk_log_info ("Interface %U started",
406 format_dpdk_device_name, xd->port_id);
410 dpdk_device_stop (dpdk_device_t * xd)
412 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
415 rte_eth_allmulticast_disable (xd->port_id);
416 rte_eth_dev_stop (xd->port_id);
417 clib_memset (&xd->link, 0, sizeof (struct rte_eth_link));
419 dpdk_log_info ("Interface %U stopped",
420 format_dpdk_device_name, xd->port_id);
423 void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
426 dpdk_port_state_callback_inline (dpdk_portid_t port_id,
427 enum rte_eth_event_type type, void *param)
429 struct rte_eth_link link;
431 RTE_SET_USED (param);
432 if (type != RTE_ETH_EVENT_INTR_LSC)
434 dpdk_log_info ("Unknown event %d received for port %d", type, port_id);
438 rte_eth_link_get_nowait (port_id, &link);
439 u8 link_up = link.link_status;
441 dpdk_log_info ("Port %d Link Up - speed %u Mbps - %s",
442 port_id, (unsigned) link.link_speed,
443 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
444 "full-duplex" : "half-duplex");
446 dpdk_log_info ("Port %d Link Down\n\n", port_id);
452 dpdk_port_state_callback (dpdk_portid_t port_id,
453 enum rte_eth_event_type type,
455 void *ret_param __attribute__ ((unused)))
457 return dpdk_port_state_callback_inline (port_id, type, param);
460 /* If this device is PCI return pointer to info, otherwise NULL */
461 struct rte_pci_device *
462 dpdk_get_pci_device (const struct rte_eth_dev_info *info)
464 const struct rte_bus *bus;
466 bus = rte_bus_find_by_device (info->device);
467 if (bus && !strcmp (bus->name, "pci"))
468 return RTE_DEV_TO_PCI (info->device);
473 /* If this device is VMBUS return pointer to info, otherwise NULL */
474 struct rte_vmbus_device *
475 dpdk_get_vmbus_device (const struct rte_eth_dev_info *info)
477 const struct rte_bus *bus;
479 bus = rte_bus_find_by_device (info->device);
480 if (bus && !strcmp (bus->name, "vmbus"))
481 return container_of (info->device, struct rte_vmbus_device, device);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */