2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/vnet.h>
17 #include <vppinfra/vec.h>
18 #include <vppinfra/format.h>
19 #include <vppinfra/file.h>
20 #include <vlib/unix/unix.h>
23 #include <vnet/ip/ip.h>
24 #include <vnet/ethernet/ethernet.h>
25 #include <vnet/ethernet/arp_packet.h>
26 #include <vnet/interface/rx_queue_funcs.h>
27 #include <dpdk/buffer.h>
28 #include <dpdk/device/dpdk.h>
29 #include <dpdk/device/dpdk_priv.h>
30 #include <vppinfra/error.h>
/* Log a device-level error and append it to the device's error chain.
 * Callers later test vec_len (xd->errors) to detect setup failure.
 * NOTE(review): this extraction elides the return-type line and braces
 * (embedded original line numbers are non-contiguous). */
33 dpdk_device_error (dpdk_device_t * xd, char *str, int rv)
35 dpdk_log_err ("Interface %U error %d: %s",
36 format_dpdk_device_name, xd->port_id, rv, rte_strerror (rv));
/* Chain the new error onto any previous ones instead of overwriting. */
37 xd->errors = clib_error_return (xd->errors, "%s[port:%d, errno:%d]: %s",
38 str, xd->port_id, rv, rte_strerror (rv));
/* (Re)configure a DPDK ethernet device: stop it if admin-up, apply port
 * configuration, create all tx/rx queues, set the MTU, and restart it.
 * On any failure, flag the device/interface as errored.
 * NOTE(review): this extraction elides lines (several error checks,
 * declarations such as rv/j/bitmap, braces, and `else` keywords are not
 * visible here) — consult the upstream file before editing code. */
42 dpdk_device_setup (dpdk_device_t * xd)
44 vlib_main_t *vm = vlib_get_main ();
45 vnet_main_t *vnm = vnet_get_main ();
46 vlib_thread_main_t *tm = vlib_get_thread_main ();
47 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->sw_if_index);
48 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
49 struct rte_eth_dev_info dev_info;
/* Device reconfiguration must run on the main thread only. */
54 ASSERT (vlib_get_thread_index () == 0);
/* Start from a clean error state and clear the sw interface error flag. */
56 clib_error_free (xd->errors);
57 sw->flags &= ~VNET_SW_INTERFACE_FLAG_ERROR;
/* If the interface is admin-up, bring it down while reconfiguring. */
59 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
61 vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
62 dpdk_device_stop (xd);
65 /* Enable flow director when flows exist */
66 if (xd->pmd == VNET_DPDK_PMD_I40E)
68 if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0)
69 xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
/* presumably the else-branch of the RX_FLOW_OFFLOAD test above
 * (original line 70 elided) — verify against upstream */
71 xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_NONE;
/* Mask out requested tx/rx offloads the PMD does not support,
 * warning once for each direction. */
74 rte_eth_dev_info_get (xd->port_id, &dev_info);
76 bitmap = xd->port_conf.txmode.offloads & ~dev_info.tx_offload_capa;
79 dpdk_log_warn ("unsupported tx offloads requested on port %u: %U",
80 xd->port_id, format_dpdk_tx_offload_caps, bitmap);
/* XOR clears exactly the unsupported bits computed above. */
81 xd->port_conf.txmode.offloads ^= bitmap;
84 bitmap = xd->port_conf.rxmode.offloads & ~dev_info.rx_offload_capa;
87 dpdk_log_warn ("unsupported rx offloads requested on port %u: %U",
88 xd->port_id, format_dpdk_rx_offload_caps, bitmap);
89 xd->port_conf.rxmode.offloads ^= bitmap;
/* Apply queue counts and offload configuration to the port. */
92 rv = rte_eth_dev_configure (xd->port_id, xd->conf.n_rx_queues,
93 xd->conf.n_tx_queues, &xd->port_conf);
97 dpdk_device_error (xd, "rte_eth_dev_configure", rv);
/* Create one tx queue per configured queue; on failure the setup is
 * retried with SOCKET_ID_ANY (see the retry comment below). */
101 vec_validate_aligned (xd->tx_queues, xd->conf.n_tx_queues - 1,
102 CLIB_CACHE_LINE_BYTES);
103 for (j = 0; j < xd->conf.n_tx_queues; j++)
105 rv = rte_eth_tx_queue_setup (xd->port_id, j, xd->conf.n_tx_desc,
108 /* retry with any other CPU socket */
110 rv = rte_eth_tx_queue_setup (xd->port_id, j, xd->conf.n_tx_desc,
113 dpdk_device_error (xd, "rte_eth_tx_queue_setup", rv);
/* Fewer tx queues than vlib threads means queues are shared between
 * threads, so each shared queue needs a spinlock. */
115 if (xd->conf.n_tx_queues < tm->n_vlib_mains)
116 clib_spinlock_init (&vec_elt (xd->tx_queues, j).lock);
/* Create rx queues, selecting the buffer pool / mempool that matches
 * each queue's NUMA node; retry with SOCKET_ID_ANY on failure. */
119 vec_validate_aligned (xd->rx_queues, xd->conf.n_rx_queues - 1,
120 CLIB_CACHE_LINE_BYTES);
122 for (j = 0; j < xd->conf.n_rx_queues; j++)
124 dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, j);
125 u8 bpidx = vlib_buffer_pool_get_default_for_numa (
126 vm, vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index));
127 vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, bpidx);
128 struct rte_mempool *mp = dpdk_mempool_by_buffer_pool_index[bpidx];
130 rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->conf.n_rx_desc,
131 xd->cpu_socket, 0, mp);
133 /* retry with any other CPU socket */
135 rv = rte_eth_rx_queue_setup (xd->port_id, j, xd->conf.n_rx_desc,
136 SOCKET_ID_ANY, 0, mp);
/* Remember which vlib buffer pool backs this queue's mempool. */
138 rxq->buffer_pool_index = bp->index;
141 dpdk_device_error (xd, "rte_eth_rx_queue_setup", rv);
/* Any accumulated error aborts to the error path at the bottom. */
144 if (vec_len (xd->errors))
147 rte_eth_dev_set_mtu (xd->port_id, hi->max_packet_bytes);
/* Buffer flag templates — original lines 148/154 with the assignment
 * targets are elided from this extraction. */
149 (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_EXT_HDR_VALID);
/* If both TCP and UDP rx checksum offloads survived capability
 * masking, rx buffers can be marked checksum computed+correct. */
151 if ((xd->port_conf.rxmode.offloads &
152 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) ==
153 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
155 (VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
/* Restore the admin-up state saved/cleared at the top. */
157 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
158 dpdk_device_start (xd);
/* Error path: mark PMD init failed and flag the sw interface so the
 * rest of the stack knows this device is unusable. */
160 if (vec_len (xd->errors))
166 xd->flags |= DPDK_DEVICE_FLAG_PMD_INIT_FAIL;
167 sw->flags |= VNET_SW_INTERFACE_FLAG_ERROR;
/* clib file read-ready callback for a device rx interrupt eventfd
 * (registered in dpdk_setup_interrupts).  Drains the fd, marks the
 * corresponding rx queue interrupt-pending, and re-arms the device
 * interrupt.  NOTE(review): braces and the declaration of `b` are
 * elided from this extraction. */
170 static clib_error_t *
171 dpdk_rx_read_ready (clib_file_t *uf)
173 vnet_main_t *vnm = vnet_get_main ();
174 dpdk_main_t *dm = &dpdk_main;
/* private_data holds the vnet rx queue index (set at registration). */
175 u32 qidx = uf->private_data;
176 vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, qidx);
177 dpdk_device_t *xd = vec_elt_at_index (dm->devices, rxq->dev_instance);
/* Drain the eventfd; the value read is deliberately ignored. */
180 CLIB_UNUSED (ssize_t size) = read (uf->file_descriptor, &b, sizeof (b));
/* Only act when the queue is actually in an interrupt-driven mode. */
181 if (rxq->mode != VNET_HW_IF_RX_MODE_POLLING)
183 vnet_hw_if_rx_queue_set_int_pending (vnm, uf->private_data);
184 rte_eth_dev_rx_intr_enable (xd->port_id, rxq->queue_id);
191 dpdk_setup_interrupts (dpdk_device_t *xd)
193 vnet_main_t *vnm = vnet_get_main ();
194 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
198 if (!xd->port_conf.intr_conf.rxq)
201 /* Probe for interrupt support */
202 if (rte_eth_dev_rx_intr_enable (xd->port_id, 0))
204 dpdk_log_info ("probe for interrupt mode for device %U. Failed.\n",
205 format_dpdk_device_name, xd->port_id);
209 xd->flags |= DPDK_DEVICE_FLAG_INT_SUPPORTED;
210 if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
211 rte_eth_dev_rx_intr_disable (xd->port_id, 0);
212 dpdk_log_info ("Probe for interrupt mode for device %U. Success.\n",
213 format_dpdk_device_name, xd->port_id);
216 if (xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED)
218 hi->caps |= VNET_HW_IF_CAP_INT_MODE;
219 for (int q = 0; q < xd->conf.n_rx_queues; q++)
221 dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, q);
222 clib_file_t f = { 0 };
223 rxq->efd = rte_eth_dev_rx_intr_ctl_q_get_fd (xd->port_id, q);
226 xd->flags &= ~DPDK_DEVICE_FLAG_INT_SUPPORTED;
227 hi->caps &= ~VNET_HW_IF_CAP_INT_MODE;
230 f.read_function = dpdk_rx_read_ready;
231 f.flags = UNIX_FILE_EVENT_EDGE_TRIGGERED;
232 f.file_descriptor = rxq->efd;
233 f.private_data = rxq->queue_index;
235 format (0, "%U queue %u", format_dpdk_device_name, xd->port_id, q);
236 rxq->clib_file_index = clib_file_add (&file_main, &f);
237 vnet_hw_if_set_rx_queue_file_index (vnm, rxq->queue_index,
238 rxq->clib_file_index);
239 if (xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE)
241 clib_file_main_t *fm = &file_main;
243 pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
244 fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
248 vnet_hw_if_update_runtime_data (vnm, xd->hw_if_index);
/* Start a configured device: rte_eth_dev_start, wire up rx interrupts,
 * program the default MAC address, apply the promiscuous setting and
 * enable all-multicast reception.  NOTE(review): the `if (rv < 0)`
 * guards around the error calls are elided from this extraction. */
252 dpdk_device_start (dpdk_device_t * xd)
/* Never touch a device whose PMD initialization previously failed. */
256 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
259 rv = rte_eth_dev_start (xd->port_id);
263 dpdk_device_error (xd, "rte_eth_dev_start", rv);
267 dpdk_setup_interrupts (xd);
/* Restore the configured default MAC address, if one was saved. */
269 if (xd->default_mac_address)
270 rv = rte_eth_dev_default_mac_addr_set (xd->port_id,
271 (void *) xd->default_mac_address);
274 dpdk_device_error (xd, "rte_eth_dev_default_mac_addr_set", rv);
/* Apply the device's recorded promiscuous-mode preference. */
276 if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
277 rte_eth_promiscuous_enable (xd->port_id);
279 rte_eth_promiscuous_disable (xd->port_id);
/* All-multicast reception is always enabled. */
281 rte_eth_allmulticast_enable (xd->port_id);
283 dpdk_log_info ("Interface %U started",
284 format_dpdk_device_name, xd->port_id);
/* Stop a running device and clear its cached link state.  No-op for
 * devices whose PMD initialization failed.  NOTE(review): braces and
 * the return statement are elided from this extraction. */
288 dpdk_device_stop (dpdk_device_t * xd)
290 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
293 rte_eth_allmulticast_disable (xd->port_id);
294 rte_eth_dev_stop (xd->port_id);
/* Forget the last known link state so stale speed/status is not shown. */
295 clib_memset (&xd->link, 0, sizeof (struct rte_eth_link));
297 dpdk_log_info ("Interface %U stopped",
298 format_dpdk_device_name, xd->port_id);
301 void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
/* DPDK ethdev event handler body: on a link-state-change interrupt,
 * read the link without waiting and log the new state; ignore (but
 * log) any other event type.  NOTE(review): braces/returns and the
 * link-up conditional are elided from this extraction. */
304 dpdk_port_state_callback_inline (dpdk_portid_t port_id,
305 enum rte_eth_event_type type, void *param)
307 struct rte_eth_link link;
309 RTE_SET_USED (param);
/* Only link-state-change events are handled here. */
310 if (type != RTE_ETH_EVENT_INTR_LSC)
312 dpdk_log_info ("Unknown event %d received for port %d", type, port_id);
/* Non-blocking link query: do not wait for link negotiation. */
316 rte_eth_link_get_nowait (port_id, &link);
317 u8 link_up = link.link_status;
319 dpdk_log_info ("Port %d Link Up - speed %u Mbps - %s",
320 port_id, (unsigned) link.link_speed,
321 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
322 "full-duplex" : "half-duplex");
324 dpdk_log_info ("Port %d Link Down\n\n", port_id);
/* Callback registered with DPDK; thin wrapper that forwards to
 * dpdk_port_state_callback_inline.  NOTE(review): the `void *param`
 * parameter line (original line 332) is elided from this extraction,
 * which is why `param` appears in the body but not the visible list. */
330 dpdk_port_state_callback (dpdk_portid_t port_id,
331 enum rte_eth_event_type type,
333 void *ret_param __attribute__ ((unused)))
335 return dpdk_port_state_callback_inline (port_id, type, param);
338 /* If this device is PCI return pointer to info, otherwise NULL */
339 struct rte_pci_device *
340 dpdk_get_pci_device (const struct rte_eth_dev_info *info)
342 const struct rte_bus *bus;
/* Identify the bus by name; only "pci" devices qualify. */
344 bus = rte_bus_find_by_device (info->device);
345 if (bus && !strcmp (bus->name, "pci"))
346 return RTE_DEV_TO_PCI (info->device);
/* NOTE(review): the NULL-return fallthrough (original lines 347-349)
 * is elided from this extraction. */
351 /* If this device is VMBUS return pointer to info, otherwise NULL */
352 struct rte_vmbus_device *
353 dpdk_get_vmbus_device (const struct rte_eth_dev_info *info)
355 const struct rte_bus *bus;
/* Identify the bus by name; only "vmbus" devices qualify. */
357 bus = rte_bus_find_by_device (info->device);
358 if (bus && !strcmp (bus->name, "vmbus"))
/* Recover the enclosing vmbus device from its embedded rte_device. */
359 return container_of (info->device, struct rte_vmbus_device, device);
/* NOTE(review): the NULL-return fallthrough (original lines after 359)
 * is elided from this extraction. */
365 * fd.io coding-style-patch-verification: ON
368 * eval: (c-set-style "gnu")