2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/vnet.h>
17 #include <vppinfra/vec.h>
18 #include <vppinfra/format.h>
19 #include <vlib/unix/cj.h>
22 #include <vnet/ip/ip.h>
23 #include <vnet/ethernet/ethernet.h>
24 #include <vnet/ethernet/arp_packet.h>
25 #include <dpdk/device/dpdk.h>
27 #include <dpdk/device/dpdk_priv.h>
28 #include <vppinfra/error.h>
/*
 * dpdk_device_error: report a failed rte_* API call for a device.
 * Logs via dpdk_log_err and appends a formatted clib error to xd->errors,
 * so callers (e.g. dpdk_device_setup) can detect accumulated failures
 * with vec_len (xd->errors).
 * NOTE(review): the embedded listing numbers jump (30, 32, 35 are
 * missing), so the return type, braces, and the trailing argument of
 * both calls (presumably rte_strerror (rv) — TODO confirm) are not
 * visible in this listing.
 */
31 dpdk_device_error (dpdk_device_t * xd, char *str, int rv)
33 dpdk_log_err ("Interface %U error %d: %s",
34 format_dpdk_device_name, xd->device_index, rv,
36 xd->errors = clib_error_return (xd->errors, "%s[port:%d, errno:%d]: %s",
37 str, xd->device_index, rv,
/*
 * dpdk_device_setup: (re)configure a DPDK ethernet device.
 * Visible sequence: stop the device if it is admin-up, program the port
 * configuration (flow-director mode for i40e), configure RX/TX queue
 * counts, set up one TX queue per worker and the RX queues on the socket
 * of their assigned input thread, set the MTU, then restart the device.
 * Any rte_* failure is recorded through dpdk_device_error; if xd->errors
 * is non-empty at the end, the device is flagged PMD_INIT_FAIL and the
 * software interface is flagged ERROR.
 * NOTE(review): the embedded listing numbers jump throughout, so braces,
 * the `if (rv < 0)` checks guarding the retries/error calls, declarations
 * of rv/j, goto/return statements, and some call arguments are missing
 * from this listing — the comments below describe only the visible lines.
 */
42 dpdk_device_setup (dpdk_device_t * xd)
44 dpdk_main_t *dm = &dpdk_main;
45 vnet_main_t *vnm = vnet_get_main ();
46 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->sw_if_index);
47 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
/* Device (re)configuration must run on the main thread only. */
51 ASSERT (vlib_get_thread_index () == 0);
/* Drop errors from any previous setup attempt and clear the error
   flag; both are re-set below if this attempt also fails. */
53 clib_error_free (xd->errors);
54 sw->flags &= ~VNET_SW_INTERFACE_FLAG_ERROR;
/* An admin-up device must be brought down before rte_eth_dev_configure. */
56 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
58 vnet_hw_interface_set_flags (dm->vnet_main, xd->hw_if_index, 0);
59 dpdk_device_stop (xd);
62 /* Enable flow director when flows exist */
63 if (xd->pmd == VNET_DPDK_PMD_I40E)
65 if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0)
66 xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
68 xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_NONE;
71 rv = rte_eth_dev_configure (xd->device_index, xd->rx_q_used,
72 xd->tx_q_used, &xd->port_conf);
/* NOTE(review): the `rv < 0` guard before this error call is not
   visible in the listing. */
76 dpdk_device_error (xd, "rte_eth_dev_configure", rv);
80 /* Set up one TX-queue per worker thread */
81 for (j = 0; j < xd->tx_q_used; j++)
83 rv = rte_eth_tx_queue_setup (xd->device_index, j, xd->nb_tx_desc,
84 xd->cpu_socket, &xd->tx_conf);
86 /* retry with any other CPU socket */
88 rv = rte_eth_tx_queue_setup (xd->device_index, j, xd->nb_tx_desc,
89 SOCKET_ID_ANY, &xd->tx_conf);
91 dpdk_device_error (xd, "rte_eth_tx_queue_setup", rv);
/* One buffer-pool index slot per RX queue (cache-line aligned vec). */
94 vec_validate_aligned (xd->buffer_pool_for_queue, xd->rx_q_used - 1,
95 CLIB_CACHE_LINE_BYTES);
96 for (j = 0; j < xd->rx_q_used; j++)
98 dpdk_mempool_private_t *privp;
/* Place each RX queue's descriptors/mempool on the NUMA socket of the
   thread that will poll it. NOTE(review): the remaining arguments of
   vnet_get_device_input_thread_index are on a dropped line. */
99 uword tidx = vnet_get_device_input_thread_index (dm->vnet_main,
101 unsigned lcore = vlib_worker_threads[tidx].lcore_id;
102 u16 socket_id = rte_lcore_to_socket_id (lcore);
104 rv = rte_eth_rx_queue_setup (xd->device_index, j, xd->nb_rx_desc,
106 dm->pktmbuf_pools[socket_id]);
108 /* retry with any other CPU socket */
110 rv = rte_eth_rx_queue_setup (xd->device_index, j, xd->nb_rx_desc,
112 dm->pktmbuf_pools[socket_id]);
/* Remember which VPP buffer pool backs this queue's mbufs. */
114 privp = rte_mempool_get_priv (dm->pktmbuf_pools[socket_id]);
115 xd->buffer_pool_for_queue[j] = privp->buffer_pool_index;
118 dpdk_device_error (xd, "rte_eth_rx_queue_setup", rv);
/* NOTE(review): the statement(s) taken when errors exist here (likely a
   goto past the start) are on dropped lines. */
121 if (vec_len (xd->errors))
124 rte_eth_dev_set_mtu (xd->device_index, hi->max_packet_bytes);
/* Restore the admin-up state that was torn down above. */
126 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
127 dpdk_device_start (xd);
/* Any accumulated error leaves the device marked failed and the
   software interface marked in error. */
129 if (vec_len (xd->errors))
135 xd->flags |= DPDK_DEVICE_FLAG_PMD_INIT_FAIL;
136 sw->flags |= VNET_SW_INTERFACE_FLAG_ERROR;
/*
 * dpdk_device_start: start the DPDK port and apply runtime settings:
 * default MAC address (if one is stored), promiscuous mode per the
 * PROMISC flag, and allmulticast.  For a bond PMD, allmulticast is also
 * enabled on every slave port so multicast keeps working after failover.
 * Failures are recorded through dpdk_device_error.
 * NOTE(review): embedded listing numbers jump — return type, braces,
 * early returns, and the `rv < 0` guards are missing from this listing.
 */
140 dpdk_device_start (dpdk_device_t * xd)
/* A device whose PMD failed to initialize must not be started. */
144 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
147 rv = rte_eth_dev_start (xd->device_index);
151 dpdk_device_error (xd, "rte_eth_dev_start", rv);
155 if (xd->default_mac_address)
157 rte_eth_dev_default_mac_addr_set (xd->device_index,
158 (struct ether_addr *)
159 xd->default_mac_address);
162 dpdk_device_error (xd, "rte_eth_dev_default_mac_addr_set", rv);
164 if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
165 rte_eth_promiscuous_enable (xd->device_index);
167 rte_eth_promiscuous_disable (xd->device_index);
169 rte_eth_allmulticast_enable (xd->device_index);
171 if (xd->pmd == VNET_DPDK_PMD_BOND)
/* Up to 16 slaves; enable allmulticast on each one.
   NOTE(review): the while/for statement driving `--nlink` is on a
   dropped line. */
173 dpdk_portid_t slink[16];
174 int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
177 dpdk_portid_t dpdk_port = slink[--nlink];
178 rte_eth_allmulticast_enable (dpdk_port);
182 dpdk_log_info ("Interface %U started",
183 format_dpdk_device_name, xd->device_index);
/*
 * dpdk_device_stop: mirror of dpdk_device_start — disable allmulticast
 * and stop the port; for a bond PMD, also stop every slave port.
 * No-op for a device whose PMD failed to initialize.
 * NOTE(review): embedded listing numbers jump — return type, braces and
 * the loop statement over the slaves are missing from this listing.
 */
189 dpdk_device_stop (dpdk_device_t * xd)
192 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
195 rte_eth_allmulticast_disable (xd->device_index);
196 rte_eth_dev_stop (xd->device_index);
198 /* For bonded interface, stop slave links */
199 if (xd->pmd == VNET_DPDK_PMD_BOND)
201 dpdk_portid_t slink[16];
202 int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
205 dpdk_portid_t dpdk_port = slink[--nlink];
206 rte_eth_dev_stop (dpdk_port);
209 dpdk_log_info ("Interface %U stopped",
210 format_dpdk_device_name, xd->device_index);
210 /* Event type for send_garp_na_process */
214 } dpdk_send_garp_na_process_event_t;
216 static vlib_node_registration_t send_garp_na_proc_node;
/*
 * send_garp_na_process: VLIB process node.  Waits for SEND_GARP_NA
 * events (one DPDK port id per event-data element), refreshes the
 * port's link state, and transmits a gratuitous ARP and an IPv6
 * neighbor advertisement on the corresponding hw interface so peers
 * learn the new active port after a bond failover.
 * NOTE(review): embedded listing numbers jump — the return type, the
 * enclosing while(1) loop, braces, and the declarations of i/dpdk_port
 * are missing from this listing.
 */
219 send_garp_na_process (vlib_main_t * vm,
220 vlib_node_runtime_t * rt, vlib_frame_t * f)
222 vnet_main_t *vnm = vnet_get_main ();
223 uword event_type, *event_data = 0;
229 vlib_process_wait_for_event (vm);
230 event_type = vlib_process_get_events (vm, &event_data);
/* Only SEND_GARP_NA is ever signalled to this node (see
   garp_na_proc_callback). */
231 ASSERT (event_type == SEND_GARP_NA);
232 for (i = 0; i < vec_len (event_data); i++)
234 dpdk_port = event_data[i];
235 if (i < 5) /* wait 0.2 sec for link to settle, max total 1 sec */
236 vlib_process_suspend (vm, 0.2);
237 dpdk_device_t *xd = &dpdk_main.devices[dpdk_port];
238 u32 hw_if_index = xd->hw_if_index;
239 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
240 dpdk_update_link_state (xd, vlib_time_now (vm));
241 send_ip4_garp (vm, hi);
242 send_ip6_na (vm, hi);
/* Reuse the event-data vector across iterations instead of freeing. */
244 vec_reset_length (event_data);
/* Register send_garp_na_process as a VLIB process node so it can be
   signalled by index (send_garp_na_proc_node.index).
   NOTE(review): the closing "};" of this registration is on a dropped
   line of this listing. */
250 VLIB_REGISTER_NODE (send_garp_na_proc_node, static) = {
251 .function = send_garp_na_process,
252 .type = VLIB_NODE_TYPE_PROCESS,
253 .name = "send-garp-na-process",
257 void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
/*
 * garp_na_proc_callback: executed on the main thread via
 * vl_api_force_rpc_call_main_thread (see dpdk_port_state_callback_inline);
 * signals SEND_GARP_NA with the affected DPDK port id to the
 * send-garp-na-process node.
 * NOTE(review): the return type line and braces are missing from this
 * listing (embedded numbers jump).
 */
260 garp_na_proc_callback (uword * dpdk_port)
262 vlib_main_t *vm = vlib_get_main ();
/* Process events may only be signalled from the main thread. */
263 ASSERT (vlib_get_thread_index () == 0);
264 vlib_process_signal_event
265 (vm, send_garp_na_proc_node.index, SEND_GARP_NA, *dpdk_port);
/*
 * dpdk_port_state_callback_inline: handler for DPDK link-state-change
 * (RTE_ETH_EVENT_INTR_LSC) interrupts.  Reads the new link state; for a
 * bond-slave port it updates the BOND_SLAVE_UP flag and, in
 * active-backup mode, schedules a GARP/NA burst on the main thread via
 * RPC.  For any other port it only logs (callback is registered solely
 * for bond slaves per the comment on the else branch).
 * NOTE(review): embedded listing numbers jump — the return type, braces,
 * `if (link_up)` guards around the flag updates and the two Up/Down log
 * calls, and return statements are missing from this listing.
 */
269 dpdk_port_state_callback_inline (dpdk_portid_t port_id,
270 enum rte_eth_event_type type, void *param)
272 struct rte_eth_link link;
273 dpdk_device_t *xd = &dpdk_main.devices[port_id];
275 RTE_SET_USED (param);
276 if (type != RTE_ETH_EVENT_INTR_LSC)
278 dpdk_log_info ("Unknown event %d received for port %d", type, port_id);
/* Non-blocking query — we are inside the interrupt callback. */
282 rte_eth_link_get_nowait (port_id, &link);
283 u8 link_up = link.link_status;
285 if (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE)
287 uword bd_port = xd->bond_port;
288 int bd_mode = rte_eth_bond_mode_get (bd_port);
289 dpdk_log_info ("Port %d state to %s, "
290 "slave of port %d BondEthernet%d in mode %d",
291 port_id, (link_up) ? "UP" : "DOWN",
292 bd_port, xd->bond_instance_num, bd_mode);
293 if (bd_mode == BONDING_MODE_ACTIVE_BACKUP)
/* Failover in active-backup: announce the (possibly new) active MAC
   via GARP/NA, marshalled to the main thread where process events may
   be signalled. */
295 vl_api_force_rpc_call_main_thread
296 (garp_na_proc_callback, (u8 *) & bd_port, sizeof (uword));
/* NOTE(review): the link_up test selecting between these two flag
   updates is on dropped lines. */
300 xd->flags |= DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
302 xd->flags &= ~DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
304 else /* Should not happen as callback not setup for "normal" links */
307 dpdk_log_info ("Port %d Link Up - speed %u Mbps - %s",
308 port_id, (unsigned) link.link_speed,
309 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
310 "full-duplex" : "half-duplex");
312 dpdk_log_info ("Port %d Link Down\n\n", port_id);
/*
 * dpdk_port_state_callback: thin wrapper matching DPDK's
 * rte_eth_dev_callback_register signature; forwards to the inline
 * handler above.
 * NOTE(review): the return type line and the `void *param` parameter
 * line are missing from this listing (the body forwards `param`, so a
 * dropped line between 320 and 322 presumably declares it — confirm
 * against the full source).
 */
319 dpdk_port_state_callback (dpdk_portid_t port_id,
320 enum rte_eth_event_type type,
322 void *ret_param __attribute__ ((unused)))
324 return dpdk_port_state_callback_inline (port_id, type, param);
328 * fd.io coding-style-patch-verification: ON
331 * eval: (c-set-style "gnu")