/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vnet/vnet.h>
17 #include <vppinfra/vec.h>
18 #include <vppinfra/format.h>
19 #include <vlib/unix/cj.h>
22 #include <vnet/ip/ip.h>
23 #include <vnet/ethernet/ethernet.h>
24 #include <vnet/ethernet/arp_packet.h>
25 #include <dpdk/device/dpdk.h>
27 #include <dpdk/device/dpdk_priv.h>
28 #include <vppinfra/error.h>
31 dpdk_device_error (dpdk_device_t * xd, char *str, int rv)
33 dpdk_log_err ("Interface %U error %d: %s",
34 format_dpdk_device_name, xd->device_index, rv,
36 xd->errors = clib_error_return (xd->errors, "%s[port:%d, errno:%d]: %s",
37 str, xd->device_index, rv,
42 dpdk_device_setup (dpdk_device_t * xd)
44 dpdk_main_t *dm = &dpdk_main;
45 vnet_main_t *vnm = vnet_get_main ();
46 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
47 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
51 ASSERT (vlib_get_thread_index () == 0);
53 clib_error_free (xd->errors);
54 sw->flags &= ~VNET_SW_INTERFACE_FLAG_ERROR;
56 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
58 vnet_hw_interface_set_flags (dm->vnet_main, xd->hw_if_index, 0);
59 dpdk_device_stop (xd);
62 rv = rte_eth_dev_configure (xd->device_index, xd->rx_q_used,
63 xd->tx_q_used, &xd->port_conf);
67 dpdk_device_error (xd, "rte_eth_dev_configure", rv);
71 /* Set up one TX-queue per worker thread */
72 for (j = 0; j < xd->tx_q_used; j++)
74 rv = rte_eth_tx_queue_setup (xd->device_index, j, xd->nb_tx_desc,
75 xd->cpu_socket, &xd->tx_conf);
77 /* retry with any other CPU socket */
79 rv = rte_eth_tx_queue_setup (xd->device_index, j, xd->nb_tx_desc,
80 SOCKET_ID_ANY, &xd->tx_conf);
82 dpdk_device_error (xd, "rte_eth_tx_queue_setup", rv);
85 vec_validate_aligned (xd->buffer_pool_for_queue, xd->rx_q_used - 1,
86 CLIB_CACHE_LINE_BYTES);
87 for (j = 0; j < xd->rx_q_used; j++)
89 dpdk_mempool_private_t *privp;
90 uword tidx = vnet_get_device_input_thread_index (dm->vnet_main,
92 unsigned lcore = vlib_worker_threads[tidx].lcore_id;
93 u16 socket_id = rte_lcore_to_socket_id (lcore);
95 rv = rte_eth_rx_queue_setup (xd->device_index, j, xd->nb_rx_desc,
97 dm->pktmbuf_pools[socket_id]);
99 /* retry with any other CPU socket */
101 rv = rte_eth_rx_queue_setup (xd->device_index, j, xd->nb_rx_desc,
103 dm->pktmbuf_pools[socket_id]);
105 privp = rte_mempool_get_priv (dm->pktmbuf_pools[socket_id]);
106 xd->buffer_pool_for_queue[j] = privp->buffer_pool_index;
109 dpdk_device_error (xd, "rte_eth_rx_queue_setup", rv);
112 if (vec_len (xd->errors))
115 rte_eth_dev_set_mtu (xd->device_index, hi->max_packet_bytes);
117 if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
118 dpdk_device_start (xd);
120 if (vec_len (xd->errors))
126 xd->flags |= DPDK_DEVICE_FLAG_PMD_INIT_FAIL;
127 sw->flags |= VNET_SW_INTERFACE_FLAG_ERROR;
131 dpdk_device_start (dpdk_device_t * xd)
135 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
138 rv = rte_eth_dev_start (xd->device_index);
142 dpdk_device_error (xd, "rte_eth_dev_start", rv);
146 if (xd->default_mac_address)
148 rte_eth_dev_default_mac_addr_set (xd->device_index,
149 (struct ether_addr *)
150 xd->default_mac_address);
153 dpdk_device_error (xd, "rte_eth_dev_default_mac_addr_set", rv);
155 if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
156 rte_eth_promiscuous_enable (xd->device_index);
158 rte_eth_promiscuous_disable (xd->device_index);
160 rte_eth_allmulticast_enable (xd->device_index);
162 if (xd->pmd == VNET_DPDK_PMD_BOND)
164 dpdk_portid_t slink[16];
165 int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
168 dpdk_portid_t dpdk_port = slink[--nlink];
169 rte_eth_allmulticast_enable (dpdk_port);
173 dpdk_log_info ("Interface %U started",
174 format_dpdk_device_name, xd->device_index);
178 dpdk_device_stop (dpdk_device_t * xd)
180 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
183 rte_eth_allmulticast_disable (xd->device_index);
184 rte_eth_dev_stop (xd->device_index);
186 /* For bonded interface, stop slave links */
187 if (xd->pmd == VNET_DPDK_PMD_BOND)
189 dpdk_portid_t slink[16];
190 int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
193 dpdk_portid_t dpdk_port = slink[--nlink];
194 rte_eth_dev_stop (dpdk_port);
197 dpdk_log_info ("Interface %U stopped",
198 format_dpdk_device_name, xd->device_index);
201 /* Even type for send_garp_na_process */
205 } dpdk_send_garp_na_process_event_t;
207 static vlib_node_registration_t send_garp_na_proc_node;
210 send_garp_na_process (vlib_main_t * vm,
211 vlib_node_runtime_t * rt, vlib_frame_t * f)
213 vnet_main_t *vnm = vnet_get_main ();
214 uword event_type, *event_data = 0;
220 vlib_process_wait_for_event (vm);
221 event_type = vlib_process_get_events (vm, &event_data);
222 ASSERT (event_type == SEND_GARP_NA);
223 for (i = 0; i < vec_len (event_data); i++)
225 dpdk_port = event_data[i];
226 if (i < 5) /* wait 0.2 sec for link to settle, max total 1 sec */
227 vlib_process_suspend (vm, 0.2);
228 dpdk_device_t *xd = &dpdk_main.devices[dpdk_port];
229 u32 hw_if_index = xd->hw_if_index;
230 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
231 dpdk_update_link_state (xd, vlib_time_now (vm));
232 send_ip4_garp (vm, hi);
233 send_ip6_na (vm, hi);
235 vec_reset_length (event_data);
241 VLIB_REGISTER_NODE (send_garp_na_proc_node, static) = {
242 .function = send_garp_na_process,
243 .type = VLIB_NODE_TYPE_PROCESS,
244 .name = "send-garp-na-process",
248 void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
251 garp_na_proc_callback (uword * dpdk_port)
253 vlib_main_t *vm = vlib_get_main ();
254 ASSERT (vlib_get_thread_index () == 0);
255 vlib_process_signal_event
256 (vm, send_garp_na_proc_node.index, SEND_GARP_NA, *dpdk_port);
260 dpdk_port_state_callback_inline (dpdk_portid_t port_id,
261 enum rte_eth_event_type type, void *param)
263 struct rte_eth_link link;
264 dpdk_device_t *xd = &dpdk_main.devices[port_id];
266 RTE_SET_USED (param);
267 if (type != RTE_ETH_EVENT_INTR_LSC)
269 dpdk_log_info ("Unknown event %d received for port %d", type, port_id);
273 rte_eth_link_get_nowait (port_id, &link);
274 u8 link_up = link.link_status;
276 if (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE)
278 uword bd_port = xd->bond_port;
279 int bd_mode = rte_eth_bond_mode_get (bd_port);
280 dpdk_log_info ("Port %d state to %s, "
281 "slave of port %d BondEthernet%d in mode %d",
282 port_id, (link_up) ? "UP" : "DOWN",
283 bd_port, xd->port_id, bd_mode);
284 if (bd_mode == BONDING_MODE_ACTIVE_BACKUP)
286 vl_api_force_rpc_call_main_thread
287 (garp_na_proc_callback, (u8 *) & bd_port, sizeof (uword));
291 xd->flags |= DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
293 xd->flags &= ~DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
295 else /* Should not happen as callback not setup for "normal" links */
298 dpdk_log_info ("Port %d Link Up - speed %u Mbps - %s",
299 port_id, (unsigned) link.link_speed,
300 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
301 "full-duplex" : "half-duplex");
303 dpdk_log_info ("Port %d Link Down\n\n", port_id);
310 dpdk_port_state_callback (dpdk_portid_t port_id,
311 enum rte_eth_event_type type,
313 void *ret_param __attribute__ ((unused)))
315 return dpdk_port_state_callback_inline (port_id, type, param);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */