/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
20 #include <vnet/llc/llc.h>
21 #include <vnet/snap/snap.h>
22 #include <vnet/bonding/node.h>
24 bond_main_t bond_main;
26 #define foreach_bond_input_error \
28 _(IF_DOWN, "interface down") \
29 _(NO_SLAVE, "no slave") \
30 _(NO_BOND, "no bond interface")\
31 _(PASS_THRU, "pass through")
35 #define _(f,s) BOND_INPUT_ERROR_##f,
36 foreach_bond_input_error
41 static char *bond_input_error_strings[] = {
43 foreach_bond_input_error
48 format_bond_input_trace (u8 * s, va_list * args)
50 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
52 bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
53 vnet_hw_interface_t *hw, *hw1;
54 vnet_main_t *vnm = vnet_get_main ();
56 hw = vnet_get_sup_hw_interface (vnm, t->sw_if_index);
57 hw1 = vnet_get_sup_hw_interface (vnm, t->bond_sw_if_index);
58 s = format (s, "src %U, dst %U, %s -> %s",
59 format_ethernet_address, t->ethernet.src_address,
60 format_ethernet_address, t->ethernet.dst_address,
66 static_always_inline u8
67 packet_is_cdp (ethernet_header_t * eth)
72 llc = (llc_header_t *) (eth + 1);
73 snap = (snap_header_t *) (llc + 1);
75 return ((eth->type == htons (ETHERNET_TYPE_CDP)) ||
76 ((llc->src_sap == 0xAA) && (llc->control == 0x03) &&
77 (snap->protocol == htons (0x2000)) &&
78 (snap->oui[0] == 0) && (snap->oui[1] == 0) &&
79 (snap->oui[2] == 0x0C)));
83 bond_sw_if_index_rewrite (vlib_main_t * vm, vlib_node_runtime_t * node,
84 slave_if_t * sif, ethernet_header_t * eth,
88 u16 thread_index = vlib_get_thread_index ();
89 u16 *ethertype_p, ethertype;
90 ethernet_vlan_header_t *vlan;
92 if (PREDICT_TRUE (sif != 0))
94 bif = bond_get_master_by_sw_if_index (sif->group);
95 if (PREDICT_TRUE (bif != 0))
97 if (PREDICT_TRUE (vec_len (bif->slaves) >= 1))
99 if (PREDICT_TRUE (bif->admin_up == 1))
101 if (!ethernet_frame_is_tagged (ntohs (eth->type)))
103 // Let some layer2 packets pass through.
104 if (PREDICT_TRUE ((eth->type !=
105 htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
106 && !packet_is_cdp (eth)
109 (ETHERNET_TYPE_802_1_LLDP))))
111 // Change the physical interface to
113 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
116 /* increase rx counters */
117 vlib_increment_simple_counter
118 (vnet_main.interface_main.sw_if_counters +
119 VNET_INTERFACE_COUNTER_RX, thread_index,
120 bif->sw_if_index, 1);
124 vlib_error_count (vm, node->node_index,
125 BOND_INPUT_ERROR_PASS_THRU, 1);
130 vlan = (void *) (eth + 1);
131 ethertype_p = &vlan->type;
132 if (*ethertype_p == ntohs (ETHERNET_TYPE_VLAN))
135 ethertype_p = &vlan->type;
137 ethertype = *ethertype_p;
138 if (PREDICT_TRUE ((ethertype !=
139 htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
141 htons (ETHERNET_TYPE_CDP))
144 (ETHERNET_TYPE_802_1_LLDP))))
146 // Change the physical interface to
148 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
151 /* increase rx counters */
152 vlib_increment_simple_counter
153 (vnet_main.interface_main.sw_if_counters +
154 VNET_INTERFACE_COUNTER_RX, thread_index,
155 bif->sw_if_index, 1);
159 vlib_error_count (vm, node->node_index,
160 BOND_INPUT_ERROR_PASS_THRU, 1);
166 vlib_error_count (vm, node->node_index,
167 BOND_INPUT_ERROR_IF_DOWN, 1);
172 vlib_error_count (vm, node->node_index,
173 BOND_INPUT_ERROR_NO_SLAVE, 1);
178 vlib_error_count (vm, node->node_index,
179 BOND_INPUT_ERROR_NO_BOND, 1);
184 vlib_error_count (vm, node->node_index, BOND_INPUT_ERROR_NO_SLAVE, 1);
190 bond_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
191 vlib_frame_t * frame)
193 u32 bi0, bi1, bi2, bi3;
194 vlib_buffer_t *b0, *b1, *b2, *b3;
196 u32 *from, *to_next, n_left_from, n_left_to_next;
197 ethernet_header_t *eth, *eth1, *eth2, *eth3;
198 u32 next0, next1, next2, next3;
199 bond_packet_trace_t *t0;
200 uword n_trace = vlib_get_trace_count (vm, node);
201 u32 sw_if_index, sw_if_index1, sw_if_index2, sw_if_index3;
202 slave_if_t *sif, *sif1, *sif2, *sif3;
203 u16 thread_index = vlib_get_thread_index ();
205 /* Vector of buffer / pkt indices we're supposed to process */
206 from = vlib_frame_vector_args (frame);
208 /* Number of buffers / pkts */
209 n_left_from = frame->n_vectors;
211 /* Speculatively send the first buffer to the last disposition we used */
212 next_index = node->cached_next_index;
214 while (n_left_from > 0)
216 /* set up to enqueue to our disposition with index = next_index */
217 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
219 while (n_left_from >= 12 && n_left_to_next >= 4)
221 // Prefetch next iteration
223 vlib_buffer_t *b4, *b5, *b6, *b7;
225 b4 = vlib_get_buffer (vm, from[4]);
226 b5 = vlib_get_buffer (vm, from[5]);
227 b6 = vlib_get_buffer (vm, from[6]);
228 b7 = vlib_get_buffer (vm, from[7]);
230 vlib_prefetch_buffer_header (b4, STORE);
231 vlib_prefetch_buffer_header (b5, STORE);
232 vlib_prefetch_buffer_header (b6, STORE);
233 vlib_prefetch_buffer_header (b7, STORE);
235 CLIB_PREFETCH (b4->data, CLIB_CACHE_LINE_BYTES, LOAD);
236 CLIB_PREFETCH (b5->data, CLIB_CACHE_LINE_BYTES, LOAD);
237 CLIB_PREFETCH (b6->data, CLIB_CACHE_LINE_BYTES, LOAD);
238 CLIB_PREFETCH (b7->data, CLIB_CACHE_LINE_BYTES, LOAD);
261 b0 = vlib_get_buffer (vm, bi0);
262 b1 = vlib_get_buffer (vm, bi1);
263 b2 = vlib_get_buffer (vm, bi2);
264 b3 = vlib_get_buffer (vm, bi3);
266 vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
268 vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_RX], &next1,
270 vnet_feature_next (vnet_buffer (b2)->sw_if_index[VLIB_RX], &next2,
272 vnet_feature_next (vnet_buffer (b3)->sw_if_index[VLIB_RX], &next3,
275 eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
276 eth1 = (ethernet_header_t *) vlib_buffer_get_current (b1);
277 eth2 = (ethernet_header_t *) vlib_buffer_get_current (b2);
278 eth3 = (ethernet_header_t *) vlib_buffer_get_current (b3);
280 sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
281 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
282 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
283 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
285 // sw_if_index points to the physical interface
286 sif = bond_get_slave_by_sw_if_index (sw_if_index);
287 sif1 = bond_get_slave_by_sw_if_index (sw_if_index1);
288 sif2 = bond_get_slave_by_sw_if_index (sw_if_index2);
289 sif3 = bond_get_slave_by_sw_if_index (sw_if_index3);
291 bond_sw_if_index_rewrite (vm, node, sif, eth, b0);
292 bond_sw_if_index_rewrite (vm, node, sif1, eth1, b1);
293 bond_sw_if_index_rewrite (vm, node, sif2, eth2, b2);
294 bond_sw_if_index_rewrite (vm, node, sif3, eth3, b3);
296 if (PREDICT_FALSE (n_trace > 0))
298 vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
299 vlib_set_trace_count (vm, node, --n_trace);
300 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
302 t0->sw_if_index = sw_if_index;
303 t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
305 if (PREDICT_TRUE (n_trace > 0))
307 vlib_trace_buffer (vm, node, next1, b1,
308 0 /* follow_chain */ );
309 vlib_set_trace_count (vm, node, --n_trace);
310 t0 = vlib_add_trace (vm, node, b1, sizeof (*t0));
311 t0->ethernet = *eth1;
312 t0->sw_if_index = sw_if_index1;
313 t0->bond_sw_if_index =
314 vnet_buffer (b1)->sw_if_index[VLIB_RX];
316 if (PREDICT_TRUE (n_trace > 0))
318 vlib_trace_buffer (vm, node, next1, b2,
319 0 /* follow_chain */ );
320 vlib_set_trace_count (vm, node, --n_trace);
321 t0 = vlib_add_trace (vm, node, b2, sizeof (*t0));
322 t0->ethernet = *eth2;
323 t0->sw_if_index = sw_if_index2;
324 t0->bond_sw_if_index =
325 vnet_buffer (b2)->sw_if_index[VLIB_RX];
327 if (PREDICT_TRUE (n_trace > 0))
329 vlib_trace_buffer (vm, node, next1, b2,
330 0 /* follow_chain */ );
331 vlib_set_trace_count (vm, node, --n_trace);
332 t0 = vlib_add_trace (vm, node, b3, sizeof (*t0));
333 t0->ethernet = *eth3;
334 t0->sw_if_index = sw_if_index3;
335 t0->bond_sw_if_index =
336 vnet_buffer (b3)->sw_if_index[VLIB_RX];
342 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
343 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
344 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
345 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
347 /* verify speculative enqueue, maybe switch current next frame */
348 vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
349 to_next, n_left_to_next,
350 bi0, bi1, bi2, bi3, next0, next1,
354 while (n_left_from > 0 && n_left_to_next > 0)
356 // Prefetch next iteration
361 p2 = vlib_get_buffer (vm, from[1]);
362 vlib_prefetch_buffer_header (p2, STORE);
363 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
374 b0 = vlib_get_buffer (vm, bi0);
375 vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
378 eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
380 sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
381 // sw_if_index points to the physical interface
382 sif = bond_get_slave_by_sw_if_index (sw_if_index);
383 bond_sw_if_index_rewrite (vm, node, sif, eth, b0);
385 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
387 /* verify speculative enqueue, maybe switch current next frame */
388 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
389 to_next, n_left_to_next,
392 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
395 vlib_node_increment_counter (vm, bond_input_node.index,
396 BOND_INPUT_ERROR_NONE, frame->n_vectors);
398 vnet_device_increment_rx_packets (thread_index, frame->n_vectors);
400 return frame->n_vectors;
403 static clib_error_t *
404 bond_input_init (vlib_main_t * vm)
410 VLIB_REGISTER_NODE (bond_input_node) = {
411 .function = bond_input_fn,
412 .name = "bond-input",
413 .vector_size = sizeof (u32),
414 .format_buffer = format_ethernet_header_with_length,
415 .format_trace = format_bond_input_trace,
416 .type = VLIB_NODE_TYPE_INTERNAL,
417 .n_errors = BOND_INPUT_N_ERROR,
418 .error_strings = bond_input_error_strings,
426 VLIB_INIT_FUNCTION (bond_input_init);
428 VNET_FEATURE_INIT (bond_input, static) =
430 .arc_name = "device-input",
431 .node_name = "bond-input",
432 .runs_before = VNET_FEATURES ("ethernet-input"),
434 VLIB_NODE_FUNCTION_MULTIARCH (bond_input_node, bond_input_fn)
437 static clib_error_t *
438 bond_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
440 bond_main_t *bm = &bond_main;
442 vlib_main_t *vm = bm->vlib_main;
444 sif = bond_get_slave_by_sw_if_index (sw_if_index);
447 sif->port_enabled = flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP;
448 if (sif->port_enabled == 0)
450 if (sif->lacp_enabled == 0)
452 bond_disable_collecting_distributing (vm, sif);
457 if (sif->lacp_enabled == 0)
459 bond_enable_collecting_distributing (vm, sif);
467 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bond_sw_interface_up_down);
469 static clib_error_t *
470 bond_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
472 bond_main_t *bm = &bond_main;
474 vnet_sw_interface_t *sw;
475 vlib_main_t *vm = bm->vlib_main;
476 vnet_interface_main_t *im = &vnm->interface_main;
478 sw = pool_elt_at_index (im->sw_interfaces, hw_if_index);
479 sif = bond_get_slave_by_sw_if_index (sw->sw_if_index);
482 if (!(flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
484 if (sif->lacp_enabled == 0)
486 bond_disable_collecting_distributing (vm, sif);
491 if (sif->lacp_enabled == 0)
493 bond_enable_collecting_distributing (vm, sif);
501 VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bond_hw_interface_up_down);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */