/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vnet/llc/llc.h>
#include <vnet/snap/snap.h>
#include <vnet/bonding/node.h>
24 bond_main_t bond_main;
26 #define foreach_bond_input_error \
28 _(IF_DOWN, "interface down") \
29 _(NO_SLAVE, "no slave") \
30 _(NO_BOND, "no bond interface")\
31 _(PASS_THRU, "pass through")
35 #define _(f,s) BOND_INPUT_ERROR_##f,
36 foreach_bond_input_error
41 static char *bond_input_error_strings[] = {
43 foreach_bond_input_error
48 format_bond_input_trace (u8 * s, va_list * args)
50 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
52 bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
54 s = format (s, "src %U, dst %U, %U -> %U",
55 format_ethernet_address, t->ethernet.src_address,
56 format_ethernet_address, t->ethernet.dst_address,
57 format_vnet_sw_if_index_name, vnet_get_main (),
59 format_vnet_sw_if_index_name, vnet_get_main (),
65 static_always_inline u8
66 packet_is_cdp (ethernet_header_t * eth)
71 llc = (llc_header_t *) (eth + 1);
72 snap = (snap_header_t *) (llc + 1);
74 return ((eth->type == htons (ETHERNET_TYPE_CDP)) ||
75 ((llc->src_sap == 0xAA) && (llc->control == 0x03) &&
76 (snap->protocol == htons (0x2000)) &&
77 (snap->oui[0] == 0) && (snap->oui[1] == 0) &&
78 (snap->oui[2] == 0x0C)));
82 bond_sw_if_index_rewrite (vlib_main_t * vm, vlib_node_runtime_t * node,
83 slave_if_t * sif, ethernet_header_t * eth,
87 u16 thread_index = vlib_get_thread_index ();
88 u16 *ethertype_p, ethertype;
89 ethernet_vlan_header_t *vlan;
91 if (PREDICT_TRUE (sif != 0))
93 bif = bond_get_master_by_sw_if_index (sif->group);
94 if (PREDICT_TRUE (bif != 0))
96 if (PREDICT_TRUE (vec_len (bif->slaves) >= 1))
98 if (PREDICT_TRUE (bif->admin_up == 1))
100 if (!ethernet_frame_is_tagged (ntohs (eth->type)))
102 // Let some layer2 packets pass through.
103 if (PREDICT_TRUE ((eth->type !=
104 htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
105 && !packet_is_cdp (eth)
108 (ETHERNET_TYPE_802_1_LLDP))))
110 // Change the physical interface to
112 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
115 /* increase rx counters */
116 vlib_increment_simple_counter
117 (vnet_main.interface_main.sw_if_counters +
118 VNET_INTERFACE_COUNTER_RX, thread_index,
119 bif->sw_if_index, 1);
123 vlib_error_count (vm, node->node_index,
124 BOND_INPUT_ERROR_PASS_THRU, 1);
129 vlan = (void *) (eth + 1);
130 ethertype_p = &vlan->type;
131 if (*ethertype_p == ntohs (ETHERNET_TYPE_VLAN))
134 ethertype_p = &vlan->type;
136 ethertype = *ethertype_p;
137 if (PREDICT_TRUE ((ethertype !=
138 htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
140 htons (ETHERNET_TYPE_CDP))
143 (ETHERNET_TYPE_802_1_LLDP))))
145 // Change the physical interface to
147 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
150 /* increase rx counters */
151 vlib_increment_simple_counter
152 (vnet_main.interface_main.sw_if_counters +
153 VNET_INTERFACE_COUNTER_RX, thread_index,
154 bif->sw_if_index, 1);
158 vlib_error_count (vm, node->node_index,
159 BOND_INPUT_ERROR_PASS_THRU, 1);
165 vlib_error_count (vm, node->node_index,
166 BOND_INPUT_ERROR_IF_DOWN, 1);
171 vlib_error_count (vm, node->node_index,
172 BOND_INPUT_ERROR_NO_SLAVE, 1);
177 vlib_error_count (vm, node->node_index,
178 BOND_INPUT_ERROR_NO_BOND, 1);
183 vlib_error_count (vm, node->node_index, BOND_INPUT_ERROR_NO_SLAVE, 1);
189 bond_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
190 vlib_frame_t * frame)
192 u32 bi0, bi1, bi2, bi3;
193 vlib_buffer_t *b0, *b1, *b2, *b3;
195 u32 *from, *to_next, n_left_from, n_left_to_next;
196 ethernet_header_t *eth, *eth1, *eth2, *eth3;
197 u32 next0, next1, next2, next3;
198 bond_packet_trace_t *t0;
199 uword n_trace = vlib_get_trace_count (vm, node);
200 u32 sw_if_index, sw_if_index1, sw_if_index2, sw_if_index3;
201 slave_if_t *sif, *sif1, *sif2, *sif3;
202 u16 thread_index = vlib_get_thread_index ();
204 /* Vector of buffer / pkt indices we're supposed to process */
205 from = vlib_frame_vector_args (frame);
207 /* Number of buffers / pkts */
208 n_left_from = frame->n_vectors;
210 /* Speculatively send the first buffer to the last disposition we used */
211 next_index = node->cached_next_index;
213 while (n_left_from > 0)
215 /* set up to enqueue to our disposition with index = next_index */
216 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
218 while (n_left_from >= 12 && n_left_to_next >= 4)
220 // Prefetch next iteration
222 vlib_buffer_t *b4, *b5, *b6, *b7;
224 b4 = vlib_get_buffer (vm, from[4]);
225 b5 = vlib_get_buffer (vm, from[5]);
226 b6 = vlib_get_buffer (vm, from[6]);
227 b7 = vlib_get_buffer (vm, from[7]);
229 vlib_prefetch_buffer_header (b4, STORE);
230 vlib_prefetch_buffer_header (b5, STORE);
231 vlib_prefetch_buffer_header (b6, STORE);
232 vlib_prefetch_buffer_header (b7, STORE);
234 CLIB_PREFETCH (b4->data, CLIB_CACHE_LINE_BYTES, LOAD);
235 CLIB_PREFETCH (b5->data, CLIB_CACHE_LINE_BYTES, LOAD);
236 CLIB_PREFETCH (b6->data, CLIB_CACHE_LINE_BYTES, LOAD);
237 CLIB_PREFETCH (b7->data, CLIB_CACHE_LINE_BYTES, LOAD);
260 b0 = vlib_get_buffer (vm, bi0);
261 b1 = vlib_get_buffer (vm, bi1);
262 b2 = vlib_get_buffer (vm, bi2);
263 b3 = vlib_get_buffer (vm, bi3);
265 vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
267 vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_RX], &next1,
269 vnet_feature_next (vnet_buffer (b2)->sw_if_index[VLIB_RX], &next2,
271 vnet_feature_next (vnet_buffer (b3)->sw_if_index[VLIB_RX], &next3,
274 eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
275 eth1 = (ethernet_header_t *) vlib_buffer_get_current (b1);
276 eth2 = (ethernet_header_t *) vlib_buffer_get_current (b2);
277 eth3 = (ethernet_header_t *) vlib_buffer_get_current (b3);
279 sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
280 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
281 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
282 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
284 // sw_if_index points to the physical interface
285 sif = bond_get_slave_by_sw_if_index (sw_if_index);
286 sif1 = bond_get_slave_by_sw_if_index (sw_if_index1);
287 sif2 = bond_get_slave_by_sw_if_index (sw_if_index2);
288 sif3 = bond_get_slave_by_sw_if_index (sw_if_index3);
290 bond_sw_if_index_rewrite (vm, node, sif, eth, b0);
291 bond_sw_if_index_rewrite (vm, node, sif1, eth1, b1);
292 bond_sw_if_index_rewrite (vm, node, sif2, eth2, b2);
293 bond_sw_if_index_rewrite (vm, node, sif3, eth3, b3);
295 if (PREDICT_FALSE (n_trace > 0))
297 vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
298 vlib_set_trace_count (vm, node, --n_trace);
299 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
301 t0->sw_if_index = sw_if_index;
302 t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
304 if (PREDICT_TRUE (n_trace > 0))
306 vlib_trace_buffer (vm, node, next1, b1,
307 0 /* follow_chain */ );
308 vlib_set_trace_count (vm, node, --n_trace);
309 t0 = vlib_add_trace (vm, node, b1, sizeof (*t0));
310 t0->ethernet = *eth1;
311 t0->sw_if_index = sw_if_index1;
312 t0->bond_sw_if_index =
313 vnet_buffer (b1)->sw_if_index[VLIB_RX];
315 if (PREDICT_TRUE (n_trace > 0))
317 vlib_trace_buffer (vm, node, next1, b2,
318 0 /* follow_chain */ );
319 vlib_set_trace_count (vm, node, --n_trace);
320 t0 = vlib_add_trace (vm, node, b2, sizeof (*t0));
321 t0->ethernet = *eth2;
322 t0->sw_if_index = sw_if_index2;
323 t0->bond_sw_if_index =
324 vnet_buffer (b2)->sw_if_index[VLIB_RX];
326 if (PREDICT_TRUE (n_trace > 0))
328 vlib_trace_buffer (vm, node, next1, b2,
329 0 /* follow_chain */ );
330 vlib_set_trace_count (vm, node, --n_trace);
331 t0 = vlib_add_trace (vm, node, b3, sizeof (*t0));
332 t0->ethernet = *eth3;
333 t0->sw_if_index = sw_if_index3;
334 t0->bond_sw_if_index =
335 vnet_buffer (b3)->sw_if_index[VLIB_RX];
341 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
342 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
343 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
344 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
346 /* verify speculative enqueue, maybe switch current next frame */
347 vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
348 to_next, n_left_to_next,
349 bi0, bi1, bi2, bi3, next0, next1,
353 while (n_left_from > 0 && n_left_to_next > 0)
355 // Prefetch next iteration
360 p2 = vlib_get_buffer (vm, from[1]);
361 vlib_prefetch_buffer_header (p2, STORE);
362 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
373 b0 = vlib_get_buffer (vm, bi0);
374 vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
377 eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
379 sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
380 // sw_if_index points to the physical interface
381 sif = bond_get_slave_by_sw_if_index (sw_if_index);
382 bond_sw_if_index_rewrite (vm, node, sif, eth, b0);
384 if (PREDICT_FALSE (n_trace > 0))
386 vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
387 vlib_set_trace_count (vm, node, --n_trace);
388 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
390 t0->sw_if_index = sw_if_index;
391 t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
395 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
397 /* verify speculative enqueue, maybe switch current next frame */
398 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
399 to_next, n_left_to_next,
402 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
405 vlib_node_increment_counter (vm, bond_input_node.index,
406 BOND_INPUT_ERROR_NONE, frame->n_vectors);
408 vnet_device_increment_rx_packets (thread_index, frame->n_vectors);
410 return frame->n_vectors;
413 static clib_error_t *
414 bond_input_init (vlib_main_t * vm)
420 VLIB_REGISTER_NODE (bond_input_node) = {
421 .function = bond_input_fn,
422 .name = "bond-input",
423 .vector_size = sizeof (u32),
424 .format_buffer = format_ethernet_header_with_length,
425 .format_trace = format_bond_input_trace,
426 .type = VLIB_NODE_TYPE_INTERNAL,
427 .n_errors = BOND_INPUT_N_ERROR,
428 .error_strings = bond_input_error_strings,
436 VLIB_INIT_FUNCTION (bond_input_init);
438 VNET_FEATURE_INIT (bond_input, static) =
440 .arc_name = "device-input",
441 .node_name = "bond-input",
442 .runs_before = VNET_FEATURES ("ethernet-input"),
444 VLIB_NODE_FUNCTION_MULTIARCH (bond_input_node, bond_input_fn)
447 static clib_error_t *
448 bond_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
450 bond_main_t *bm = &bond_main;
452 vlib_main_t *vm = bm->vlib_main;
454 sif = bond_get_slave_by_sw_if_index (sw_if_index);
457 sif->port_enabled = flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP;
458 if (sif->port_enabled == 0)
460 if (sif->lacp_enabled == 0)
462 bond_disable_collecting_distributing (vm, sif);
467 if (sif->lacp_enabled == 0)
469 bond_enable_collecting_distributing (vm, sif);
477 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bond_sw_interface_up_down);
479 static clib_error_t *
480 bond_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
482 bond_main_t *bm = &bond_main;
484 vnet_sw_interface_t *sw;
485 vlib_main_t *vm = bm->vlib_main;
487 sw = vnet_get_hw_sw_interface (vnm, hw_if_index);
488 sif = bond_get_slave_by_sw_if_index (sw->sw_if_index);
491 if (!(flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
493 if (sif->lacp_enabled == 0)
495 bond_disable_collecting_distributing (vm, sif);
500 if (sif->lacp_enabled == 0)
502 bond_enable_collecting_distributing (vm, sif);
510 VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bond_hw_interface_up_down);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */