2 *------------------------------------------------------------------
3 * Copyright (c) 2017 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
20 #include <vnet/llc/llc.h>
21 #include <vnet/snap/snap.h>
22 #include <vnet/bonding/node.h>
/* Global bonding driver state. Defined only in the default translation
 * unit so the multi-architecture (CLIB_MARCH_VARIANT) builds of this file
 * all share a single instance. */
24 #ifndef CLIB_MARCH_VARIANT
25 bond_main_t bond_main;
26 #endif /* CLIB_MARCH_VARIANT */
/* X-macro list of bond-input error counters: _(SYMBOL, "description").
 * NOTE(review): extraction gaps here — the enum typedef wrapper (and any
 * additional entries such as a NONE counter referenced later at
 * BOND_INPUT_ERROR_NONE) is not visible in this view. */
28 #define foreach_bond_input_error \
30 _(IF_DOWN, "interface down") \
31 _(PASS_THRU, "pass through (CDP, LLDP, slow protocols)")
/* Expand the list into BOND_INPUT_ERROR_<SYMBOL> enum members. */
35 #define _(f,s) BOND_INPUT_ERROR_##f,
36 foreach_bond_input_error
/* Human-readable counter names, indexed by the error enum above. */
41 static char *bond_input_error_strings[] = {
43 foreach_bond_input_error
/* Packet-trace formatter for the bond-input node: prints the Ethernet
 * src/dst addresses and the interface rename performed by this node
 * (member sw_if_index -> bond sw_if_index, judging by the two
 * format_vnet_sw_if_index_name conversions).
 * NOTE(review): extraction gaps — the "static u8 *" header line, braces,
 * the sw_if_index arguments of the format call, and the return statement
 * are missing from this view. */
48 format_bond_input_trace (u8 * s, va_list * args)
50   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
52   bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
54   s = format (s, "src %U, dst %U, %U -> %U",
55               format_ethernet_address, t->ethernet.src_address,
56               format_ethernet_address, t->ethernet.dst_address,
57               format_vnet_sw_if_index_name, vnet_get_main (),
59               format_vnet_sw_if_index_name, vnet_get_main (),
/* Return non-zero when the frame is Cisco Discovery Protocol: either the
 * raw CDP ethertype, or an 802.2 LLC/SNAP encapsulation with source SAP
 * 0xAA, UI control 0x03, Cisco OUI 00-00-0C and SNAP protocol id 0x2000.
 * NOTE(review): the llc/snap local declarations fall in an extraction gap.
 * No buffer-length check is visible — presumably callers guarantee the
 * packet holds eth + llc + snap headers; confirm against buffer handling
 * in the caller. */
71 static_always_inline u8
72 packet_is_cdp (ethernet_header_t * eth)
/* LLC header follows the Ethernet header; SNAP follows LLC. */
77   llc = (llc_header_t *) (eth + 1);
78   snap = (snap_header_t *) (llc + 1);
80   return ((eth->type == htons (ETHERNET_TYPE_CDP)) ||
81           ((llc->src_sap == 0xAA) && (llc->control == 0x03) &&
82            (snap->protocol == htons (0x2000)) &&
83            (snap->oui[0] == 0) && (snap->oui[1] == 0) &&
84            (snap->oui[2] == 0x0C)));
/* Rewrite the buffer's RX sw_if_index from the physical member interface
 * to the bond interface, unless the packet is one of the layer-2 control
 * protocols (slow protocols/LACP, CDP, LLDP) that must keep the physical
 * interface and "pass through". Handles untagged, single-tagged and
 * double-tagged (QinQ) frames.
 * NOTE(review): extraction gaps — the return-type line, braces, the
 * "else" of the tagged path, the vlan++ step between the two
 * ethertype_p assignments, and the return statements are not visible.
 * Callers accumulate the result into "cnt", so this presumably returns
 * 1 when the rewrite happened and 0 on pass-through — confirm. */
88 bond_sw_if_idx_rewrite (vlib_main_t * vm, vlib_node_runtime_t * node,
89                         vlib_buffer_t * b, u32 bond_sw_if_index)
91   u16 *ethertype_p, ethertype;
92   ethernet_vlan_header_t *vlan;
93   ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b);
/* Unaligned load: the ethertype is not guaranteed naturally aligned. */
95   ethertype = clib_mem_unaligned (&eth->type, u16);
96   if (!ethernet_frame_is_tagged (ntohs (ethertype)))
98       // Let some layer2 packets pass through.
99       if (PREDICT_TRUE ((ethertype != htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
100                         && !packet_is_cdp (eth)
101                         && (ethertype != htons (ETHERNET_TYPE_802_1_LLDP))))
103           /* Change the physical interface to bond interface */
104           vnet_buffer (b)->sw_if_index[VLIB_RX] = bond_sw_if_index;
/* Tagged frame: step over the VLAN header(s) to the inner ethertype. */
110       vlan = (void *) (eth + 1);
111       ethertype_p = &vlan->type;
112       ethertype = clib_mem_unaligned (ethertype_p, u16);
113       if (ethertype == ntohs (ETHERNET_TYPE_VLAN))
116           ethertype_p = &vlan->type;
118       ethertype = clib_mem_unaligned (ethertype_p, u16);
119       if (PREDICT_TRUE ((ethertype != htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
120                         && (ethertype != htons (ETHERNET_TYPE_CDP))
121                         && (ethertype != htons (ETHERNET_TYPE_802_1_LLDP))))
123           /* Change the physical interface to bond interface */
124           vnet_buffer (b)->sw_if_index[VLIB_RX] = bond_sw_if_index;
/* Control-protocol frame kept on the physical interface: count it. */
129   vlib_error_count (vm, node->node_index, BOND_INPUT_ERROR_PASS_THRU, 1);
/* Resolve the bond interface for a member (slave) sw_if_index and choose
 * the next node for buffer b.
 *
 * Fast path: when the packet arrives on the same member as the previous
 * packet (*last_slave_sw_if_index), skip the lookup entirely and go
 * straight to vnet_feature_next. Otherwise flush the RX counter for the
 * previous member, cache the new member, look up its bond master and
 * publish it through *bond_sw_if_index. If the bond is admin-down the
 * packet is flagged with the IF_DOWN error (next was preset to DROP).
 *
 * FIX(review): the admin-down test used PREDICT_TRUE, telling the
 * compiler the bond interface is *usually* down. That is the exceptional
 * error path (it records BOND_INPUT_ERROR_IF_DOWN), and the sibling hint
 * on the same-interface check correctly uses PREDICT_TRUE for its common
 * case — so this hint was inverted. Changed to PREDICT_FALSE; branch
 * hints do not change behavior, only code layout/prediction.
 *
 * NOTE(review): extraction gaps — the return-type line, the packet_count
 * parameter line, local declarations of sif/bif, braces and the
 * ASSERT/guard lines around the lookups are not visible in this view. */
134 bond_update_next (vlib_main_t * vm, vlib_node_runtime_t * node,
135 u32 * last_slave_sw_if_index, u32 slave_sw_if_index,
137 u32 * bond_sw_if_index, vlib_buffer_t * b,
138 u32 * next_index, vlib_error_t * error)
140 u16 thread_index = vm->thread_index;
/* Common case: same member as the previous packet — skip the lookup. */
144 if (PREDICT_TRUE (*last_slave_sw_if_index == slave_sw_if_index))
/* Member changed: flush the RX counter accumulated for the old member. */
148 vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
149 VNET_INTERFACE_COUNTER_RX, thread_index,
150 *last_slave_sw_if_index, packet_count);
152 *last_slave_sw_if_index = slave_sw_if_index;
/* Default to drop until the bond master proves usable. */
153 *next_index = BOND_INPUT_NEXT_DROP;
155 sif = bond_get_slave_by_sw_if_index (slave_sw_if_index);
158 bif = bond_get_master_by_dev_instance (sif->bif_dev_instance);
161 ASSERT (vec_len (bif->slaves));
/* Admin-down bond is the unlikely error path (was PREDICT_TRUE). */
163 if (PREDICT_FALSE (bif->admin_up == 0))
165 *bond_sw_if_index = slave_sw_if_index;
166 *error = node->errors[BOND_INPUT_ERROR_IF_DOWN];
169 *bond_sw_if_index = bif->sw_if_index;
/* Advance to the next feature on the device-input arc. */
171 vnet_feature_next (next_index, b);
/* bond-input node function: for each RX packet on a member (slave)
 * interface, rewrite sw_if_index[VLIB_RX] to the bond interface (unless
 * the packet is a pass-through L2 control protocol) and bump the bond's
 * RX counters. Processes packets four at a time with prefetch, using an
 * XOR trick to detect when all four arrived on the same member as the
 * previous packet so per-packet lookups can be skipped.
 * NOTE(review): extraction gaps throughout — loop headers, braces,
 * declarations of from/n_left/cnt/x/next_index, the error-assignment
 * branches, the "cnt +=" halves of the split statements in the quad fast
 * path, and the final flush of the last member's counter are missing
 * from this view. */
174 VLIB_NODE_FN (bond_input_node) (vlib_main_t * vm,
175 vlib_node_runtime_t * node,
176 vlib_frame_t * frame)
178 u16 thread_index = vm->thread_index;
180 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
181 u32 sw_if_indices[VLIB_FRAME_SIZE], *sw_if_index;
182 u16 nexts[VLIB_FRAME_SIZE], *next;
/* ~0 sentinel: forces a lookup for the very first packet. */
183 u32 last_slave_sw_if_index = ~0;
184 u32 bond_sw_if_index = 0;
185 vlib_error_t error = 0;
189 /* Vector of buffer / pkt indices we're supposed to process */
190 from = vlib_frame_vector_args (frame);
192 /* Number of buffers / pkts */
193 n_left = frame->n_vectors;
195 vlib_get_buffers (vm, from, bufs, n_left);
199 sw_if_index = sw_if_indices;
204 /* Prefetch next iteration */
205 if (PREDICT_TRUE (n_left >= 16))
207 vlib_prefetch_buffer_data (b[8], LOAD);
208 vlib_prefetch_buffer_data (b[9], LOAD);
209 vlib_prefetch_buffer_data (b[10], LOAD);
210 vlib_prefetch_buffer_data (b[11], LOAD);
212 vlib_prefetch_buffer_header (b[12], LOAD);
213 vlib_prefetch_buffer_header (b[13], LOAD);
214 vlib_prefetch_buffer_header (b[14], LOAD);
215 vlib_prefetch_buffer_header (b[15], LOAD);
/* Record the arrival interface of each of the four packets. */
218 sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
219 sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
220 sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
221 sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
/* x == 0 iff all four match the cached member interface. */
223 x |= sw_if_index[0] ^ last_slave_sw_if_index;
224 x |= sw_if_index[1] ^ last_slave_sw_if_index;
225 x |= sw_if_index[2] ^ last_slave_sw_if_index;
226 x |= sw_if_index[3] ^ last_slave_sw_if_index;
228 if (PREDICT_TRUE (x == 0))
/* Fast path: reuse next_index/bond_sw_if_index from the last lookup. */
230 next[0] = next[1] = next[2] = next[3] = next_index;
231 if (next_index == BOND_INPUT_NEXT_DROP)
241 bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index);
243 bond_sw_if_idx_rewrite (vm, node, b[1], bond_sw_if_index);
245 bond_sw_if_idx_rewrite (vm, node, b[2], bond_sw_if_index);
247 bond_sw_if_idx_rewrite (vm, node, b[3], bond_sw_if_index);
/* Slow path: per-packet member -> bond resolution. */
253 bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[0],
254 cnt, &bond_sw_if_index, b[0], &next_index,
256 next[0] = next_index;
257 if (next_index == BOND_INPUT_NEXT_DROP)
260 cnt += bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index);
262 bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[1],
263 cnt, &bond_sw_if_index, b[1], &next_index,
265 next[1] = next_index;
266 if (next_index == BOND_INPUT_NEXT_DROP)
269 cnt += bond_sw_if_idx_rewrite (vm, node, b[1], bond_sw_if_index);
271 bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[2],
272 cnt, &bond_sw_if_index, b[2], &next_index,
274 next[2] = next_index;
275 if (next_index == BOND_INPUT_NEXT_DROP)
278 cnt += bond_sw_if_idx_rewrite (vm, node, b[2], bond_sw_if_index);
280 bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[3],
281 cnt, &bond_sw_if_index, b[3], &next_index,
283 next[3] = next_index;
284 if (next_index == BOND_INPUT_NEXT_DROP)
287 cnt += bond_sw_if_idx_rewrite (vm, node, b[3], bond_sw_if_index);
290 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
291 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
292 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
293 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);
/* Single-packet tail loop for the remaining (< 4) packets. */
304 sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
305 bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[0],
306 cnt, &bond_sw_if_index, b[0], &next_index, &error);
307 next[0] = next_index;
308 if (next_index == BOND_INPUT_NEXT_DROP)
311 bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index);
313 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
/* Second pass over the frame to record traces, only when tracing is on. */
322 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
324 n_left = frame->n_vectors; /* number of packets to process */
326 sw_if_index = sw_if_indices;
327 bond_packet_trace_t *t0;
331 if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
333 t0 = vlib_add_trace (vm, node, b[0], sizeof (*t0));
334 t0->sw_if_index = sw_if_index[0];
335 clib_memcpy_fast (&t0->ethernet, vlib_buffer_get_current (b[0]),
336 sizeof (ethernet_header_t));
/* sw_if_index[VLIB_RX] now holds the bond (post-rewrite) interface. */
337 t0->bond_sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
346 /* increase rx counters */
347 vlib_increment_simple_counter
348 (vnet_main.interface_main.sw_if_counters +
349 VNET_INTERFACE_COUNTER_RX, thread_index, bond_sw_if_index, cnt);
/* Hand all packets to their chosen next nodes in one call. */
351 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
352 vlib_node_increment_counter (vm, bond_input_node.index,
353 BOND_INPUT_ERROR_NONE, frame->n_vectors);
355 return frame->n_vectors;
/* vlib init hook for the bond-input node (registered below via
 * VLIB_INIT_FUNCTION). NOTE(review): the function body falls in an
 * extraction gap and is not visible here. */
358 static clib_error_t *
359 bond_input_init (vlib_main_t * vm)
/* Node registration: internal node named "bond-input" with the trace
 * formatter and error strings defined above; drop next goes to
 * error-drop. NOTE(review): closing braces of the initializer and the
 * rest of the next_nodes list are in extraction gaps. */
365 VLIB_REGISTER_NODE (bond_input_node) = {
366 .name = "bond-input",
367 .vector_size = sizeof (u32),
368 .format_buffer = format_ethernet_header_with_length,
369 .format_trace = format_bond_input_trace,
370 .type = VLIB_NODE_TYPE_INTERNAL,
371 .n_errors = BOND_INPUT_N_ERROR,
372 .error_strings = bond_input_error_strings,
373 .n_next_nodes = BOND_INPUT_N_NEXT,
376 [BOND_INPUT_NEXT_DROP] = "error-drop"
/* Run bond_input_init at vlib initialization time. */
380 VLIB_INIT_FUNCTION (bond_input_init);
/* Register bond-input on the device-input feature arc, ordered before
 * ethernet-input so the sw_if_index rewrite happens first. */
382 VNET_FEATURE_INIT (bond_input, static) =
384 .arc_name = "device-input",
385 .node_name = "bond-input",
386 .runs_before = VNET_FEATURES ("ethernet-input"),
/* Admin up/down callback for member (slave) interfaces: records the new
 * admin state in sif->port_enabled and, for non-LACP bonds (the LACP
 * state machine owns this decision — note the early bail on
 * lacp_enabled, whose "return 0" line falls in a gap), enables or
 * disables collecting/distributing accordingly. Enabling additionally
 * requires the hardware link to be up.
 * NOTE(review): extraction gaps — braces, the sif declaration, the
 * "if (sif)" guard and the return statement are not visible. */
390 static clib_error_t *
391 bond_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
393 bond_main_t *bm = &bond_main;
395 vlib_main_t *vm = bm->vlib_main;
397 sif = bond_get_slave_by_sw_if_index (sw_if_index);
400 sif->port_enabled = flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP;
401 if (sif->lacp_enabled)
404 if (sif->port_enabled == 0)
406 bond_disable_collecting_distributing (vm, sif);
/* Admin up: only start forwarding if the carrier is also up. */
410 vnet_main_t *vnm = vnet_get_main ();
411 vnet_hw_interface_t *hw =
412 vnet_get_sup_hw_interface (vnm, sw_if_index);
414 if (hw->flags & VNET_HW_INTERFACE_FLAG_LINK_UP)
415 bond_enable_collecting_distributing (vm, sif);
/* Hook the callback into the sw-interface admin up/down chain. */
422 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bond_sw_interface_up_down);
/* Link up/down callback for member (slave) hardware interfaces: for
 * non-LACP bonds (LACP bails out early — its "return 0" falls in a gap),
 * stop forwarding on link-down, and resume on link-up only if the port
 * is also admin-enabled (sif->port_enabled).
 * NOTE(review): extraction gaps — braces, the sif declaration, the
 * "if (sif)" guard and the return statement are not visible. */
424 static clib_error_t *
425 bond_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
427 bond_main_t *bm = &bond_main;
429 vnet_sw_interface_t *sw;
430 vlib_main_t *vm = bm->vlib_main;
/* Map hw_if_index to its sw interface, then to the member state. */
432 sw = vnet_get_hw_sw_interface (vnm, hw_if_index);
433 sif = bond_get_slave_by_sw_if_index (sw->sw_if_index);
436 if (sif->lacp_enabled)
439 if (!(flags & VNET_HW_INTERFACE_FLAG_LINK_UP))
441 bond_disable_collecting_distributing (vm, sif);
443 else if (sif->port_enabled)
445 bond_enable_collecting_distributing (vm, sif);
/* Hook the callback into the hw-interface link up/down chain. */
452 VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bond_hw_interface_up_down);
455 * fd.io coding-style-patch-verification: ON
458 * eval: (c-set-style "gnu")