bonding: drop traffic on backup interface for active-backup mode
[vpp.git] / src / vnet / bonding / node.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2017 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17
18 #define _GNU_SOURCE
19 #include <stdint.h>
20 #include <vnet/llc/llc.h>
21 #include <vnet/snap/snap.h>
22 #include <vnet/bonding/node.h>
23
24 #ifndef CLIB_MARCH_VARIANT
25 bond_main_t bond_main;
26 #endif /* CLIB_MARCH_VARIANT */
27
/*
 * Error/drop counters for the bond-input node.  The X-macro keeps the
 * bond_input_error_t enum and the display strings below in sync; add new
 * counters here only.
 */
#define foreach_bond_input_error \
  _(NONE, "no error")            \
  _(IF_DOWN, "interface down")   \
  _(PASSIVE_IF, "traffic received on passive interface")   \
  _(PASS_THRU, "pass through (CDP, LLDP, slow protocols)")

typedef enum
{
#define _(f,s) BOND_INPUT_ERROR_##f,
  foreach_bond_input_error
#undef _
    BOND_INPUT_N_ERROR,
} bond_input_error_t;

/* Human-readable counter names, indexed by bond_input_error_t; shown by
 * the CLI error display. */
static char *bond_input_error_strings[] = {
#define _(n,s) s,
  foreach_bond_input_error
#undef _
};
47
48 static u8 *
49 format_bond_input_trace (u8 * s, va_list * args)
50 {
51   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
52   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
53   bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
54
55   s = format (s, "src %U, dst %U, %U -> %U",
56               format_ethernet_address, t->ethernet.src_address,
57               format_ethernet_address, t->ethernet.dst_address,
58               format_vnet_sw_if_index_name, vnet_get_main (),
59               t->sw_if_index,
60               format_vnet_sw_if_index_name, vnet_get_main (),
61               t->bond_sw_if_index);
62
63   return s;
64 }
65
/* Explicit next nodes for bond-input.  Only a drop edge is registered;
 * accepted packets are steered by the feature arc (vnet_feature_next)
 * instead of these indices. */
typedef enum
{
  BOND_INPUT_NEXT_DROP,
  BOND_INPUT_N_NEXT,
} bond_output_next_t;
71
72 static_always_inline u8
73 packet_is_cdp (ethernet_header_t * eth)
74 {
75   llc_header_t *llc;
76   snap_header_t *snap;
77
78   llc = (llc_header_t *) (eth + 1);
79   snap = (snap_header_t *) (llc + 1);
80
81   return ((eth->type == htons (ETHERNET_TYPE_CDP)) ||
82           ((llc->src_sap == 0xAA) && (llc->control == 0x03) &&
83            (snap->protocol == htons (0x2000)) &&
84            (snap->oui[0] == 0) && (snap->oui[1] == 0) &&
85            (snap->oui[2] == 0x0C)));
86 }
87
88 static inline void
89 bond_sw_if_idx_rewrite (vlib_main_t * vm, vlib_node_runtime_t * node,
90                         vlib_buffer_t * b, u32 bond_sw_if_index,
91                         u32 * n_rx_packets, u32 * n_rx_bytes)
92 {
93   u16 *ethertype_p, ethertype;
94   ethernet_vlan_header_t *vlan;
95   ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b);
96
97   (*n_rx_packets)++;
98   *n_rx_bytes += b->current_length;
99   ethertype = clib_mem_unaligned (&eth->type, u16);
100   if (!ethernet_frame_is_tagged (ntohs (ethertype)))
101     {
102       // Let some layer2 packets pass through.
103       if (PREDICT_TRUE ((ethertype != htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
104                         && !packet_is_cdp (eth)
105                         && (ethertype != htons (ETHERNET_TYPE_802_1_LLDP))))
106         {
107           /* Change the physical interface to bond interface */
108           vnet_buffer (b)->sw_if_index[VLIB_RX] = bond_sw_if_index;
109           return;
110         }
111     }
112   else
113     {
114       vlan = (void *) (eth + 1);
115       ethertype_p = &vlan->type;
116       ethertype = clib_mem_unaligned (ethertype_p, u16);
117       if (ethertype == ntohs (ETHERNET_TYPE_VLAN))
118         {
119           vlan++;
120           ethertype_p = &vlan->type;
121         }
122       ethertype = clib_mem_unaligned (ethertype_p, u16);
123       if (PREDICT_TRUE ((ethertype != htons (ETHERNET_TYPE_SLOW_PROTOCOLS))
124                         && (ethertype != htons (ETHERNET_TYPE_CDP))
125                         && (ethertype != htons (ETHERNET_TYPE_802_1_LLDP))))
126         {
127           /* Change the physical interface to bond interface */
128           vnet_buffer (b)->sw_if_index[VLIB_RX] = bond_sw_if_index;
129           return;
130         }
131     }
132
133   vlib_error_count (vm, node->node_index, BOND_INPUT_ERROR_PASS_THRU, 1);
134   return;
135 }
136
/*
 * Resolve per-packet disposition for a packet received on
 * slave_sw_if_index.
 *
 * Outputs: *next_index (drop, or the next feature-arc node), *error
 * (drop reason, 0 otherwise) and *bond_sw_if_index (the bond interface
 * the packet should appear to arrive on).
 *
 * The slave lookup is cached via *last_slave_sw_if_index: when the same
 * slave repeats, we skip straight to the feature-arc step and the caller
 * reuses the previously resolved bond_sw_if_index.  Drops happen when
 * the bond is admin-down (IF_DOWN) or, in active-backup mode, when the
 * packet arrived on a slave that is not the current primary
 * (active_slaves[0]) -- the PASSIVE_IF case this node exists for.
 *
 * NOTE(review): on a cache hit *error is reset to 0 and the feature arc
 * is advanced even if the previous (cached) resolution was a drop;
 * verify that the scalar caller path cannot thereby forward packets
 * from a slave whose bond was just found down.
 */
static inline void
bond_update_next (vlib_main_t * vm, vlib_node_runtime_t * node,
		  u32 * last_slave_sw_if_index, u32 slave_sw_if_index,
		  u32 * bond_sw_if_index, vlib_buffer_t * b,
		  u32 * next_index, vlib_error_t * error)
{
  slave_if_t *sif;
  bond_if_t *bif;

  *next_index = BOND_INPUT_NEXT_DROP;
  *error = 0;

  /* Same slave as last packet: skip the lookups entirely. */
  if (PREDICT_TRUE (*last_slave_sw_if_index == slave_sw_if_index))
    goto next;

  *last_slave_sw_if_index = slave_sw_if_index;

  sif = bond_get_slave_by_sw_if_index (slave_sw_if_index);
  ASSERT (sif);

  bif = bond_get_master_by_dev_instance (sif->bif_dev_instance);

  ASSERT (bif);
  ASSERT (vec_len (bif->slaves));

  /* Bond admin-down: drop, attributing the error to the slave. */
  if (PREDICT_FALSE (bif->admin_up == 0))
    {
      *bond_sw_if_index = slave_sw_if_index;
      *error = node->errors[BOND_INPUT_ERROR_IF_DOWN];
      return;
    }

  /* Active-backup: only the primary slave (active_slaves[0]) may
   * deliver traffic; drop anything from a backup slave. */
  if (PREDICT_FALSE ((bif->mode == BOND_MODE_ACTIVE_BACKUP) &&
		     vec_len (bif->active_slaves) &&
		     (slave_sw_if_index != bif->active_slaves[0])))
    {
      *bond_sw_if_index = slave_sw_if_index;
      *error = node->errors[BOND_INPUT_ERROR_PASSIVE_IF];
      return;
    }

  *bond_sw_if_index = bif->sw_if_index;

next:
  /* Advance the buffer along the device-input feature arc. */
  vnet_feature_next (next_index, b);
}
183
184 static_always_inline void
185 bond_update_next_x4 (vlib_buffer_t * b0, vlib_buffer_t * b1,
186                      vlib_buffer_t * b2, vlib_buffer_t * b3)
187 {
188   u32 tmp0, tmp1, tmp2, tmp3;
189
190   tmp0 = tmp1 = tmp2 = tmp3 = BOND_INPUT_NEXT_DROP;
191   vnet_feature_next (&tmp0, b0);
192   vnet_feature_next (&tmp1, b1);
193   vnet_feature_next (&tmp2, b2);
194   vnet_feature_next (&tmp3, b3);
195 }
196
/*
 * bond-input node function.  Runs on the device-input feature arc of
 * every slave interface: rewrites each packet's RX sw_if_index from the
 * slave to its bond interface (except pass-through L2 control traffic),
 * drops traffic arriving on a down bond or on a backup slave in
 * active-backup mode, and forwards the rest along the feature arc.
 *
 * The quad loop exploits the fact that a frame usually comes from a
 * single slave: once bond_update_next has resolved a slave, subsequent
 * packets from the same slave reuse the cached next_index / error /
 * bond_sw_if_index without re-resolving.
 */
VLIB_NODE_FN (bond_input_node) (vlib_main_t * vm,
				vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  u16 thread_index = vm->thread_index;
  u32 *from, n_left;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u32 sw_if_indices[VLIB_FRAME_SIZE], *sw_if_index;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  /* ~0 guarantees the first packet misses the slave cache. */
  u32 last_slave_sw_if_index = ~0;
  u32 bond_sw_if_index = 0;
  vlib_error_t error = 0;
  u32 next_index = 0;
  u32 n_rx_bytes = 0, n_rx_packets = 0;

  /* Vector of buffer / pkt indices we're supposed to process */
  from = vlib_frame_vector_args (frame);

  /* Number of buffers / pkts */
  n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, bufs, n_left);

  b = bufs;
  next = nexts;
  sw_if_index = sw_if_indices;

  while (n_left >= 4)
    {
      u32 x = 0;
      /* Prefetch next iteration */
      if (PREDICT_TRUE (n_left >= 16))
	{
	  vlib_prefetch_buffer_data (b[8], LOAD);
	  vlib_prefetch_buffer_data (b[9], LOAD);
	  vlib_prefetch_buffer_data (b[10], LOAD);
	  vlib_prefetch_buffer_data (b[11], LOAD);

	  vlib_prefetch_buffer_header (b[12], LOAD);
	  vlib_prefetch_buffer_header (b[13], LOAD);
	  vlib_prefetch_buffer_header (b[14], LOAD);
	  vlib_prefetch_buffer_header (b[15], LOAD);
	}

      sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      /* x stays 0 iff all four packets arrived on the cached slave. */
      x |= sw_if_index[0] ^ last_slave_sw_if_index;
      x |= sw_if_index[1] ^ last_slave_sw_if_index;
      x |= sw_if_index[2] ^ last_slave_sw_if_index;
      x |= sw_if_index[3] ^ last_slave_sw_if_index;

      if (PREDICT_TRUE (x == 0))
	{
	  /*
	   * Optimize to call update_next only if there is a feature arc
	   * after bond-input. Test feature count greater than 1 because
	   * bond-input itself is a feature arc for this slave interface.
	   */
	  ASSERT ((vnet_buffer (b[0])->feature_arc_index ==
		   vnet_buffer (b[1])->feature_arc_index) &&
		  (vnet_buffer (b[0])->feature_arc_index ==
		   vnet_buffer (b[2])->feature_arc_index) &&
		  (vnet_buffer (b[0])->feature_arc_index ==
		   vnet_buffer (b[3])->feature_arc_index));
	  if (PREDICT_FALSE (vnet_get_feature_count
			     (vnet_buffer (b[0])->feature_arc_index,
			      last_slave_sw_if_index) > 1))
	    bond_update_next_x4 (b[0], b[1], b[2], b[3]);

	  /* Reuse the disposition cached by the last bond_update_next. */
	  next[0] = next[1] = next[2] = next[3] = next_index;
	  if (next_index == BOND_INPUT_NEXT_DROP)
	    {
	      b[0]->error = error;
	      b[1]->error = error;
	      b[2]->error = error;
	      b[3]->error = error;
	    }
	  else
	    {
	      bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index,
				      &n_rx_packets, &n_rx_bytes);
	      bond_sw_if_idx_rewrite (vm, node, b[1], bond_sw_if_index,
				      &n_rx_packets, &n_rx_bytes);
	      bond_sw_if_idx_rewrite (vm, node, b[2], bond_sw_if_index,
				      &n_rx_packets, &n_rx_bytes);
	      bond_sw_if_idx_rewrite (vm, node, b[3], bond_sw_if_index,
				      &n_rx_packets, &n_rx_bytes);
	    }
	}
      else
	{
	  /* Mixed slaves in this quad: resolve each packet separately. */
	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[0],
			    &bond_sw_if_index, b[0], &next_index, &error);
	  next[0] = next_index;
	  if (next_index == BOND_INPUT_NEXT_DROP)
	    b[0]->error = error;
	  else
	    bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index,
				    &n_rx_packets, &n_rx_bytes);

	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[1],
			    &bond_sw_if_index, b[1], &next_index, &error);
	  next[1] = next_index;
	  if (next_index == BOND_INPUT_NEXT_DROP)
	    b[1]->error = error;
	  else
	    bond_sw_if_idx_rewrite (vm, node, b[1], bond_sw_if_index,
				    &n_rx_packets, &n_rx_bytes);

	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[2],
			    &bond_sw_if_index, b[2], &next_index, &error);
	  next[2] = next_index;
	  if (next_index == BOND_INPUT_NEXT_DROP)
	    b[2]->error = error;
	  else
	    bond_sw_if_idx_rewrite (vm, node, b[2], bond_sw_if_index,
				    &n_rx_packets, &n_rx_bytes);

	  bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[3],
			    &bond_sw_if_index, b[3], &next_index, &error);
	  next[3] = next_index;
	  if (next_index == BOND_INPUT_NEXT_DROP)
	    b[3]->error = error;
	  else
	    bond_sw_if_idx_rewrite (vm, node, b[3], bond_sw_if_index,
				    &n_rx_packets, &n_rx_bytes);
	}

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]);
      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]);

      /* next */
      n_left -= 4;
      b += 4;
      sw_if_index += 4;
      next += 4;
    }

  /* Scalar tail: same logic, one packet at a time. */
  while (n_left)
    {
      sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      bond_update_next (vm, node, &last_slave_sw_if_index, sw_if_index[0],
			&bond_sw_if_index, b[0], &next_index, &error);
      next[0] = next_index;
      if (next_index == BOND_INPUT_NEXT_DROP)
	b[0]->error = error;
      else
	bond_sw_if_idx_rewrite (vm, node, b[0], bond_sw_if_index,
				&n_rx_packets, &n_rx_bytes);

      VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

      /* next */
      n_left -= 1;
      b += 1;
      sw_if_index += 1;
      next += 1;
    }

  /* Trace pass: records the original slave sw_if_index and the
   * (possibly rewritten) RX sw_if_index for traced buffers. */
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      n_left = frame->n_vectors;	/* number of packets to process */
      b = bufs;
      sw_if_index = sw_if_indices;
      bond_packet_trace_t *t0;

      while (n_left)
	{
	  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (*t0));
	      t0->sw_if_index = sw_if_index[0];
	      clib_memcpy_fast (&t0->ethernet, vlib_buffer_get_current (b[0]),
				sizeof (ethernet_header_t));
	      t0->bond_sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
	    }
	  /* next */
	  n_left--;
	  b++;
	  sw_if_index++;
	}
    }

  /* increase rx counters */
  /* NOTE(review): all accumulated packets/bytes are attributed to the
   * last resolved bond_sw_if_index; if one frame ever mixed slaves of
   * different bonds the split would be wrong -- confirm frames are
   * per-slave.  Likewise the NONE counter below counts every vector,
   * including dropped ones. */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, thread_index, bond_sw_if_index, n_rx_packets,
     n_rx_bytes);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  vlib_node_increment_counter (vm, bond_input_node.index,
			       BOND_INPUT_ERROR_NONE, frame->n_vectors);

  return frame->n_vectors;
}
397
398 static clib_error_t *
399 bond_input_init (vlib_main_t * vm)
400 {
401   return 0;
402 }
403
/* *INDENT-OFF* */
/* Node registration: internal node, one explicit next (error-drop);
 * error counters come from the X-macro table above. */
VLIB_REGISTER_NODE (bond_input_node) = {
  .name = "bond-input",
  .vector_size = sizeof (u32),
  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_bond_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = BOND_INPUT_N_ERROR,
  .error_strings = bond_input_error_strings,
  .n_next_nodes = BOND_INPUT_N_NEXT,
  .next_nodes =
  {
    [BOND_INPUT_NEXT_DROP] = "error-drop"
  }
};

VLIB_INIT_FUNCTION (bond_input_init);

/* Insert bond-input on the device-input arc ahead of ethernet-input so
 * the sw_if_index rewrite happens before L2 classification. */
VNET_FEATURE_INIT (bond_input, static) =
{
  .arc_name = "device-input",
  .node_name = "bond-input",
  .runs_before = VNET_FEATURES ("ethernet-input"),
};
/* *INDENT-ON* */
429
430 static clib_error_t *
431 bond_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
432 {
433   bond_main_t *bm = &bond_main;
434   slave_if_t *sif;
435   vlib_main_t *vm = bm->vlib_main;
436
437   sif = bond_get_slave_by_sw_if_index (sw_if_index);
438   if (sif)
439     {
440       if (sif->lacp_enabled)
441         return 0;
442
443       /* port_enabled is both admin up and hw link up */
444       sif->port_enabled = ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) &&
445                            vnet_sw_interface_is_link_up (vnm, sw_if_index));
446       if (sif->port_enabled == 0)
447         bond_disable_collecting_distributing (vm, sif);
448       else
449         bond_enable_collecting_distributing (vm, sif);
450     }
451
452   return 0;
453 }
454
455 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bond_sw_interface_up_down);
456
457 static clib_error_t *
458 bond_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
459 {
460   bond_main_t *bm = &bond_main;
461   slave_if_t *sif;
462   vnet_sw_interface_t *sw;
463   vlib_main_t *vm = bm->vlib_main;
464
465   sw = vnet_get_hw_sw_interface (vnm, hw_if_index);
466   sif = bond_get_slave_by_sw_if_index (sw->sw_if_index);
467   if (sif)
468     {
469       if (sif->lacp_enabled)
470         return 0;
471
472       /* port_enabled is both admin up and hw link up */
473       sif->port_enabled = ((flags & VNET_HW_INTERFACE_FLAG_LINK_UP) &&
474                            vnet_sw_interface_is_admin_up (vnm,
475                                                           sw->sw_if_index));
476       if (sif->port_enabled == 0)
477         bond_disable_collecting_distributing (vm, sif);
478       else
479         bond_enable_collecting_distributing (vm, sif);
480     }
481
482   return 0;
483 }
484
485 VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bond_hw_interface_up_down);
486
487 /*
488  * fd.io coding-style-patch-verification: ON
489  *
490  * Local Variables:
491  * eval: (c-set-style "gnu")
492  * End:
493  */