2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * ethernet_node.c: ethernet packet processing
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/vlib.h>
41 #include <vnet/pg/pg.h>
42 #include <vnet/ethernet/ethernet.h>
43 #include <vnet/ethernet/p2p_ethernet.h>
44 #include <vnet/devices/pipe/pipe.h>
45 #include <vppinfra/sparse_vec.h>
46 #include <vnet/l2/l2_bvi.h>
/* Dispatch table for ethernet-input: each entry pairs an enum suffix with
   the name of the next graph node packets may be handed to.
   NOTE(review): source appears truncated here (original line numbers skip);
   the enum opener "typedef enum {" and the trailing "#undef _" are not
   visible in this view. */
49 #define foreach_ethernet_input_next \
50 _ (PUNT, "error-punt") \
51 _ (DROP, "error-drop") \
52 _ (LLC, "llc-input") \
53 _ (IP4_INPUT, "ip4-input") \
54 _ (IP4_INPUT_NCS, "ip4-input-no-checksum")
/* Expand the table into ETHERNET_INPUT_NEXT_* enum members; the final
   member counts the number of next nodes. */
58 #define _(s,n) ETHERNET_INPUT_NEXT_##s,
59 foreach_ethernet_input_next
61 ETHERNET_INPUT_N_NEXT,
62 } ethernet_input_next_t;
/* Per-packet trace record for the ethernet-input node.
   NOTE(review): the struct opener and earlier members are not visible in
   this truncated view — format_ethernet_input_trace below also reads
   t->frame_flags and t->packet_data, so those fields presumably precede
   frame_data; confirm against the full file. */
68 ethernet_input_frame_t frame_data;
69 } ethernet_input_trace_t;
/* format() callback: pretty-print one ethernet_input_trace_t record.
   Emits the frame flags, the hw/sw interface indices when the frame was
   marked single-interface, then the parsed ethernet header on a new,
   indented line.
   NOTE(review): the function's return type line, braces and "return s;"
   are not visible in this truncated view. */
72 format_ethernet_input_trace (u8 * s, va_list * va)
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
76 ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
77 u32 indent = format_get_indent (s);
81 s = format (s, "frame: flags 0x%x", t->frame_flags);
82 if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
83 s = format (s, ", hw-if-index %u, sw-if-index %u",
84 t->frame_data.hw_if_index, t->frame_data.sw_if_index);
85 s = format (s, "\n%U", format_white_space, indent);
87 s = format (s, "%U", format_ethernet_header, t->packet_data);
/* Node registration lives elsewhere; referenced here for error-node lookup
   in ethernet_input_inline below. */
92 extern vlib_node_registration_t ethernet_input_node;
/* Which flavor of the input node is running: full ethernet parsing,
   ethertype-only (arriving from LLC/SNAP processing, per parse_header),
   or a variant that never treats packets as L2.
   NOTE(review): the "typedef enum" opener is not visible in this view. */
96 ETHERNET_INPUT_VARIANT_ETHERNET,
97 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
98 ETHERNET_INPUT_VARIANT_NOT_L2,
99 } ethernet_input_variant_t;
102 // Parse the ethernet header to extract vlan tags and innermost ethertype
/* Walks the (possibly VLAN-tagged) ethernet header of b0:
   - records l2_hdr_offset and advances the buffer past each header,
   - extracts up to two VLAN ids into *outer_id / *inner_id,
   - leaves the innermost ethertype in *type,
   - sets *match_flags to SUBINT_CONFIG_VALID plus the 0/1/2/3-tag match bit,
   - records the tag count in the buffer (3 means "3 or more").
   For the ETHERNET_TYPE variant the prior node (LLC/SNAP) already consumed
   the MAC header, so only the 2-byte type field is read.
   NOTE(review): this view is truncated — parameter lines (b0, type),
   braces, tag-count bookkeeping and some defaulting statements are not
   visible; do not assume the visible lines are contiguous. */
103 static_always_inline void
104 parse_header (ethernet_input_variant_t variant,
108 u16 * outer_id, u16 * inner_id, u32 * match_flags)
112 if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
113 || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
115 ethernet_header_t *e0;
117 e0 = (void *) (b0->data + b0->current_data);
119 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
120 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
122 vlib_buffer_advance (b0, sizeof (e0[0]));
124 *type = clib_net_to_host_u16 (e0->type);
126 else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
128 // here when prior node was LLC/SNAP processing
131 e0 = (void *) (b0->data + b0->current_data);
133 vlib_buffer_advance (b0, sizeof (e0[0]));
135 *type = clib_net_to_host_u16 (e0[0]);
138 // save for distinguishing between dot1q and dot1ad later
141 // default the tags to 0 (used if there is no corresponding tag)
145 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
148 // check for vlan encaps
149 if (ethernet_frame_is_tagged (*type))
151 ethernet_vlan_header_t *h0;
154 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
156 h0 = (void *) (b0->data + b0->current_data);
158 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
/* low 12 bits of the TCI are the VLAN id */
160 *outer_id = tag & 0xfff;
162 *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;
164 *type = clib_net_to_host_u16 (h0->type);
166 vlib_buffer_advance (b0, sizeof (h0[0]));
169 if (*type == ETHERNET_TYPE_VLAN)
171 // Double tagged packet
172 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
174 h0 = (void *) (b0->data + b0->current_data);
176 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
178 *inner_id = tag & 0xfff;
180 *type = clib_net_to_host_u16 (h0->type);
182 vlib_buffer_advance (b0, sizeof (h0[0]));
184 if (*type == ETHERNET_TYPE_VLAN)
186 // More than double tagged packet
187 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;
189 vlib_buffer_advance (b0, sizeof (h0[0]));
190 vlan_count = 3; // "unknown" number, aka, 3-or-more
194 ethernet_buffer_set_vlan_count (b0, vlan_count);
197 // Determine the subinterface for this packet, given the result of the
198 // vlan table lookups and vlan header parsing. Check the most specific
/* Resolves which sub-interface the packet belongs to via
   eth_identify_subint(), then applies an L3 destination-MAC filter for
   unicast packets on L3 interfaces (needed because promiscuous mode lets
   frames for other MACs reach us), and finally flags packets whose
   resolved sub-interface is invalid (~0) as ETHERNET_ERROR_DOWN.
   Outputs: *new_sw_if_index, *error0, *is_l2.
   NOTE(review): truncated view — the parameter lines for b0/match_flags,
   the "matched" declaration, braces and any early-exit on match failure
   are not visible here. */
200 static_always_inline void
201 identify_subint (vnet_hw_interface_t * hi,
204 main_intf_t * main_intf,
205 vlan_intf_t * vlan_intf,
206 qinq_intf_t * qinq_intf,
207 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
211 matched = eth_identify_subint (hi, b0, match_flags,
212 main_intf, vlan_intf, qinq_intf,
213 new_sw_if_index, error0, is_l2);
218 // Perform L3 my-mac filter
219 // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
220 // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
223 ethernet_header_t *e0;
224 e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
226 if (!(ethernet_address_cast (e0->dst_address)))
228 if (!eth_mac_equal ((u8 *) e0, hi->hw_address))
230 *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
235 // Check for down subinterface
236 *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
/* Chooses the next graph node for a parsed packet:
   - records l3_hdr_offset (current_data after header parsing),
   - on any error: drop,
   - for L2 sub-interfaces: rewind the buffer so the L2 header is intact,
     record l2.l2_len, and go to the configured l2 next node,
   - for IP4/IP6/MPLS: fast-path to the cached l3 next indices,
   - if L3 redirect is enabled: send uncommon types to the redirect node,
   - otherwise: look the ethertype up in the sparse next-by-type table,
     marking unknown types as errors; types < 0x600 are 802.3 lengths, so
     hand those to llc-input (unless we already came from LLC, in which
     case the drop result from the table stands).
   NOTE(review): truncated view — the is_l20 parameter line, braces, and
   the full error-assignment statement around L159 are not visible. */
240 static_always_inline void
241 determine_next_node (ethernet_main_t * em,
242 ethernet_input_variant_t variant,
244 u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
246 vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
247 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
249 if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
251 // some error occurred
252 *next0 = ETHERNET_INPUT_NEXT_DROP;
256 // record the L2 len and reset the buffer so the L2 header is preserved
257 u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
258 vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
259 *next0 = em->l2_next;
260 ASSERT (vnet_buffer (b0)->l2.l2_len ==
261 ethernet_buffer_header_size (b0));
262 vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));
264 // check for common IP/MPLS ethertypes
266 else if (type0 == ETHERNET_TYPE_IP4)
268 *next0 = em->l3_next.input_next_ip4;
270 else if (type0 == ETHERNET_TYPE_IP6)
272 *next0 = em->l3_next.input_next_ip6;
274 else if (type0 == ETHERNET_TYPE_MPLS)
276 *next0 = em->l3_next.input_next_mpls;
279 else if (em->redirect_l3)
281 // L3 Redirect is on, the cached common next nodes will be
282 // pointing to the redirect node, catch the uncommon types here
283 *next0 = em->redirect_l3_next;
287 // uncommon ethertype, check table
289 i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
290 *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
293 SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
295 // The table is not populated with LLC values, so check that now.
296 // If variant is variant_ethernet then we came from LLC processing. Don't
297 // go back there; drop instead using by keeping the drop/bad table result.
298 if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
300 *next0 = ETHERNET_INPUT_NEXT_LLC;
/* Ethertype classification ids used by eth_input_sort / the untagged fast
   path. NOTE(review): only the first enumerator is visible here; the enum
   opener and the IP4/IP6/MPLS/ETYPE_N_IDS members referenced below are in
   lines not shown in this view. */
307 ETYPE_ID_UNKNOWN = 0,
/* For each buffer in 'from': advance current_data by 'advance' and rewrite
   flags as (flags & and_flags) | or_flags. Used by
   eth_input_enqueue_untagged to rewind unknown-ethertype buffers by one
   ethernet header and clear the l3-offset-valid flag before handing them
   to the slow path. Processed 8 buffers at a time, then one at a time.
   NOTE(review): truncated view — the vlib_buffer_t *b[8] declaration, the
   loop headers and the from/n_left decrements are not visible. */
315 eth_input_advance_and_flags (vlib_main_t * vm, u32 * from, u32 n_left,
316 i16 advance, u32 and_flags, u32 or_flags)
/* 8-wide unrolled body */
321 vlib_get_buffers (vm, from, b, 8);
322 vlib_buffer_advance (b[0], advance);
323 vlib_buffer_advance (b[1], advance);
324 vlib_buffer_advance (b[2], advance);
325 vlib_buffer_advance (b[3], advance);
326 vlib_buffer_advance (b[4], advance);
327 vlib_buffer_advance (b[5], advance);
328 vlib_buffer_advance (b[6], advance);
329 vlib_buffer_advance (b[7], advance);
330 b[0]->flags = (b[0]->flags & and_flags) | or_flags;
331 b[1]->flags = (b[1]->flags & and_flags) | or_flags;
332 b[2]->flags = (b[2]->flags & and_flags) | or_flags;
333 b[3]->flags = (b[3]->flags & and_flags) | or_flags;
334 b[4]->flags = (b[4]->flags & and_flags) | or_flags;
335 b[5]->flags = (b[5]->flags & and_flags) | or_flags;
336 b[6]->flags = (b[6]->flags & and_flags) | or_flags;
337 b[7]->flags = (b[7]->flags & and_flags) | or_flags;
/* scalar tail, one buffer per iteration */
344 vlib_get_buffers (vm, from, b, 1);
345 vlib_buffer_advance (b[0], advance);
346 b[0]->flags = (b[0]->flags & and_flags) | or_flags;
/* Per-frame scratch data: one ethertype per packet, plus buffer indices
   bucketed by ETYPE_ID_* with per-bucket counts.
   NOTE(review): the struct opener/typedef name line is not visible; usage
   below (eth_input_data_t) implies these are its members. */
355 u16 etypes[VLIB_FRAME_SIZE];
356 u32 bufs_by_etype[ETYPE_N_IDS][VLIB_FRAME_SIZE];
357 u16 n_bufs_by_etype[ETYPE_N_IDS];
/* Compile-time layout guarantees required by the vector code in
   eth_input_adv_and_flags_x4, which loads current_data, current_length
   and flags from the first 64 bits of the buffer header and writes
   l2_hdr_offset/l3_hdr_offset with a single 32-bit store. */
360 /* following vector code relies on following assumptions */
361 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
362 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
363 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
364 STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
365 STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
366 "l3_hdr_offset must follow l2_hdr_offset");
/* Advance 4 buffers past the ethernet header and set the given flags.
   is_l3 != 0: current_data/current_length are adjusted by +adv/-adv and
   l3_hdr_offset = old current_data + adv; is_l3 == 0 (L2 path): buffer is
   not advanced and l2.l2_len is set to adv instead.
   The AVX2 branch packs 4 x 64-bit buffer-header prefixes into one 256-bit
   register so current_data/current_length/flags can be updated with vector
   adds (relies on the STATIC_ASSERTs above).
   NOTE(review): heavily truncated — the function's braces, the u64x4/adv4
   declarations, the flags-OR in the vector branch, and the #else/#endif
   structure are not all visible; statement order here is load -> add ->
   scatter and must not be assumed complete. */
368 static_always_inline void
369 eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
371 #ifdef CLIB_HAVE_VEC256
372 /* to reduce number of small loads/stores we are loading first 64 bits
373 of each buffer metadata into 256-bit register so we can advance
374 current_data, current_length and flags.
375 Observed saving of this code is ~2 clocks per packet */
378 /* vector if signed 16 bit integers used in signed vector add operation
379 to advnce current_data and current_length */
380 u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
382 adv, -adv, 0, 0, adv, -adv, 0, 0,
383 adv, -adv, 0, 0, adv, -adv, 0, 0
386 /* load 4 x 64 bits */
387 r = u64x4_gather (b[0], b[1], b[2], b[3]);
393 radv = (u64x4) ((i16x16) r + adv4);
395 /* write 4 x 64 bits */
396 u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);
398 /* use old current_data as l2_hdr_offset and new current_data as
400 r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);
402 /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
403 u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
404 u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
405 u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
406 u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);
/* sanity-check the vectorized offset math against the scalar meaning */
408 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
409 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
410 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
411 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);
413 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
414 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
415 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
416 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
/* scalar fallback: set offsets, advance, OR in flags */
419 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
420 vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
421 vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
422 vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
423 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
424 vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
425 vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
426 vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;
430 vlib_buffer_advance (b[0], adv);
431 vlib_buffer_advance (b[1], adv);
432 vlib_buffer_advance (b[2], adv);
433 vlib_buffer_advance (b[3], adv);
436 b[0]->flags |= flags;
437 b[1]->flags |= flags;
438 b[2]->flags |= flags;
439 b[3]->flags |= flags;
/* L2 path: record header length instead of advancing */
444 vnet_buffer (b[0])->l2.l2_len = adv;
445 vnet_buffer (b[1])->l2.l2_len = adv;
446 vnet_buffer (b[2])->l2.l2_len = adv;
447 vnet_buffer (b[3])->l2.l2_len = adv;
/* Scalar (single-buffer) version of eth_input_adv_and_flags_x4: record
   l2/l3 header offsets, advance past the ethernet header (L3 case) and set
   flags; for L2, record l2.l2_len instead.
   NOTE(review): braces and the is_l3 conditional lines are not visible in
   this truncated view. */
451 static_always_inline void
452 eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
454 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
455 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
458 vlib_buffer_advance (b[0], adv);
459 b[0]->flags |= flags;
461 vnet_buffer (b[0])->l2.l2_len = adv;
/* Fast path for single-interface untagged frames: walk all buffers,
   extract each ethertype into the 'etype' array, and set the l2/l3 header
   offsets + offset-valid flags via eth_input_adv_and_flags_x4/_x1.
   Main loop handles 4 buffers with prefetch of headers/data 8-12 ahead;
   followed by a no-prefetch 4-wide loop and a scalar tail.
   NOTE(review): truncated view — loop headers, the etype[] stores after
   each vlib_buffer_get_current, and the from/n_left bookkeeping are not
   visible here. */
464 static_always_inline void
465 eth_input_process_frame (vlib_main_t * vm, u32 * from, u16 * etype,
466 u32 n_left, int is_l3)
468 vlib_buffer_t *b[16];
469 ethernet_header_t *e;
470 int adv = sizeof (ethernet_header_t);
472 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
473 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
/* 4-wide loop with header prefetch (ph, +12) and data prefetch (pd, +8) */
477 vlib_buffer_t **ph = b + 12, **pd = b + 8;
478 vlib_get_buffers (vm, from, b, 4);
479 vlib_get_buffers (vm, from + 8, b + 8, 8);
481 vlib_prefetch_buffer_header (ph[0], LOAD);
482 vlib_prefetch_buffer_data (pd[0], LOAD);
483 e = vlib_buffer_get_current (b[0]);
486 vlib_prefetch_buffer_header (ph[1], LOAD);
487 vlib_prefetch_buffer_data (pd[1], LOAD);
488 e = vlib_buffer_get_current (b[1]);
491 vlib_prefetch_buffer_header (ph[2], LOAD);
492 vlib_prefetch_buffer_data (pd[2], LOAD);
493 e = vlib_buffer_get_current (b[2]);
496 vlib_prefetch_buffer_header (ph[3], LOAD);
497 vlib_prefetch_buffer_data (pd[3], LOAD);
498 e = vlib_buffer_get_current (b[3]);
501 eth_input_adv_and_flags_x4 (b, adv, flags, is_l3);
/* 4-wide loop without prefetch (near end of frame) */
510 vlib_get_buffers (vm, from, b, 4);
512 e = vlib_buffer_get_current (b[0]);
515 e = vlib_buffer_get_current (b[1]);
518 e = vlib_buffer_get_current (b[2]);
521 e = vlib_buffer_get_current (b[3]);
524 eth_input_adv_and_flags_x4 (b, adv, flags, is_l3);
/* scalar tail */
533 vlib_get_buffers (vm, from, b, 1);
535 e = vlib_buffer_get_current (b[0]);
538 eth_input_adv_and_flags_x1 (b, adv, flags, is_l3);
/* Buckets the frame's buffer indices by ethertype class.
   Pass 1: rewrite d->etypes in place from network-order ethertype values
   to small ETYPE_ID_* codes (IP4/IP6/MPLS, anything else stays 0 =
   UNKNOWN), vectorized 16- or 8-wide when AVX2/SSE is available.
   Pass 2: copy buffer indices into d->bufs_by_etype[id][], taking 16- or
   8-at-a-time shortcuts when a whole vector of etypes is equal.
   NOTE(review): truncated view — loop headers, the r-vector declarations,
   the from/etype/n_left increments and closing braces are not visible. */
547 static_always_inline void
548 eth_input_sort (vlib_main_t * vm, u32 * from, u32 n_packets,
549 eth_input_data_t * d)
551 u16 *etype = d->etypes;
552 i32 n_left = n_packets;
554 #if defined (CLIB_HAVE_VEC256)
/* compare against splatted network-order ethertypes; the masked adds
   leave the matching ETYPE_ID in each lane (non-matches add 0) */
556 u16x16 et16_ip4 = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
557 u16x16 et16_ip6 = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
558 u16x16 et16_mpls = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_MPLS));
559 u16x16 id16_ip4 = u16x16_splat (ETYPE_ID_IP4);
560 u16x16 id16_ip6 = u16x16_splat (ETYPE_ID_IP6);
561 u16x16 id16_mpls = u16x16_splat (ETYPE_ID_MPLS);
566 e16 = u16x16_load_unaligned (etype);
567 r += (e16 == et16_ip4) & id16_ip4;
568 r += (e16 == et16_ip6) & id16_ip6;
569 r += (e16 == et16_mpls) & id16_mpls;
570 u16x16_store_unaligned (r, etype);
574 #elif defined (CLIB_HAVE_VEC128)
576 u16x8 et8_ip4 = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
577 u16x8 et8_ip6 = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
578 u16x8 et8_mpls = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_MPLS));
579 u16x8 id8_ip4 = u16x8_splat (ETYPE_ID_IP4);
580 u16x8 id8_ip6 = u16x8_splat (ETYPE_ID_IP6);
581 u16x8 id8_mpls = u16x8_splat (ETYPE_ID_MPLS);
586 e8 = u16x8_load_unaligned (etype);
587 r += (e8 == et8_ip4) & id8_ip4;
588 r += (e8 == et8_ip6) & id8_ip6;
589 r += (e8 == et8_mpls) & id8_mpls;
590 u16x8_store_unaligned (r, etype);
/* scalar classification fallback
   NOTE(review): etype[0] here holds a host-order value in this branch per
   the comparisons below — confirm against the non-visible loop setup */
597 if (etype[0] == ETHERNET_TYPE_IP4)
598 etype[0] = ETYPE_ID_IP4;
599 else if (etype[0] == ETHERNET_TYPE_IP6)
600 etype[0] = ETYPE_ID_IP6;
601 else if (etype[0] == ETHERNET_TYPE_MPLS)
602 etype[0] = ETYPE_ID_MPLS;
604 etype[0] = ETYPE_ID_UNKNOWN;
/* pass 2: bucket buffer indices by id */
614 clib_memset_u16 (d->n_bufs_by_etype, 0, ETYPE_N_IDS);
619 y = d->n_bufs_by_etype[x];
621 #ifdef CLIB_HAVE_VEC256
622 if (n_left >= 16 && u16x16_is_all_equal (u16x16_load_unaligned (etype),
/* 16 consecutive packets share the same id — bulk copy */
625 clib_memcpy_fast (&d->bufs_by_etype[x][y], from, 16 * sizeof (u32));
626 d->n_bufs_by_etype[x] += 16;
635 #ifdef CLIB_HAVE_VEC128
636 if (n_left >= 8 && u16x8_is_all_equal (u16x8_load_unaligned (etype),
639 clib_memcpy_fast (&d->bufs_by_etype[x][y], from, 8 * sizeof (u32));
640 d->n_bufs_by_etype[x] += 8;
/* one-at-a-time fallback */
649 d->bufs_by_etype[x][y] = from[0];
650 d->n_bufs_by_etype[x]++;
/* If tracing is enabled on the node, record a trace entry for every traced
   buffer in the frame: a copy of the packet's leading bytes, the frame
   flags, and the frame's scalar args (hw/sw interface indices).
   NOTE(review): truncated view — braces, the from/n_left declarations,
   the early return, loop header and the from++/n_left-- lines are not
   visible. */
659 static_always_inline void
660 ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
661 vlib_frame_t * from_frame)
664 if ((node->flags & VLIB_NODE_FLAG_TRACE) == 0)
667 from = vlib_frame_vector_args (from_frame);
668 n_left = from_frame->n_vectors;
672 ethernet_input_trace_t *t0;
673 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
675 if (b0->flags & VLIB_BUFFER_IS_TRACED)
677 t0 = vlib_add_trace (vm, node, b0, sizeof (ethernet_input_trace_t));
678 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
679 sizeof (t0->packet_data));
680 t0->frame_flags = from_frame->flags;
681 clib_memcpy_fast (&t0->frame_data,
682 vlib_frame_scalar_args (from_frame),
683 sizeof (ethernet_input_frame_t));
/* Core slow-path worker shared by the three ethernet input node variants.
   For each packet: parse the ethernet/VLAN headers, resolve the receiving
   (sub)interface, apply the L3 my-mac filter, batch sub-interface RX
   counters, pick the next node and enqueue. Uses the classic vlib
   dual-loop (2 packets + prefetch) followed by a single-loop tail.
   A one-entry cache (cached_sw_if_index / cached_is_l2 / hi) avoids
   re-looking-up the interface when consecutive packets share sw_if_index.
   NOTE(review): this view is heavily truncated — braces, the bi0/bi1
   dequeue lines, several declarations (is_l20/is_l21, intf0, next0/next1
   in places), parse_header/identify_subint argument lines, and loop
   bookkeeping are not visible. Comments below describe only the visible
   statements. */
690 static_always_inline void
691 ethernet_input_inline (vlib_main_t * vm,
692 vlib_node_runtime_t * node,
693 u32 * from, u32 n_packets,
694 ethernet_input_variant_t variant)
696 vnet_main_t *vnm = vnet_get_main ();
697 ethernet_main_t *em = &ethernet_main;
698 vlib_node_runtime_t *error_node;
699 u32 n_left_from, next_index, *to_next;
700 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
701 u32 thread_index = vm->thread_index;
702 u32 cached_sw_if_index = ~0;
703 u32 cached_is_l2 = 0; /* shut up gcc */
704 vnet_hw_interface_t *hi = NULL; /* used for main interface only */
/* non-ETHERNET variants were fed by another node; attribute errors to the
   main ethernet-input node's error counters */
706 if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
707 error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
711 n_left_from = n_packets;
713 next_index = node->cached_next_index;
/* resume stats batching from where the previous frame left off */
714 stats_sw_if_index = node->runtime_data[0];
715 stats_n_packets = stats_n_bytes = 0;
717 while (n_left_from > 0)
721 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* ---- dual loop: two packets per iteration ---- */
723 while (n_left_from >= 4 && n_left_to_next >= 2)
726 vlib_buffer_t *b0, *b1;
727 u8 next0, next1, error0, error1;
728 u16 type0, orig_type0, type1, orig_type1;
729 u16 outer_id0, inner_id0, outer_id1, inner_id1;
730 u32 match_flags0, match_flags1;
731 u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
732 new_sw_if_index1, len1;
733 vnet_hw_interface_t *hi0, *hi1;
734 main_intf_t *main_intf0, *main_intf1;
735 vlan_intf_t *vlan_intf0, *vlan_intf1;
736 qinq_intf_t *qinq_intf0, *qinq_intf1;
738 ethernet_header_t *e0, *e1;
740 /* Prefetch next iteration. */
742 vlib_buffer_t *b2, *b3;
744 b2 = vlib_get_buffer (vm, from[2]);
745 b3 = vlib_get_buffer (vm, from[3]);
747 vlib_prefetch_buffer_header (b2, STORE);
748 vlib_prefetch_buffer_header (b3, STORE);
750 CLIB_PREFETCH (b2->data, sizeof (ethernet_header_t), LOAD);
751 CLIB_PREFETCH (b3->data, sizeof (ethernet_header_t), LOAD);
763 b0 = vlib_get_buffer (vm, bi0);
764 b1 = vlib_get_buffer (vm, bi1);
766 error0 = error1 = ETHERNET_ERROR_NONE;
767 e0 = vlib_buffer_get_current (b0);
768 type0 = clib_net_to_host_u16 (e0->type);
769 e1 = vlib_buffer_get_current (b1);
770 type1 = clib_net_to_host_u16 (e1->type);
772 /* Set the L2 header offset for all packets */
773 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
774 vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
775 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
776 b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
778 /* Speed-path for the untagged case */
779 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
780 && !ethernet_frame_is_any_tagged_x2 (type0,
784 subint_config_t *subint0;
785 u32 sw_if_index0, sw_if_index1;
787 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
788 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
789 is_l20 = cached_is_l2;
791 /* This is probably wholly unnecessary */
792 if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
795 /* Now sw_if_index0 == sw_if_index1 */
796 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
/* refill the one-entry interface cache */
798 cached_sw_if_index = sw_if_index0;
799 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
800 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
801 subint0 = &intf0->untagged_subint;
802 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
/* L2 sub-interface: record offsets/lengths, no MAC filter */
805 if (PREDICT_TRUE (is_l20 != 0))
807 vnet_buffer (b0)->l3_hdr_offset =
808 vnet_buffer (b0)->l2_hdr_offset +
809 sizeof (ethernet_header_t);
810 vnet_buffer (b1)->l3_hdr_offset =
811 vnet_buffer (b1)->l2_hdr_offset +
812 sizeof (ethernet_header_t);
813 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
814 b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
816 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
818 vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
/* L3 path: unicast frames must be addressed to our MAC */
822 if (!ethernet_address_cast (e0->dst_address) &&
823 (hi->hw_address != 0) &&
824 !eth_mac_equal ((u8 *) e0, hi->hw_address))
825 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
826 if (!ethernet_address_cast (e1->dst_address) &&
827 (hi->hw_address != 0) &&
828 !eth_mac_equal ((u8 *) e1, hi->hw_address))
829 error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
830 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
831 determine_next_node (em, variant, 0, type0, b0,
833 vlib_buffer_advance (b1, sizeof (ethernet_header_t));
834 determine_next_node (em, variant, 0, type1, b1,
840 /* Slow-path for the tagged case */
842 parse_header (variant,
845 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
847 parse_header (variant,
850 &orig_type1, &outer_id1, &inner_id1, &match_flags1);
852 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
853 old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
855 eth_vlan_table_lookups (em,
862 &main_intf0, &vlan_intf0, &qinq_intf0);
864 eth_vlan_table_lookups (em,
871 &main_intf1, &vlan_intf1, &qinq_intf1);
873 identify_subint (hi0,
878 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
880 identify_subint (hi1,
885 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
887 // Save RX sw_if_index for later nodes
888 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
890 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
891 vnet_buffer (b1)->sw_if_index[VLIB_RX] =
893 ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
895 // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
896 if (((new_sw_if_index0 != ~0)
897 && (new_sw_if_index0 != old_sw_if_index0))
898 || ((new_sw_if_index1 != ~0)
899 && (new_sw_if_index1 != old_sw_if_index1)))
/* byte count measured from the start of the L2 header */
902 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
903 - vnet_buffer (b0)->l2_hdr_offset;
904 len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
905 - vnet_buffer (b1)->l2_hdr_offset;
/* optimistically batch both packets into the running counters,
   then back out and count individually if they don't all belong
   to the currently-batched sub-interface */
907 stats_n_packets += 2;
908 stats_n_bytes += len0 + len1;
911 (!(new_sw_if_index0 == stats_sw_if_index
912 && new_sw_if_index1 == stats_sw_if_index)))
914 stats_n_packets -= 2;
915 stats_n_bytes -= len0 + len1;
917 if (new_sw_if_index0 != old_sw_if_index0
918 && new_sw_if_index0 != ~0)
919 vlib_increment_combined_counter (vnm->
920 interface_main.combined_sw_if_counters
922 VNET_INTERFACE_COUNTER_RX,
926 if (new_sw_if_index1 != old_sw_if_index1
927 && new_sw_if_index1 != ~0)
928 vlib_increment_combined_counter (vnm->
929 interface_main.combined_sw_if_counters
931 VNET_INTERFACE_COUNTER_RX,
/* both packets on the same (new) subint: flush the old batch and
   start batching for the new sub-interface */
936 if (new_sw_if_index0 == new_sw_if_index1)
938 if (stats_n_packets > 0)
940 vlib_increment_combined_counter
941 (vnm->interface_main.combined_sw_if_counters
942 + VNET_INTERFACE_COUNTER_RX,
945 stats_n_packets, stats_n_bytes);
946 stats_n_packets = stats_n_bytes = 0;
948 stats_sw_if_index = new_sw_if_index0;
953 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
956 determine_next_node (em, variant, is_l20, type0, b0, &error0,
958 determine_next_node (em, variant, is_l21, type1, b1, &error1,
962 b0->error = error_node->errors[error0];
963 b1->error = error_node->errors[error1];
965 // verify speculative enqueue
966 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
967 n_left_to_next, bi0, bi1, next0,
/* ---- single loop: one packet per iteration (mirrors the dual loop) ---- */
971 while (n_left_from > 0 && n_left_to_next > 0)
976 u16 type0, orig_type0;
977 u16 outer_id0, inner_id0;
979 u32 old_sw_if_index0, new_sw_if_index0, len0;
980 vnet_hw_interface_t *hi0;
981 main_intf_t *main_intf0;
982 vlan_intf_t *vlan_intf0;
983 qinq_intf_t *qinq_intf0;
984 ethernet_header_t *e0;
987 // Prefetch next iteration
992 p2 = vlib_get_buffer (vm, from[1]);
993 vlib_prefetch_buffer_header (p2, STORE);
994 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
1002 n_left_to_next -= 1;
1004 b0 = vlib_get_buffer (vm, bi0);
1006 error0 = ETHERNET_ERROR_NONE;
1007 e0 = vlib_buffer_get_current (b0);
1008 type0 = clib_net_to_host_u16 (e0->type);
1010 /* Set the L2 header offset for all packets */
1011 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1012 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1014 /* Speed-path for the untagged case */
1015 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1016 && !ethernet_frame_is_tagged (type0)))
1019 subint_config_t *subint0;
1022 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1023 is_l20 = cached_is_l2;
1025 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1027 cached_sw_if_index = sw_if_index0;
1028 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
1029 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1030 subint0 = &intf0->untagged_subint;
1031 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1035 if (PREDICT_TRUE (is_l20 != 0))
1037 vnet_buffer (b0)->l3_hdr_offset =
1038 vnet_buffer (b0)->l2_hdr_offset +
1039 sizeof (ethernet_header_t);
1040 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1041 next0 = em->l2_next;
1042 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
1046 if (!ethernet_address_cast (e0->dst_address) &&
1047 (hi->hw_address != 0) &&
1048 !eth_mac_equal ((u8 *) e0, hi->hw_address))
1049 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1050 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
1051 determine_next_node (em, variant, 0, type0, b0,
1057 /* Slow-path for the tagged case */
1058 parse_header (variant,
1061 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
1063 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1065 eth_vlan_table_lookups (em,
1072 &main_intf0, &vlan_intf0, &qinq_intf0);
1074 identify_subint (hi0,
1079 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
1081 // Save RX sw_if_index for later nodes
1082 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1084 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1086 // Increment subinterface stats
1087 // Note that interface-level counters have already been incremented
1088 // prior to calling this function. Thus only subinterface counters
1089 // are incremented here.
1091 // Interface level counters include packets received on the main
1092 // interface and all subinterfaces. Subinterface level counters
1093 // include only those packets received on that subinterface
1094 // Increment stats if the subint is valid and it is not the main intf
1095 if ((new_sw_if_index0 != ~0)
1096 && (new_sw_if_index0 != old_sw_if_index0))
1099 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
1100 - vnet_buffer (b0)->l2_hdr_offset;
1102 stats_n_packets += 1;
1103 stats_n_bytes += len0;
1105 // Batch stat increments from the same subinterface so counters
1106 // don't need to be incremented for every packet.
1107 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1109 stats_n_packets -= 1;
1110 stats_n_bytes -= len0;
1112 if (new_sw_if_index0 != ~0)
1113 vlib_increment_combined_counter
1114 (vnm->interface_main.combined_sw_if_counters
1115 + VNET_INTERFACE_COUNTER_RX,
1116 thread_index, new_sw_if_index0, 1, len0);
1117 if (stats_n_packets > 0)
1119 vlib_increment_combined_counter
1120 (vnm->interface_main.combined_sw_if_counters
1121 + VNET_INTERFACE_COUNTER_RX,
1123 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1124 stats_n_packets = stats_n_bytes = 0;
1126 stats_sw_if_index = new_sw_if_index0;
1130 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1133 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1137 b0->error = error_node->errors[error0];
1139 // verify speculative enqueue
1140 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1141 to_next, n_left_to_next,
1145 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1148 // Increment any remaining batched stats
1149 if (stats_n_packets > 0)
1151 vlib_increment_combined_counter
1152 (vnm->interface_main.combined_sw_if_counters
1153 + VNET_INTERFACE_COUNTER_RX,
1154 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
/* persist the batched sub-interface so the next frame can keep batching */
1155 node->runtime_data[0] = stats_sw_if_index;
/* After eth_input_sort has bucketed buffers by ethertype, enqueue each
   bucket to its next node in one shot:
   - IP4: l3 next (upgraded to ip4-input-no-checksum when the frame said
     checksums are already verified), or l2 next for L2 interfaces,
   - IP6 / MPLS: corresponding l3 next or l2 next,
   - UNKNOWN: rewind the ethernet-header advance and clear the l3-offset
     flag, then fall back to the full slow path (ethernet_input_inline).
   NOTE(review): truncated view — braces, the id/next_index declarations
   and the per-bucket "id = ETYPE_ID_*" assignments (except UNKNOWN) are
   not visible. */
1159 static_always_inline void
1160 eth_input_enqueue_untagged (vlib_main_t * vm, vlib_node_runtime_t * node,
1161 eth_input_data_t * d, int ip4_cksum_ok, int is_l3)
1163 ethernet_main_t *em = &ethernet_main;
1168 if (d->n_bufs_by_etype[id])
1172 next_index = em->l3_next.input_next_ip4;
1173 if (next_index == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
1174 next_index = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;
1177 next_index = em->l2_next;
1179 vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
1180 next_index, d->n_bufs_by_etype[id]);
1184 if (d->n_bufs_by_etype[id])
1186 next_index = is_l3 ? em->l3_next.input_next_ip6 : em->l2_next;
1187 vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
1188 next_index, d->n_bufs_by_etype[id]);
1192 if (d->n_bufs_by_etype[id])
1194 next_index = is_l3 ? em->l3_next.input_next_mpls : em->l2_next;
1195 vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
1196 next_index, d->n_bufs_by_etype[id]);
1199 id = ETYPE_ID_UNKNOWN;
1200 if (d->n_bufs_by_etype[id])
1202 /* in case of l3 interfaces, we already advanced buffer so we need to
1205 eth_input_advance_and_flags (vm, d->bufs_by_etype[id],
1206 d->n_bufs_by_etype[id],
1207 -(i16) sizeof (ethernet_header_t),
1208 ~VNET_BUFFER_F_L3_HDR_OFFSET_VALID, 0);
1209 ethernet_input_inline (vm, node, d->bufs_by_etype[id],
1210 d->n_bufs_by_etype[id],
1211 ETHERNET_INPUT_VARIANT_ETHERNET);
/* Main ethernet-input node entry point. If the driver marked the frame as
   coming from a single interface (ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX), use
   the vectorized fast path: classify/advance all buffers, sort by
   ethertype and bulk-enqueue — treating untagged packets as L2 or L3
   depending on the interface's untagged sub-interface config. Interfaces
   in accept-all mode (and, per the comment, dmac checking generally) fall
   back to the slow path, as do mixed-interface frames.
   NOTE(review): truncated view — braces, the "return frame->n_vectors;"
   and the fallback-path control flow between L665 and L666 are not
   visible; the is_l3 fast path presumably only runs when accept-all is
   set — confirm against the full file. */
1215 VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1216 vlib_node_runtime_t * node,
1217 vlib_frame_t * frame)
1219 vnet_main_t *vnm = vnet_get_main ();
1220 ethernet_main_t *em = &ethernet_main;
1221 u32 *from = vlib_frame_vector_args (frame);
1222 u32 n_packets = frame->n_vectors;
1224 ethernet_input_trace (vm, node, frame);
1226 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1228 eth_input_data_t data, *d = &data;
1229 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
1230 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1231 main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1232 subint_config_t *subint0 = &intf0->untagged_subint;
1233 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
1235 if (subint0->flags & SUBINT_CONFIG_L2)
1237 /* untagged packets are treated as L2 */
1238 eth_input_process_frame (vm, from, d->etypes, n_packets, 0);
1239 eth_input_sort (vm, from, n_packets, d);
1240 eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 0);
1244 ethernet_interface_t *ei;
1245 ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
1247 /* currently only slowpath deals with dmac check */
1248 if (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
1251 /* untagged packets are treated as L3 */
1252 eth_input_process_frame (vm, from, d->etypes, n_packets, 1);
1253 eth_input_sort (vm, from, n_packets, d);
1254 eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 1);
/* mixed-interface frame: take the generic slow path */
1260 ethernet_input_inline (vm, node, from, n_packets,
1261 ETHERNET_INPUT_VARIANT_ETHERNET);
1265 VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1266 vlib_node_runtime_t * node,
1267 vlib_frame_t * from_frame)
1269 u32 *from = vlib_frame_vector_args (from_frame);
1270 u32 n_packets = from_frame->n_vectors;
1271 ethernet_input_trace (vm, node, from_frame);
1272 ethernet_input_inline (vm, node, from, n_packets,
1273 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1277 VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1278 vlib_node_runtime_t * node,
1279 vlib_frame_t * from_frame)
1281 u32 *from = vlib_frame_vector_args (from_frame);
1282 u32 n_packets = from_frame->n_vectors;
1283 ethernet_input_trace (vm, node, from_frame);
1284 ethernet_input_inline (vm, node, from, n_packets,
1285 ETHERNET_INPUT_VARIANT_NOT_L2);
1290 // Return the subinterface config struct for the given sw_if_index
1291 // Also return via parameter the appropriate match flags for the
1292 // configured number of tags.
1293 // On error (unsupported or not ethernet) return 0.
/* NOTE(review): several physical lines (braces, 'else' keywords, some
 * assignments such as 'vlan_table =' and 'subint =') were dropped from
 * this chunk in extraction; all remaining tokens are unchanged except
 * the mojibake fix on the em initializer and comment typo fixes. */
1294 static subint_config_t *
1295 ethernet_sw_interface_get_config (vnet_main_t * vnm,
1297 u32 * flags, u32 * unsupported)
/* fixed mojibake: 'ðernet_main' was '&ethernet_main' */
1299 ethernet_main_t *em = &ethernet_main;
1300 vnet_hw_interface_t *hi;
1301 vnet_sw_interface_t *si;
1302 main_intf_t *main_intf;
1303 vlan_table_t *vlan_table;
1304 qinq_table_t *qinq_table;
1305 subint_config_t *subint = 0;
1307 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1309 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1312 goto done; // non-ethernet interface
1315 // ensure there's an entry for the main intf (shouldn't really be necessary)
1316 vec_validate (em->main_intfs, hi->hw_if_index);
1317 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1319 // Locate the subint for the given ethernet config
1320 si = vnet_get_sw_interface (vnm, sw_if_index);
/* P2P ethernet: one subint per (hw_if_index, client MAC) pair */
1322 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1324 p2p_ethernet_main_t *p2pm = &p2p_main;
1325 u32 p2pe_sw_if_index =
1326 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1327 if (p2pe_sw_if_index == ~0)
1329 pool_get (p2pm->p2p_subif_pool, subint);
1330 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1333 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1334 *flags = SUBINT_CONFIG_P2P;
/* pipe interfaces carry their subint inside the pipe object */
1336 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1340 pipe = pipe_get (sw_if_index);
1341 subint = &pipe->subint;
1342 *flags = SUBINT_CONFIG_P2P;
/* default subint catches any tag combination not matched elsewhere */
1344 else if (si->sub.eth.flags.default_sub)
1346 subint = &main_intf->default_subint;
1347 *flags = SUBINT_CONFIG_MATCH_1_TAG |
1348 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1350 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1352 // if no flags are set then this is a main interface
1353 // so treat as untagged
1354 subint = &main_intf->untagged_subint;
1355 *flags = SUBINT_CONFIG_MATCH_0_TAG;
1360 // first get the vlan table
1361 if (si->sub.eth.flags.dot1ad)
1363 if (main_intf->dot1ad_vlans == 0)
1365 // Allocate a vlan table from the pool
1366 pool_get (em->vlan_pool, vlan_table);
1367 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1371 // Get ptr to existing vlan table
1373 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1378 if (main_intf->dot1q_vlans == 0)
1380 // Allocate a vlan table from the pool
1381 pool_get (em->vlan_pool, vlan_table);
1382 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1386 // Get ptr to existing vlan table
1388 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
/* single-tagged subinterface */
1392 if (si->sub.eth.flags.one_tag)
1394 *flags = si->sub.eth.flags.exact_match ?
1395 SUBINT_CONFIG_MATCH_1_TAG :
1396 (SUBINT_CONFIG_MATCH_1_TAG |
1397 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1399 if (si->sub.eth.flags.outer_vlan_id_any)
1401 // not implemented yet
1407 // a single vlan, a common case
1409 &vlan_table->vlans[si->sub.eth.
1410 outer_vlan_id].single_tag_subint;
/* double-tagged (q-in-q) subinterface */
1417 *flags = si->sub.eth.flags.exact_match ?
1418 SUBINT_CONFIG_MATCH_2_TAG :
1419 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1421 if (si->sub.eth.flags.outer_vlan_id_any
1422 && si->sub.eth.flags.inner_vlan_id_any)
1424 // not implemented yet
1429 if (si->sub.eth.flags.inner_vlan_id_any)
1431 // a specific outer and "any" inner
1432 // don't need a qinq table for this
1434 &vlan_table->vlans[si->sub.eth.
1435 outer_vlan_id].inner_any_subint;
1436 if (si->sub.eth.flags.exact_match)
1438 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1442 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1443 SUBINT_CONFIG_MATCH_3_TAG;
1448 // a specific outer + specific inner vlan id, a common case
1450 // get the qinq table
1451 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1453 // Allocate a qinq table from the pool
1454 pool_get (em->qinq_pool, qinq_table);
1455 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1456 qinq_table - em->qinq_pool;
1460 // Get ptr to existing qinq table
1462 vec_elt_at_index (em->qinq_pool,
1463 vlan_table->vlans[si->sub.
1467 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
/* Admin up/down callback for ethernet (sub)interfaces.
 * Records the sw_if_index in the matching subint config when the
 * interface is admin-up, and resets it to ~0 when it goes down, so the
 * input path can tell whether the subint is currently usable.
 * NOTE(review): declarations of dummy_flags/dummy_unsupported and the
 * 'subint =' assignment appear to have been dropped from this chunk in
 * extraction. */
1476 static clib_error_t *
1477 ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
1479 subint_config_t *subint;
1482 clib_error_t *error = 0;
1484 // Find the config for this subinterface
1486 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1491 // not implemented yet or not ethernet
/* valid sw_if_index when admin-up, ~0 when admin-down */
1495 subint->sw_if_index =
1496 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
/* register this function as the admin up/down hook */
1502 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1505 #ifndef CLIB_MARCH_VARIANT
1506 // Set the L2/L3 mode for the subinterface
/* When l2 is set: mark the subint SUBINT_CONFIG_L2 and (for a main/port
 * interface, per is_port) widen the tag-match flags so all tag depths
 * are accepted.  When l2 is clear: remove the L2 flag and narrow the
 * port's match flags back.  NOTE(review): some physical lines (braces,
 * declarations, the if (l2)/else keywords) were dropped in extraction. */
1508 ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
1510 subint_config_t *subint;
1514 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
/* a "port" is a main hw interface, i.e. not a subinterface */
1516 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1518 // Find the config for this subinterface
1520 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1525 // unimplemented or not ethernet
1529 // Double check that the config we found is for our interface (or the interface is down)
/* bitwise '|' is intentional-looking but benign here: both operands are
 * 0/1 comparison results */
1530 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
/* L2 mode: tag-match everything on the port */
1534 subint->flags |= SUBINT_CONFIG_L2;
1537 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
1538 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
/* L3 mode: clear the L2 flag and stop matching tagged frames */
1542 subint->flags &= ~SUBINT_CONFIG_L2;
1545 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
1546 | SUBINT_CONFIG_MATCH_3_TAG);
1554 * Set the L2/L3 mode for the subinterface regardless of port
/* Like ethernet_sw_interface_set_l2_mode, but only toggles the
 * SUBINT_CONFIG_L2 flag on the subint; it does not touch the port's
 * tag-match flags.  NOTE(review): braces and some declarations were
 * dropped from this chunk in extraction. */
1557 ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
1558 u32 sw_if_index, u32 l2)
1560 subint_config_t *subint;
1564 /* Find the config for this subinterface */
1566 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1571 /* unimplemented or not ethernet */
1576 * Double check that the config we found is for our interface (or the
1577 * interface is down)
1579 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
/* toggle only the L2 flag */
1583 subint->flags |= SUBINT_CONFIG_L2;
1587 subint->flags &= ~SUBINT_CONFIG_L2;
/* Add/delete callback for ethernet subinterfaces.
 * On create: locates (allocating if needed) the subint config for the
 * new sw_if_index, rejects duplicates ("vlan is already in use"), then
 * marks the config valid with the computed match flags.  Interfaces
 * start admin-down, so sw_if_index is initialized to ~0.
 * NOTE(review): some physical lines (braces, the is_create/unsupported
 * branching, 'done:' label) were dropped from this chunk in extraction. */
1595 static clib_error_t *
1596 ethernet_sw_interface_add_del (vnet_main_t * vnm,
1597 u32 sw_if_index, u32 is_create)
1599 clib_error_t *error = 0;
1600 subint_config_t *subint;
1602 u32 unsupported = 0;
1604 // Find the config for this subinterface
1606 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
1611 // not implemented yet or not ethernet
1614 // this is the NYI case
1615 error = clib_error_return (0, "not implemented yet");
1626 // Initialize the subint
1627 if (subint->flags & SUBINT_CONFIG_VALID)
1629 // Error vlan already in use
1630 error = clib_error_return (0, "vlan is already in use");
1634 // Note that config is L3 by default
1635 subint->flags = SUBINT_CONFIG_VALID | match_flags;
1636 subint->sw_if_index = ~0; // because interfaces are initially down
/* register this function as the sw interface add/del hook */
1643 VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
/* Human-readable strings for the ethernet error counters, generated from
 * error.def via the ethernet_error() X-macro (only the string field 's'
 * is kept). */
1645 static char *ethernet_error_strings[] = {
1646 #define ethernet_error(n,c,s) s,
1647 #include "error.def"
1648 #undef ethernet_error
/* Graph-node registration for ethernet-input: per-buffer u32 vector,
 * per-frame ethernet_input_frame_t scalar (carries hw_if_index/flags),
 * error counters from ethernet_error_strings, and next nodes from the
 * foreach_ethernet_input_next X-macro. */
1652 VLIB_REGISTER_NODE (ethernet_input_node) = {
1653 .name = "ethernet-input",
1654 /* Takes a vector of packets. */
1655 .vector_size = sizeof (u32),
1656 .scalar_size = sizeof (ethernet_input_frame_t),
1657 .n_errors = ETHERNET_N_ERROR,
1658 .error_strings = ethernet_error_strings,
1659 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1661 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1662 foreach_ethernet_input_next
1665 .format_buffer = format_ethernet_header_with_length,
1666 .format_trace = format_ethernet_input_trace,
1667 .unformat_buffer = unformat_ethernet_header,
/* Graph-node registration for ethernet-input-type; shares the same next
 * nodes as ethernet-input so next indices stay aligned. */
1670 VLIB_REGISTER_NODE (ethernet_input_type_node) = {
1671 .name = "ethernet-input-type",
1672 /* Takes a vector of packets. */
1673 .vector_size = sizeof (u32),
1674 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1676 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1677 foreach_ethernet_input_next
/* Graph-node registration for ethernet-input-not-l2; shares the same
 * next nodes as ethernet-input so next indices stay aligned. */
1682 VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
1683 .name = "ethernet-input-not-l2",
1684 /* Takes a vector of packets. */
1685 .vector_size = sizeof (u32),
1686 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1688 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1689 foreach_ethernet_input_next
1695 #ifndef CLIB_MARCH_VARIANT
/* Enable/disable RX redirect for a hw interface: when enabled, all RX
 * packets are forced through ethernet-input; when disabled (~0), the
 * device's default next node is restored. */
1697 ethernet_set_rx_redirect (vnet_main_t * vnm,
1698 vnet_hw_interface_t * hi, u32 enable)
1700 // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
1701 // don't go directly to ip4-input)
1702 vnet_hw_interface_rx_redirect_to_node
1703 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
1708 * Initialization and registration for the next_by_ethernet structure
/* Allocate the ethertype -> next-index sparse vector (indexed by the
 * 16-bit ethernet type field) and pre-mark the DROP and PUNT next
 * indices as SPARSE_VEC_INVALID_INDEX so lookups resolving to them are
 * treated as "no registration". */
1712 next_by_ethertype_init (next_by_ethertype_t * l3_next)
1714 l3_next->input_next_by_type = sparse_vec_new
1715 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
1716 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
1718 vec_validate (l3_next->sparse_index_by_input_next_index,
1719 ETHERNET_INPUT_NEXT_DROP);
1720 vec_validate (l3_next->sparse_index_by_input_next_index,
1721 ETHERNET_INPUT_NEXT_PUNT);
1722 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
1723 SPARSE_VEC_INVALID_INDEX;
1724 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
1725 SPARSE_VEC_INVALID_INDEX;
1728 * Make sure we don't wipe out an ethernet registration by mistake
1729 * Can happen if init function ordering constraints are missing.
/* fixed mojibake: 'ðernet_main' was '&ethernet_main' */
1733 ethernet_main_t *em = &ethernet_main;
1734 ASSERT (em->next_by_ethertype_register_called == 0);
1740 // Add an ethertype -> next index mapping to the structure
/* Validates the sparse-vector slot for the ethertype, rebuilds the
 * inverse next-index -> sparse-index mapping when the sparse vector
 * grows, and caches the IP4/IP6/MPLS next indices for the fast path
 * (unless L3 redirect has overwritten them).
 * NOTE(review): the duplicated 'em' declaration below is an artifact of
 * dropped conditional-compilation lines in this chunk. */
1742 next_by_ethertype_register (next_by_ethertype_t * l3_next,
1743 u32 ethertype, u32 next_index)
/* fixed mojibake: 'ðernet_main' was '&ethernet_main' (both lines) */
1747 ethernet_main_t *em = &ethernet_main;
1751 ethernet_main_t *em = &ethernet_main;
1752 em->next_by_ethertype_register_called = 1;
1755 /* Setup ethernet type -> next index sparse vector mapping. */
1756 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
1759 /* Rebuild next index -> sparse index inverse mapping when sparse vector
1761 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
1762 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
1764 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
1766 // do not allow the cached next index's to be updated if L3
1767 // redirect is enabled, as it will have overwritten them
1768 if (!em->redirect_l3)
1770 // Cache common ethertypes directly
1771 if (ethertype == ETHERNET_TYPE_IP4)
1773 l3_next->input_next_ip4 = next_index;
1775 else if (ethertype == ETHERNET_TYPE_IP6)
1777 l3_next->input_next_ip6 = next_index;
1779 else if (ethertype == ETHERNET_TYPE_MPLS)
1781 l3_next->input_next_mpls = next_index;
/* Init function: set up the three ethernet input nodes, the L3
 * ethertype dispatch table, and the vlan/qinq pools.  Pool index 0 of
 * each pool is deliberately burned so that 0 can mean "no table". */
1788 static clib_error_t *
1789 ethernet_input_init (vlib_main_t * vm)
/* fixed mojibake: 'ðernet_main' was '&ethernet_main' */
1791 ethernet_main_t *em = &ethernet_main;
1792 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
1793 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
1795 ethernet_setup_node (vm, ethernet_input_node.index);
1796 ethernet_setup_node (vm, ethernet_input_type_node.index);
1797 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
1799 next_by_ethertype_init (&em->l3_next);
1801 // Initialize pools and vector for vlan parsing
1802 vec_validate (em->main_intfs, 10); // 10 main interfaces
1803 pool_alloc (em->vlan_pool, 10);
1804 pool_alloc (em->qinq_pool, 1);
1806 // The first vlan pool will always be reserved for an invalid table
1807 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
1808 // The first qinq pool will always be reserved for an invalid table
1809 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
/* run at init time */
1814 VLIB_INIT_FUNCTION (ethernet_input_init);
/* Register a node to receive packets of a given ethertype.  Adds the
 * node as a next of all three ethernet input nodes (asserting the next
 * indices stay aligned across them), records it in the per-type info,
 * publishes it in the L3 dispatch table, and notifies l2bvi. */
1817 ethernet_register_input_type (vlib_main_t * vm,
1818 ethernet_type_t type, u32 node_index)
/* fixed mojibake: 'ðernet_main' was '&ethernet_main' */
1820 ethernet_main_t *em = &ethernet_main;
1821 ethernet_type_info_t *ti;
/* make sure ethernet_init has run before touching em state */
1825 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
1827 clib_error_report (error);
1830 ti = ethernet_get_type_info (em, type);
1831 ti->node_index = node_index;
1832 ti->next_index = vlib_node_add_next (vm,
1833 ethernet_input_node.index, node_index);
/* the three input nodes must agree on next indices */
1834 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1835 ASSERT (i == ti->next_index);
1837 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1838 ASSERT (i == ti->next_index);
1840 // Add the L3 node for this ethertype to the next nodes structure
1841 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
1843 // Call the registration functions for other nodes that want a mapping
1844 l2bvi_register_input_type (vm, type, node_index);
/* Register the node that receives L2 (switched) packets from the
 * ethernet input nodes; the next index must be identical across all
 * three input nodes.  NOTE(review): the 'em->l2_next =' assignment
 * target for the first vlib_node_add_next appears to have been dropped
 * from this chunk in extraction. */
1848 ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
/* fixed mojibake: 'ðernet_main' was '&ethernet_main' */
1850 ethernet_main_t *em = &ethernet_main;
1854 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
1857 * Even if we never use these arcs, we have to align the next indices...
1859 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1861 ASSERT (i == em->l2_next);
1863 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1864 ASSERT (i == em->l2_next);
1867 // Register a next node for L3 redirect, and enable L3 redirect
/* Once enabled, all L3 (ip4/ip6/mpls) traffic from ethernet-input is
 * steered to the given node: the cached per-protocol next indices are
 * overwritten with the redirect next index. */
1869 ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
/* fixed mojibake: 'ðernet_main' was '&ethernet_main' */
1871 ethernet_main_t *em = &ethernet_main;
1874 em->redirect_l3 = 1;
1875 em->redirect_l3_next = vlib_node_add_next (vm,
1876 ethernet_input_node.index,
1879 * Change the cached next nodes to the redirect node
1881 em->l3_next.input_next_ip4 = em->redirect_l3_next;
1882 em->l3_next.input_next_ip6 = em->redirect_l3_next;
1883 em->l3_next.input_next_mpls = em->redirect_l3_next;
1886 * Even if we never use these arcs, we have to align the next indices...
1888 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1890 ASSERT (i == em->redirect_l3_next);
1892 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1894 ASSERT (i == em->redirect_l3_next);
1899 * fd.io coding-style-patch-verification: ON
1902 * eval: (c-set-style "gnu")