2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * ethernet_node.c: ethernet packet processing
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/vlib.h>
41 #include <vnet/pg/pg.h>
42 #include <vnet/ethernet/ethernet.h>
43 #include <vnet/ethernet/p2p_ethernet.h>
44 #include <vnet/devices/pipe/pipe.h>
45 #include <vppinfra/sparse_vec.h>
46 #include <vnet/l2/l2_bvi.h>
/* Next-node dispositions for the ethernet-input node family.  The foreach
   macro pairs each symbolic next index with the graph-node name it maps to.
   NOTE(review): the extraction had dropped the `typedef enum {` opener and
   the `#undef _` — restored here. */
#define foreach_ethernet_input_next \
  _ (PUNT, "error-punt")            \
  _ (DROP, "error-drop")            \
  _ (LLC, "llc-input")              \
  _ (IP4_INPUT, "ip4-input")        \
  _ (IP4_INPUT_NCS, "ip4-input-no-checksum")

typedef enum
{
#define _(s,n) ETHERNET_INPUT_NEXT_##s,
  foreach_ethernet_input_next
#undef _
    ETHERNET_INPUT_N_NEXT,
} ethernet_input_next_t;
68 ethernet_input_frame_t frame_data;
69 } ethernet_input_trace_t;
72 format_ethernet_input_trace (u8 * s, va_list * va)
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
76 ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
77 u32 indent = format_get_indent (s);
81 s = format (s, "frame: flags 0x%x", t->frame_flags);
82 if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
83 s = format (s, ", hw-if-index %u, sw-if-index %u",
84 t->frame_data.hw_if_index, t->frame_data.sw_if_index);
85 s = format (s, "\n%U", format_white_space, indent);
87 s = format (s, "%U", format_ethernet_header, t->packet_data);
92 extern vlib_node_registration_t ethernet_input_node;
/* Which flavor of the input node is running: the full ethernet-input path,
   the type-only path entered from LLC/SNAP processing, or the not-L2 path.
   NOTE(review): the `typedef enum {` opener was missing from the
   extraction — restored here. */
typedef enum
{
  ETHERNET_INPUT_VARIANT_ETHERNET,
  ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
  ETHERNET_INPUT_VARIANT_NOT_L2,
} ethernet_input_variant_t;
// Parse the ethernet header to extract vlan tags and innermost ethertype
// NOTE(review): this extraction is missing lines — the buffer parameter
// (b0), the u16 *type out-parameter, braces, and the declarations of the
// locals 'tag' and 'vlan_count' do not appear below.  Verify against the
// complete file before building.
static_always_inline void
parse_header (ethernet_input_variant_t variant,
	      u16 * outer_id, u16 * inner_id, u32 * match_flags)
  /* ETHERNET / NOT_L2 variants: buffer still points at the full
     ethernet header, so record the L2 start and consume it */
  if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
      || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
    ethernet_header_t *e0;
    e0 = (void *) (b0->data + b0->current_data);
    vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
    b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
    vlib_buffer_advance (b0, sizeof (e0[0]));
    *type = clib_net_to_host_u16 (e0->type);
  else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
    // here when prior node was LLC/SNAP processing
    /* only a bare 2-byte type field remains in front of the payload */
    e0 = (void *) (b0->data + b0->current_data);
    vlib_buffer_advance (b0, sizeof (e0[0]));
    *type = clib_net_to_host_u16 (e0[0]);
  // save for distinguishing between dot1q and dot1ad later
  // default the tags to 0 (used if there is no corresponding tag)
  /* start from an untagged-match assumption; refined below per tag */
  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
  // check for vlan encaps
  if (ethernet_frame_is_tagged (*type))
    ethernet_vlan_header_t *h0;
    *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
    h0 = (void *) (b0->data + b0->current_data);
    tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
    /* low 12 bits are the VLAN id; priority/CFI bits are dropped here */
    *outer_id = tag & 0xfff;
    /* NOTE(review): the guarding condition for clearing the 1-tag match
       bit (presumably the priority-tag / vlan-id-0 case) is missing from
       this listing — confirm upstream */
    *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;
    *type = clib_net_to_host_u16 (h0->type);
    vlib_buffer_advance (b0, sizeof (h0[0]));
    if (*type == ETHERNET_TYPE_VLAN)
      // Double tagged packet
      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
      h0 = (void *) (b0->data + b0->current_data);
      tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
      *inner_id = tag & 0xfff;
      *type = clib_net_to_host_u16 (h0->type);
      vlib_buffer_advance (b0, sizeof (h0[0]));
      if (*type == ETHERNET_TYPE_VLAN)
	// More than double tagged packet
	*match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;
	vlib_buffer_advance (b0, sizeof (h0[0]));
	vlan_count = 3;	// "unknown" number, aka, 3-or-more
  /* record the parsed tag depth in the buffer metadata */
  ethernet_buffer_set_vlan_count (b0, vlan_count);
// Determine the subinterface for this packet, given the result of the
// vlan table lookups and vlan header parsing. Check the most specific
// NOTE(review): lines are missing from this extraction — the remaining
// parameters (buffer, match_flags), braces, the 'matched' declaration and
// the branch structure around the my-mac filter do not appear below.
// Verify against the complete file.
static_always_inline void
identify_subint (vnet_hw_interface_t * hi,
		 main_intf_t * main_intf,
		 vlan_intf_t * vlan_intf,
		 qinq_intf_t * qinq_intf,
		 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
  /* table-driven match: fills in the new sw_if_index, error and the
     L2-vs-L3 disposition for this packet */
  matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
				 qinq_intf, new_sw_if_index, error0, is_l2);
  // Perform L3 my-mac filter
  // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
  // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
  ethernet_header_t *e0;
  e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
  /* multicast/broadcast dmacs bypass the my-mac check */
  if (!(ethernet_address_cast (e0->dst_address)))
    if (!eth_mac_equal ((u8 *) e0, hi->hw_address))
      *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
  // Check for down subinterface
  /* ~0 sw_if_index from the lookup means the subinterface is down */
  *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
/* Choose the next graph node (and final error) for a parsed packet:
   drop on error, the configured L2 path for L2 subinterfaces, direct
   IP4/IP6/MPLS dispatch, the L3 redirect node when enabled, or the sparse
   per-ethertype registration table as a fallback.
   NOTE(review): braces and several lines are missing from this extraction —
   the is_l20 parameter/branch header, the 'i0' declaration and the
   left-hand side of the *error0 assignment below.  Verify upstream. */
static_always_inline void
determine_next_node (ethernet_main_t * em,
		     ethernet_input_variant_t variant,
		     u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
  /* current_data now points just past the L2 header */
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
  if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
    // some error occurred
    *next0 = ETHERNET_INPUT_NEXT_DROP;
  // record the L2 len and reset the buffer so the L2 header is preserved
  u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
  vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
  *next0 = em->l2_next;
  ASSERT (vnet_buffer (b0)->l2.l2_len ==
	  ethernet_buffer_header_size (b0));
  /* rewind so the L2 node sees the whole ethernet header again */
  vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));
  // check for common IP/MPLS ethertypes
  else if (type0 == ETHERNET_TYPE_IP4)
    *next0 = em->l3_next.input_next_ip4;
  else if (type0 == ETHERNET_TYPE_IP6)
    *next0 = em->l3_next.input_next_ip6;
  else if (type0 == ETHERNET_TYPE_MPLS)
    *next0 = em->l3_next.input_next_mpls;
  else if (em->redirect_l3)
    // L3 Redirect is on, the cached common next nodes will be
    // pointing to the redirect node, catch the uncommon types here
    *next0 = em->redirect_l3_next;
  // uncommon ethertype, check table
  i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
  *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
  /* NOTE(review): the left-hand side ("*error0 = i0 ==") of this
     conditional assignment is missing from this listing */
  SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
  // The table is not populated with LLC values, so check that now.
  // If variant is variant_ethernet then we came from LLC processing. Don't
  // go back there; drop instead using by keeping the drop/bad table result.
  if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
    *next0 = ETHERNET_INPUT_NEXT_LLC;
/* Compact ethertype classification ids used by the untagged fast path to
   bucket buffers (see eth_input_sort / eth_input_data_t).
   NOTE(review): only the UNKNOWN member survived the extraction; the other
   members are reconstructed from their uses (ETYPE_ID_IP4/IP6/MPLS,
   ETYPE_N_IDS) — confirm member order against the complete file. */
typedef enum
{
  ETYPE_ID_UNKNOWN = 0,
  ETYPE_ID_IP4,
  ETYPE_ID_IP6,
  ETYPE_ID_MPLS,
  ETYPE_N_IDS,
} etype_id_t;
313 static_always_inline void
314 eth_input_advance_and_flags (vlib_main_t * vm, u32 * from, u32 n_left,
315 i16 advance, u32 and_flags, u32 or_flags)
320 vlib_get_buffers (vm, from, b, 8);
321 vlib_buffer_advance (b[0], advance);
322 vlib_buffer_advance (b[1], advance);
323 vlib_buffer_advance (b[2], advance);
324 vlib_buffer_advance (b[3], advance);
325 vlib_buffer_advance (b[4], advance);
326 vlib_buffer_advance (b[5], advance);
327 vlib_buffer_advance (b[6], advance);
328 vlib_buffer_advance (b[7], advance);
329 b[0]->flags = (b[0]->flags & and_flags) | or_flags;
330 b[1]->flags = (b[1]->flags & and_flags) | or_flags;
331 b[2]->flags = (b[2]->flags & and_flags) | or_flags;
332 b[3]->flags = (b[3]->flags & and_flags) | or_flags;
333 b[4]->flags = (b[4]->flags & and_flags) | or_flags;
334 b[5]->flags = (b[5]->flags & and_flags) | or_flags;
335 b[6]->flags = (b[6]->flags & and_flags) | or_flags;
336 b[7]->flags = (b[7]->flags & and_flags) | or_flags;
343 vlib_get_buffers (vm, from, b, 1);
344 vlib_buffer_advance (b[0], advance);
345 b[0]->flags = (b[0]->flags & and_flags) | or_flags;
354 u16 etypes[VLIB_FRAME_SIZE];
355 u32 bufs_by_etype[ETYPE_N_IDS][VLIB_FRAME_SIZE];
356 u16 n_bufs_by_etype[ETYPE_N_IDS];
/* following vector code relies on following assumptions */
/* Compile-time pins on the vlib_buffer_t / opaque metadata layout that the
   SIMD gather/scatter code in eth_input_adv_and_flags_x4 depends on:
   current_data, current_length and flags packed at offsets 0/2/4, and
   l3_hdr_offset immediately following l2_hdr_offset. */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
	       STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
	       "l3_hdr_offset must follow l2_hdr_offset");
/* Record l2/l3 header offsets, advance current_data (L3 interfaces only)
   and OR in the offset-valid flags for four buffers at once.
   NOTE(review): this extraction is missing structural lines — the function
   braces, local declarations (r, radv, adv4), the `if (is_l3)` /
   `if (is_l3 == 0)` guards, and the #else/#endif pairs for the
   CLIB_HAVE_VEC256 conditional.  Verify against the complete file. */
static_always_inline void
eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
#ifdef CLIB_HAVE_VEC256
  /* to reduce number of small loads/stores we are loading first 64 bits
     of each buffer metadata into 256-bit register so we can advance
     current_data, current_length and flags.
     Observed saving of this code is ~2 clocks per packet */
  /* vector if signed 16 bit integers used in signed vector add operation
     to advnce current_data and current_length */
  u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
  /* NOTE(review): the declaration line of this i16x16 initializer
     (presumably 'i16x16 adv4 = {') is missing */
    adv, -adv, 0, 0, adv, -adv, 0, 0,
    adv, -adv, 0, 0, adv, -adv, 0, 0
  /* load 4 x 64 bits */
  r = u64x4_gather (b[0], b[1], b[2], b[3]);
  /* advance current_data (+adv) and shrink current_length (-adv) */
  radv = (u64x4) ((i16x16) r + adv4);
  /* write 4 x 64 bits */
  u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);
  /* use old current_data as l2_hdr_offset and new current_data as
     l3_hdr_offset (comment continuation missing in extraction) */
  r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);
  /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
  u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);
  /* sanity checks: L3 path advanced past the header ... */
  ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
  ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
  ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
  ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);
  ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
  ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
  ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
  ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
  /* ... while the L2 path left current_data at the ethernet header */
  ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
  ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
  ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
  ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);
  ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
  ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
  ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
  ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
  /* scalar fallback (presumably the #else branch of CLIB_HAVE_VEC256) */
  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
  vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
  vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
  vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
  vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
  vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;
  vlib_buffer_advance (b[0], adv);
  vlib_buffer_advance (b[1], adv);
  vlib_buffer_advance (b[2], adv);
  vlib_buffer_advance (b[3], adv);
  b[0]->flags |= flags;
  b[1]->flags |= flags;
  b[2]->flags |= flags;
  b[3]->flags |= flags;
  /* L2 interfaces keep the header; record its length instead */
  vnet_buffer (b[0])->l2.l2_len = adv;
  vnet_buffer (b[1])->l2.l2_len = adv;
  vnet_buffer (b[2])->l2.l2_len = adv;
  vnet_buffer (b[3])->l2.l2_len = adv;
465 static_always_inline void
466 eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
468 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
469 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
472 vlib_buffer_advance (b[0], adv);
473 b[0]->flags |= flags;
475 vnet_buffer (b[0])->l2.l2_len = adv;
/* Fast path for a frame known to come from a single interface: walk the
   buffers 4 at a time (prefetching 8-12 ahead), capture each packet's
   ethertype into 'etype' and stamp l2/l3 offsets and flags.
   NOTE(review): this extraction is missing structural lines — the function
   braces, the while-loop headers for the 16x/4x/1x stages, the
   `etype[i] = e->type;` stores after each vlib_buffer_get_current, and the
   from/etype/n_left increments.  Verify against the complete file. */
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, u32 * from, u16 * etype,
			 u32 n_left, int is_l3)
  vlib_buffer_t *b[16];
  ethernet_header_t *e;
  int adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
  /* prefetch pointers: headers 12 ahead, packet data 8 ahead */
  vlib_buffer_t **ph = b + 12, **pd = b + 8;
  vlib_get_buffers (vm, from, b, 4);
  vlib_get_buffers (vm, from + 8, b + 8, 8);
  vlib_prefetch_buffer_header (ph[0], LOAD);
  vlib_prefetch_buffer_data (pd[0], LOAD);
  e = vlib_buffer_get_current (b[0]);
  vlib_prefetch_buffer_header (ph[1], LOAD);
  vlib_prefetch_buffer_data (pd[1], LOAD);
  e = vlib_buffer_get_current (b[1]);
  vlib_prefetch_buffer_header (ph[2], LOAD);
  vlib_prefetch_buffer_data (pd[2], LOAD);
  e = vlib_buffer_get_current (b[2]);
  vlib_prefetch_buffer_header (ph[3], LOAD);
  vlib_prefetch_buffer_data (pd[3], LOAD);
  e = vlib_buffer_get_current (b[3]);
  eth_input_adv_and_flags_x4 (b, adv, flags, is_l3);
  /* 4-at-a-time stage without prefetch (tail of the frame) */
  vlib_get_buffers (vm, from, b, 4);
  e = vlib_buffer_get_current (b[0]);
  e = vlib_buffer_get_current (b[1]);
  e = vlib_buffer_get_current (b[2]);
  e = vlib_buffer_get_current (b[3]);
  eth_input_adv_and_flags_x4 (b, adv, flags, is_l3);
  /* scalar tail */
  vlib_get_buffers (vm, from, b, 1);
  e = vlib_buffer_get_current (b[0]);
  eth_input_adv_and_flags_x1 (b, adv, flags, is_l3);
/* Two-pass bucketing of a frame's buffers by ethertype: first rewrite the
   etype array in place into compact etype_id_t values (vectorized when
   VEC256/VEC128 is available), then copy buffer indices into the
   per-id buckets in d->bufs_by_etype, taking 16/8-wide shortcuts when a
   run of packets shares the same id.
   NOTE(review): this extraction is missing structural lines — function
   braces, the vector-loop headers, the r/e16/e8/x/y declarations, the
   #endif markers, the scalar while-loop frames and the pointer/count
   increments.  Verify against the complete file. */
static_always_inline void
eth_input_sort (vlib_main_t * vm, u32 * from, u32 n_packets,
		eth_input_data_t * d)
  u16 *etype = d->etypes;
  i32 n_left = n_packets;
#if defined (CLIB_HAVE_VEC256)
  /* network-order ethertype patterns and their id replacements */
  u16x16 et16_ip4 = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
  u16x16 et16_ip6 = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
  u16x16 et16_mpls = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_MPLS));
  u16x16 id16_ip4 = u16x16_splat (ETYPE_ID_IP4);
  u16x16 id16_ip6 = u16x16_splat (ETYPE_ID_IP6);
  u16x16 id16_mpls = u16x16_splat (ETYPE_ID_MPLS);
  e16 = u16x16_load_unaligned (etype);
  /* each compare yields all-ones lanes, masked to the matching id */
  r += (e16 == et16_ip4) & id16_ip4;
  r += (e16 == et16_ip6) & id16_ip6;
  r += (e16 == et16_mpls) & id16_mpls;
  u16x16_store_unaligned (r, etype);
#elif defined (CLIB_HAVE_VEC128)
  u16x8 et8_ip4 = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
  u16x8 et8_ip6 = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
  u16x8 et8_mpls = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_MPLS));
  u16x8 id8_ip4 = u16x8_splat (ETYPE_ID_IP4);
  u16x8 id8_ip6 = u16x8_splat (ETYPE_ID_IP6);
  u16x8 id8_mpls = u16x8_splat (ETYPE_ID_MPLS);
  e8 = u16x8_load_unaligned (etype);
  r += (e8 == et8_ip4) & id8_ip4;
  r += (e8 == et8_ip6) & id8_ip6;
  r += (e8 == et8_mpls) & id8_mpls;
  u16x8_store_unaligned (r, etype);
  /* scalar classification tail
     NOTE(review): byte-order handling of these scalar compares relative to
     the network-order stores above cannot be confirmed from this listing */
  if (etype[0] == ETHERNET_TYPE_IP4)
    etype[0] = ETYPE_ID_IP4;
  else if (etype[0] == ETHERNET_TYPE_IP6)
    etype[0] = ETYPE_ID_IP6;
  else if (etype[0] == ETHERNET_TYPE_MPLS)
    etype[0] = ETYPE_ID_MPLS;
  etype[0] = ETYPE_ID_UNKNOWN;
  /* pass 2: bucket buffer indices by id */
  clib_memset_u16 (d->n_bufs_by_etype, 0, ETYPE_N_IDS);
  y = d->n_bufs_by_etype[x];
#ifdef CLIB_HAVE_VEC256
  /* 16 packets sharing one id: bulk-copy their buffer indices */
  if (n_left >= 16 && u16x16_is_all_equal (u16x16_load_unaligned (etype),
    clib_memcpy_fast (&d->bufs_by_etype[x][y], from, 16 * sizeof (u32));
    d->n_bufs_by_etype[x] += 16;
#ifdef CLIB_HAVE_VEC128
  if (n_left >= 8 && u16x8_is_all_equal (u16x8_load_unaligned (etype),
    clib_memcpy_fast (&d->bufs_by_etype[x][y], from, 8 * sizeof (u32));
    d->n_bufs_by_etype[x] += 8;
  /* scalar bucket append */
  d->bufs_by_etype[x][y] = from[0];
  d->n_bufs_by_etype[x]++;
673 static_always_inline void
674 ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
675 vlib_frame_t * from_frame)
678 if ((node->flags & VLIB_NODE_FLAG_TRACE) == 0)
681 from = vlib_frame_vector_args (from_frame);
682 n_left = from_frame->n_vectors;
686 ethernet_input_trace_t *t0;
687 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
689 if (b0->flags & VLIB_BUFFER_IS_TRACED)
691 t0 = vlib_add_trace (vm, node, b0, sizeof (ethernet_input_trace_t));
692 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
693 sizeof (t0->packet_data));
694 t0->frame_flags = from_frame->flags;
695 clib_memcpy_fast (&t0->frame_data,
696 vlib_frame_scalar_args (from_frame),
697 sizeof (ethernet_input_frame_t));
/* Core ethernet input worker shared by the ethernet-input node variants:
   dual-loop (2 packets, then 1) over the frame, with a cached-interface
   fast path for untagged packets and a parse/vlan-lookup slow path for
   tagged ones; batches subinterface RX counters per sw_if_index.
   NOTE(review): this extraction is missing many structural lines — braces,
   several declarations (bi0/bi1, is_l20/is_l21, next0, sw_if_index0,
   intf0, p2, i0), loop bookkeeping (from/to_next increments), parts of
   split conditions and some call-argument lines.  Every surviving code
   token is preserved below; verify against the complete file. */
static_always_inline void
ethernet_input_inline (vlib_main_t * vm,
		       vlib_node_runtime_t * node,
		       u32 * from, u32 n_packets,
		       ethernet_input_variant_t variant)
  vnet_main_t *vnm = vnet_get_main ();
  ethernet_main_t *em = &ethernet_main;
  vlib_node_runtime_t *error_node;
  u32 n_left_from, next_index, *to_next;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 thread_index = vm->thread_index;
  u32 cached_sw_if_index = ~0;
  u32 cached_is_l2 = 0;		/* shut up gcc */
  vnet_hw_interface_t *hi = NULL;	/* used for main interface only */
  /* non-ethernet variants report errors against the ethernet-input node */
  if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
    error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
  n_left_from = n_packets;
  next_index = node->cached_next_index;
  /* resume stats batching from where the previous frame left off */
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
    /* dual-loop: two packets per iteration */
    while (n_left_from >= 4 && n_left_to_next >= 2)
      vlib_buffer_t *b0, *b1;
      u8 next0, next1, error0, error1;
      u16 type0, orig_type0, type1, orig_type1;
      u16 outer_id0, inner_id0, outer_id1, inner_id1;
      u32 match_flags0, match_flags1;
      u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
	new_sw_if_index1, len1;
      vnet_hw_interface_t *hi0, *hi1;
      main_intf_t *main_intf0, *main_intf1;
      vlan_intf_t *vlan_intf0, *vlan_intf1;
      qinq_intf_t *qinq_intf0, *qinq_intf1;
      ethernet_header_t *e0, *e1;
      /* Prefetch next iteration. */
      vlib_buffer_t *b2, *b3;
      b2 = vlib_get_buffer (vm, from[2]);
      b3 = vlib_get_buffer (vm, from[3]);
      vlib_prefetch_buffer_header (b2, STORE);
      vlib_prefetch_buffer_header (b3, STORE);
      CLIB_PREFETCH (b2->data, sizeof (ethernet_header_t), LOAD);
      CLIB_PREFETCH (b3->data, sizeof (ethernet_header_t), LOAD);
      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      error0 = error1 = ETHERNET_ERROR_NONE;
      e0 = vlib_buffer_get_current (b0);
      type0 = clib_net_to_host_u16 (e0->type);
      e1 = vlib_buffer_get_current (b1);
      type1 = clib_net_to_host_u16 (e1->type);
      /* Set the L2 header offset for all packets */
      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
      b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
      /* Speed-path for the untagged case */
      if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
			&& !ethernet_frame_is_any_tagged_x2 (type0,
	subint_config_t *subint0;
	u32 sw_if_index0, sw_if_index1;
	sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
	is_l20 = cached_is_l2;
	/* This is probably wholly unnecessary */
	if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
	/* Now sw_if_index0 == sw_if_index1 */
	if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
	  /* refresh the one-entry interface cache */
	  cached_sw_if_index = sw_if_index0;
	  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
	  subint0 = &intf0->untagged_subint;
	  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
	if (PREDICT_TRUE (is_l20 != 0))
	  /* L2 path: record l3 offset, keep the ethernet header */
	  vnet_buffer (b0)->l3_hdr_offset =
	    vnet_buffer (b0)->l2_hdr_offset +
	    sizeof (ethernet_header_t);
	  vnet_buffer (b1)->l3_hdr_offset =
	    vnet_buffer (b1)->l2_hdr_offset +
	    sizeof (ethernet_header_t);
	  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
	  b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
	  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
	  vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
	  /* L3 path: my-mac filter then strip the ethernet header */
	  if (!ethernet_address_cast (e0->dst_address) &&
	      (hi->hw_address != 0) &&
	      !eth_mac_equal ((u8 *) e0, hi->hw_address))
	    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
	  if (!ethernet_address_cast (e1->dst_address) &&
	      (hi->hw_address != 0) &&
	      !eth_mac_equal ((u8 *) e1, hi->hw_address))
	    error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
	  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
	  determine_next_node (em, variant, 0, type0, b0,
	  vlib_buffer_advance (b1, sizeof (ethernet_header_t));
	  determine_next_node (em, variant, 0, type1, b1,
	/* Slow-path for the tagged case */
	parse_header (variant,
		      &orig_type0, &outer_id0, &inner_id0, &match_flags0);
	parse_header (variant,
		      &orig_type1, &outer_id1, &inner_id1, &match_flags1);
	old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
	eth_vlan_table_lookups (em,
				&main_intf0, &vlan_intf0, &qinq_intf0);
	eth_vlan_table_lookups (em,
				&main_intf1, &vlan_intf1, &qinq_intf1);
	identify_subint (hi0,
			 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
	identify_subint (hi1,
			 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
	// Save RX sw_if_index for later nodes
	vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	  ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
	vnet_buffer (b1)->sw_if_index[VLIB_RX] =
	  ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
	// Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
	if (((new_sw_if_index0 != ~0)
	     && (new_sw_if_index0 != old_sw_if_index0))
	    || ((new_sw_if_index1 != ~0)
		&& (new_sw_if_index1 != old_sw_if_index1)))
	  /* byte count is from the start of the ethernet header */
	  len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
	    - vnet_buffer (b0)->l2_hdr_offset;
	  len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
	    - vnet_buffer (b1)->l2_hdr_offset;
	  stats_n_packets += 2;
	  stats_n_bytes += len0 + len1;
	  /* subinterface changed: flush the running batch */
	  (!(new_sw_if_index0 == stats_sw_if_index
	     && new_sw_if_index1 == stats_sw_if_index)))
	    stats_n_packets -= 2;
	    stats_n_bytes -= len0 + len1;
	    if (new_sw_if_index0 != old_sw_if_index0
		&& new_sw_if_index0 != ~0)
	      vlib_increment_combined_counter (vnm->
					       interface_main.combined_sw_if_counters
					       VNET_INTERFACE_COUNTER_RX,
	    if (new_sw_if_index1 != old_sw_if_index1
		&& new_sw_if_index1 != ~0)
	      vlib_increment_combined_counter (vnm->
					       interface_main.combined_sw_if_counters
					       VNET_INTERFACE_COUNTER_RX,
	    if (new_sw_if_index0 == new_sw_if_index1)
	      if (stats_n_packets > 0)
		vlib_increment_combined_counter
		  (vnm->interface_main.combined_sw_if_counters
		   + VNET_INTERFACE_COUNTER_RX,
		   stats_n_packets, stats_n_bytes);
		stats_n_packets = stats_n_bytes = 0;
	      stats_sw_if_index = new_sw_if_index0;
	if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
	determine_next_node (em, variant, is_l20, type0, b0, &error0,
	determine_next_node (em, variant, is_l21, type1, b1, &error1,
      b0->error = error_node->errors[error0];
      b1->error = error_node->errors[error1];
      // verify speculative enqueue
      vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
				       n_left_to_next, bi0, bi1, next0,
    /* single-packet loop: same logic as above, one packet at a time */
    while (n_left_from > 0 && n_left_to_next > 0)
      u16 type0, orig_type0;
      u16 outer_id0, inner_id0;
      u32 old_sw_if_index0, new_sw_if_index0, len0;
      vnet_hw_interface_t *hi0;
      main_intf_t *main_intf0;
      vlan_intf_t *vlan_intf0;
      qinq_intf_t *qinq_intf0;
      ethernet_header_t *e0;
      // Prefetch next iteration
      if (n_left_from > 1)
	p2 = vlib_get_buffer (vm, from[1]);
	vlib_prefetch_buffer_header (p2, STORE);
	CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
      n_left_to_next -= 1;
      b0 = vlib_get_buffer (vm, bi0);
      error0 = ETHERNET_ERROR_NONE;
      e0 = vlib_buffer_get_current (b0);
      type0 = clib_net_to_host_u16 (e0->type);
      /* Set the L2 header offset for all packets */
      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
      /* Speed-path for the untagged case */
      if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
			&& !ethernet_frame_is_tagged (type0)))
	subint_config_t *subint0;
	sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	is_l20 = cached_is_l2;
	if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
	  cached_sw_if_index = sw_if_index0;
	  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
	  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
	  subint0 = &intf0->untagged_subint;
	  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
	if (PREDICT_TRUE (is_l20 != 0))
	  vnet_buffer (b0)->l3_hdr_offset =
	    vnet_buffer (b0)->l2_hdr_offset +
	    sizeof (ethernet_header_t);
	  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
	  next0 = em->l2_next;
	  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
	  if (!ethernet_address_cast (e0->dst_address) &&
	      (hi->hw_address != 0) &&
	      !eth_mac_equal ((u8 *) e0, hi->hw_address))
	    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
	  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
	  determine_next_node (em, variant, 0, type0, b0,
	/* Slow-path for the tagged case */
	parse_header (variant,
		      &orig_type0, &outer_id0, &inner_id0, &match_flags0);
	old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	eth_vlan_table_lookups (em,
				&main_intf0, &vlan_intf0, &qinq_intf0);
	identify_subint (hi0,
			 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
	// Save RX sw_if_index for later nodes
	vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	  ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
	// Increment subinterface stats
	// Note that interface-level counters have already been incremented
	// prior to calling this function. Thus only subinterface counters
	// are incremented here.
	// Interface level counters include packets received on the main
	// interface and all subinterfaces. Subinterface level counters
	// include only those packets received on that subinterface
	// Increment stats if the subint is valid and it is not the main intf
	if ((new_sw_if_index0 != ~0)
	    && (new_sw_if_index0 != old_sw_if_index0))
	  len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
	    - vnet_buffer (b0)->l2_hdr_offset;
	  stats_n_packets += 1;
	  stats_n_bytes += len0;
	  // Batch stat increments from the same subinterface so counters
	  // don't need to be incremented for every packet.
	  if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
	    stats_n_packets -= 1;
	    stats_n_bytes -= len0;
	    if (new_sw_if_index0 != ~0)
	      vlib_increment_combined_counter
		(vnm->interface_main.combined_sw_if_counters
		 + VNET_INTERFACE_COUNTER_RX,
		 thread_index, new_sw_if_index0, 1, len0);
	    if (stats_n_packets > 0)
	      vlib_increment_combined_counter
		(vnm->interface_main.combined_sw_if_counters
		 + VNET_INTERFACE_COUNTER_RX,
		 stats_sw_if_index, stats_n_packets, stats_n_bytes);
	      stats_n_packets = stats_n_bytes = 0;
	    stats_sw_if_index = new_sw_if_index0;
	if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
	determine_next_node (em, variant, is_l20, type0, b0, &error0,
      b0->error = error_node->errors[error0];
      // verify speculative enqueue
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
				       to_next, n_left_to_next,
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  // Increment any remaining batched stats
  if (stats_n_packets > 0)
    vlib_increment_combined_counter
      (vnm->interface_main.combined_sw_if_counters
       + VNET_INTERFACE_COUNTER_RX,
       thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
  /* remember the batching interface for the next frame */
  node->runtime_data[0] = stats_sw_if_index;
/* Hand off the per-ethertype buckets built by eth_input_sort: IP4 (with
   checksum-offload shortcut), IP6 and MPLS go straight to their L3 next
   nodes (or the L2 next for L2 interfaces); unknown ethertypes are rewound
   and fed through the generic ethernet_input_inline slow path.
   NOTE(review): braces, the id/next_index declarations, the per-id `id =`
   assignments for IP4/IP6/MPLS, and the is_l3 branch headers are missing
   from this extraction.  Verify against the complete file. */
static_always_inline void
eth_input_enqueue_untagged (vlib_main_t * vm, vlib_node_runtime_t * node,
			    eth_input_data_t * d, int ip4_cksum_ok, int is_l3)
  ethernet_main_t *em = &ethernet_main;
  if (d->n_bufs_by_etype[id])
      next_index = em->l3_next.input_next_ip4;
      /* device validated the IP4 checksum: skip the checksum node */
      if (next_index == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
	next_index = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;
      next_index = em->l2_next;
      vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
					  next_index, d->n_bufs_by_etype[id]);
  if (d->n_bufs_by_etype[id])
      next_index = is_l3 ? em->l3_next.input_next_ip6 : em->l2_next;
      vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
					  next_index, d->n_bufs_by_etype[id]);
  if (d->n_bufs_by_etype[id])
      next_index = is_l3 ? em->l3_next.input_next_mpls : em->l2_next;
      vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
					  next_index, d->n_bufs_by_etype[id]);
  id = ETYPE_ID_UNKNOWN;
  if (d->n_bufs_by_etype[id])
      /* in case of l3 interfaces, we already advanced buffer so we need to
         rewind it back and clear the l3 offset flag (comment continuation
         missing in extraction) */
      eth_input_advance_and_flags (vm, d->bufs_by_etype[id],
				   d->n_bufs_by_etype[id],
				   -(i16) sizeof (ethernet_header_t),
				   ~VNET_BUFFER_F_L3_HDR_OFFSET_VALID, 0);
      ethernet_input_inline (vm, node, d->bufs_by_etype[id],
			     d->n_bufs_by_etype[id],
			     ETHERNET_INPUT_VARIANT_ETHERNET);
/* ethernet-input node entry point.  Frames marked as coming from a single
   interface take the vectorized process/sort/enqueue fast path (L2 or L3
   depending on the untagged subinterface config); everything else falls
   back to the generic per-packet path.
   NOTE(review): braces, the else branches, the `return frame->n_vectors;`
   and the dmac-check fallthrough around the ACCEPT_ALL test are missing
   from this extraction.  Verify against the complete file. */
VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
  vnet_main_t *vnm = vnet_get_main ();
  ethernet_main_t *em = &ethernet_main;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_packets = frame->n_vectors;
  ethernet_input_trace (vm, node, frame);
  if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
      eth_input_data_t data, *d = &data;
      ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
      vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
      main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      subint_config_t *subint0 = &intf0->untagged_subint;
      int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
      if (subint0->flags & SUBINT_CONFIG_L2)
	  /* untagged packets are treated as L2 */
	  eth_input_process_frame (vm, from, d->etypes, n_packets, 0);
	  eth_input_sort (vm, from, n_packets, d);
	  eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 0);
	  ethernet_interface_t *ei;
	  ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
	  /* currently only slowpath deals with dmac check */
	  if (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
	  /* untagged packets are treated as L3 */
	  eth_input_process_frame (vm, from, d->etypes, n_packets, 1);
	  eth_input_sort (vm, from, n_packets, d);
	  eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 1);
    /* mixed-interface frame: generic per-packet path */
    ethernet_input_inline (vm, node, from, n_packets,
			   ETHERNET_INPUT_VARIANT_ETHERNET);
/*
 * ethernet-input-type node function: buffers arrive with the ethertype
 * already known, so the inline handler runs in the ETHERNET_TYPE variant.
 */
1279 VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1280 vlib_node_runtime_t * node,
1281 vlib_frame_t * from_frame)
1283 u32 *from = vlib_frame_vector_args (from_frame);
1284 u32 n_packets = from_frame->n_vectors;
1285 ethernet_input_trace (vm, node, from_frame);
1286 ethernet_input_inline (vm, node, from, n_packets,
1287 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
/*
 * ethernet-input-not-l2 node function: same per-packet path, but in the
 * NOT_L2 variant (packets that are known not to be bridged/xconnected).
 */
1291 VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1292 vlib_node_runtime_t * node,
1293 vlib_frame_t * from_frame)
1295 u32 *from = vlib_frame_vector_args (from_frame);
1296 u32 n_packets = from_frame->n_vectors;
1297 ethernet_input_trace (vm, node, from_frame);
1298 ethernet_input_inline (vm, node, from, n_packets,
1299 ETHERNET_INPUT_VARIANT_NOT_L2);
1304 // Return the subinterface config struct for the given sw_if_index
1305 // Also return via parameter the appropriate match flags for the
1306 // configured number of tags.
1307 // On error (unsupported or not ethernet) return 0.
1308 static subint_config_t *
1309 ethernet_sw_interface_get_config (vnet_main_t * vnm,
1311 u32 * flags, u32 * unsupported)
1313 ethernet_main_t *em = ðernet_main;
1314 vnet_hw_interface_t *hi;
1315 vnet_sw_interface_t *si;
1316 main_intf_t *main_intf;
1317 vlan_table_t *vlan_table;
1318 qinq_table_t *qinq_table;
1319 subint_config_t *subint = 0;
1321 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1323 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1326 goto done; // non-ethernet interface
1329 // ensure there's an entry for the main intf (shouldn't really be necessary)
1330 vec_validate (em->main_intfs, hi->hw_if_index);
1331 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1333 // Locate the subint for the given ethernet config
1334 si = vnet_get_sw_interface (vnm, sw_if_index);
// P2P subinterfaces are keyed by (hw_if_index, client MAC); allocate a
// pool entry on first lookup miss, then reuse via si->p2p.pool_index.
1336 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1338 p2p_ethernet_main_t *p2pm = &p2p_main;
1339 u32 p2pe_sw_if_index =
1340 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1341 if (p2pe_sw_if_index == ~0)
1343 pool_get (p2pm->p2p_subif_pool, subint);
1344 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1347 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1348 *flags = SUBINT_CONFIG_P2P;
1350 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1354 pipe = pipe_get (sw_if_index);
1355 subint = &pipe->subint;
1356 *flags = SUBINT_CONFIG_P2P;
// default subinterface catches any tagged packet not matched elsewhere
1358 else if (si->sub.eth.flags.default_sub)
1360 subint = &main_intf->default_subint;
1361 *flags = SUBINT_CONFIG_MATCH_1_TAG |
1362 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1364 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1366 // if no flags are set then this is a main interface
1367 // so treat as untagged
1368 subint = &main_intf->untagged_subint;
1369 *flags = SUBINT_CONFIG_MATCH_0_TAG;
// Tagged case: pick the dot1ad or dot1q vlan table (table index 0 is the
// reserved "invalid" table, so 0 means "not yet allocated").
1374 // first get the vlan table
1375 if (si->sub.eth.flags.dot1ad)
1377 if (main_intf->dot1ad_vlans == 0)
1379 // Allocate a vlan table from the pool
1380 pool_get (em->vlan_pool, vlan_table);
1381 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1385 // Get ptr to existing vlan table
1387 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1392 if (main_intf->dot1q_vlans == 0)
1394 // Allocate a vlan table from the pool
1395 pool_get (em->vlan_pool, vlan_table);
1396 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1400 // Get ptr to existing vlan table
1402 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
// Single-tag subinterface: exact-match restricts to exactly one tag;
// otherwise deeper-tagged packets may also match.
1406 if (si->sub.eth.flags.one_tag)
1408 *flags = si->sub.eth.flags.exact_match ?
1409 SUBINT_CONFIG_MATCH_1_TAG :
1410 (SUBINT_CONFIG_MATCH_1_TAG |
1411 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1413 if (si->sub.eth.flags.outer_vlan_id_any)
1415 // not implemented yet
1421 // a single vlan, a common case
1423 &vlan_table->vlans[si->sub.eth.
1424 outer_vlan_id].single_tag_subint;
// Two-tag (QinQ) subinterface
1431 *flags = si->sub.eth.flags.exact_match ?
1432 SUBINT_CONFIG_MATCH_2_TAG :
1433 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1435 if (si->sub.eth.flags.outer_vlan_id_any
1436 && si->sub.eth.flags.inner_vlan_id_any)
1438 // not implemented yet
1443 if (si->sub.eth.flags.inner_vlan_id_any)
1445 // a specific outer and "any" inner
1446 // don't need a qinq table for this
1448 &vlan_table->vlans[si->sub.eth.
1449 outer_vlan_id].inner_any_subint;
1450 if (si->sub.eth.flags.exact_match)
1452 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1456 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1457 SUBINT_CONFIG_MATCH_3_TAG;
1462 // a specific outer + specific inner vlan id, a common case
1464 // get the qinq table
1465 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1467 // Allocate a qinq table from the pool
1468 pool_get (em->qinq_pool, qinq_table);
1469 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1470 qinq_table - em->qinq_pool;
1474 // Get ptr to existing qinq table
1476 vec_elt_at_index (em->qinq_pool,
1477 vlan_table->vlans[si->sub.
1481 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
/*
 * Admin up/down callback: record the sw_if_index in the subinterface
 * config when the interface comes up, reset it to ~0 when it goes down.
 */
1490 static clib_error_t *
1491 ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
1493 subint_config_t *subint;
1496 clib_error_t *error = 0;
1498 // Find the config for this subinterface
1500 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1505 // not implemented yet or not ethernet
/* ~0 marks the subint as down so the input path ignores it */
1509 subint->sw_if_index =
1510 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1516 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1519 #ifndef CLIB_MARCH_VARIANT
1520 // Set the L2/L3 mode for the subinterface
/* l2 != 0 selects L2 mode; clearing it returns the subint to L3. */
1522 ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
1524 subint_config_t *subint;
1528 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
/* a non-SUB interface is the physical port itself */
1530 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1532 // Find the config for this subinterface
1534 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1539 // unimplemented or not ethernet
1543 // Double check that the config we found is for our interface (or the interface is down)
1544 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1548 subint->flags |= SUBINT_CONFIG_L2;
/* NOTE(review): the flags below appear to widen/narrow tag matching when
 * a port switches mode — the full expression spans lines not shown here. */
1551 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
1552 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1556 subint->flags &= ~SUBINT_CONFIG_L2;
1559 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
1560 | SUBINT_CONFIG_MATCH_3_TAG);
1568 * Set the L2/L3 mode for the subinterface regardless of port
1571 ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
1572 u32 sw_if_index, u32 l2)
1574 subint_config_t *subint;
1578 /* Find the config for this subinterface */
1580 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1585 /* unimplemented or not ethernet */
1590 * Double check that the config we found is for our interface (or the
1591 * interface is down)
1593 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
/* toggle only the L2 bit; unlike the port-aware variant, tag-match
 * flags are left untouched */
1597 subint->flags |= SUBINT_CONFIG_L2;
1601 subint->flags &= ~SUBINT_CONFIG_L2;
/*
 * Create/delete callback for ethernet subinterfaces.  On create, mark the
 * matched subint config valid with the match flags computed by
 * ethernet_sw_interface_get_config; reject duplicates and NYI configs.
 */
1609 static clib_error_t *
1610 ethernet_sw_interface_add_del (vnet_main_t * vnm,
1611 u32 sw_if_index, u32 is_create)
1613 clib_error_t *error = 0;
1614 subint_config_t *subint;
1616 u32 unsupported = 0;
1618 // Find the config for this subinterface
1620 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
1625 // not implemented yet or not ethernet
1628 // this is the NYI case
1629 error = clib_error_return (0, "not implemented yet");
1640 // Initialize the subint
1641 if (subint->flags & SUBINT_CONFIG_VALID)
1643 // Error vlan already in use
1644 error = clib_error_return (0, "vlan is already in use");
1648 // Note that config is L3 by default
1649 subint->flags = SUBINT_CONFIG_VALID | match_flags;
1650 subint->sw_if_index = ~0; // because interfaces are initially down
1657 VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
/* Error counter strings generated from error.def via the x-macro below. */
1659 static char *ethernet_error_strings[] = {
1660 #define ethernet_error(n,c,s) s,
1661 #include "error.def"
1662 #undef ethernet_error
/* Node registration for ethernet-input; carries per-frame scalar args
 * (ethernet_input_frame_t) used by the single-sw_if_index fast path. */
1666 VLIB_REGISTER_NODE (ethernet_input_node) = {
1667 .name = "ethernet-input",
1668 /* Takes a vector of packets. */
1669 .vector_size = sizeof (u32),
1670 .scalar_size = sizeof (ethernet_input_frame_t),
1671 .n_errors = ETHERNET_N_ERROR,
1672 .error_strings = ethernet_error_strings,
1673 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
/* next-node table expanded from foreach_ethernet_input_next */
1675 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1676 foreach_ethernet_input_next
1679 .format_buffer = format_ethernet_header_with_length,
1680 .format_trace = format_ethernet_input_trace,
1681 .unformat_buffer = unformat_ethernet_header,
/* Node registration for ethernet-input-type (ethertype already known). */
1684 VLIB_REGISTER_NODE (ethernet_input_type_node) = {
1685 .name = "ethernet-input-type",
1686 /* Takes a vector of packets. */
1687 .vector_size = sizeof (u32),
1688 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1690 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1691 foreach_ethernet_input_next
/* Node registration for ethernet-input-not-l2 (non-bridged traffic). */
1696 VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
1697 .name = "ethernet-input-not-l2",
1698 /* Takes a vector of packets. */
1699 .vector_size = sizeof (u32),
1700 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1702 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1703 foreach_ethernet_input_next
1709 #ifndef CLIB_MARCH_VARIANT
/* Force (enable) or restore (disable, via ~0) RX redirection of an
 * interface's packets through the ethernet-input node. */
1711 ethernet_set_rx_redirect (vnet_main_t * vnm,
1712 vnet_hw_interface_t * hi, u32 enable)
1714 // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
1715 // don't go directly to ip4-input)
1716 vnet_hw_interface_rx_redirect_to_node
1717 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
1722 * Initialization and registration for the next_by_ethernet structure
1726 next_by_ethertype_init (next_by_ethertype_t * l3_next)
/* sparse vector indexed by the 16-bit ethertype field */
1728 l3_next->input_next_by_type = sparse_vec_new
1729 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
1730 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
1732 vec_validate (l3_next->sparse_index_by_input_next_index,
1733 ETHERNET_INPUT_NEXT_DROP);
1734 vec_validate (l3_next->sparse_index_by_input_next_index,
1735 ETHERNET_INPUT_NEXT_PUNT);
/* DROP/PUNT have no ethertype of their own — mark their inverse-map
 * slots invalid */
1736 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
1737 SPARSE_VEC_INVALID_INDEX;
1738 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
1739 SPARSE_VEC_INVALID_INDEX;
1742 * Make sure we don't wipe out an ethernet registration by mistake
1743 * Can happen if init function ordering constraints are missing.
1747 ethernet_main_t *em = ðernet_main;
1748 ASSERT (em->next_by_ethertype_register_called == 0);
1754 // Add an ethertype -> next index mapping to the structure
1756 next_by_ethertype_register (next_by_ethertype_t * l3_next,
1757 u32 ethertype, u32 next_index)
1761 ethernet_main_t *em = ðernet_main;
1765 ethernet_main_t *em = ðernet_main;
/* record that a registration happened (checked by the init-order ASSERT
 * in next_by_ethertype_init) */
1766 em->next_by_ethertype_register_called = 1;
1769 /* Setup ethernet type -> next index sparse vector mapping. */
1770 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
1773 /* Rebuild next index -> sparse index inverse mapping when sparse vector
1775 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
/* start at 1: sparse index 0 is the invalid/default entry */
1776 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
1778 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
1780 // do not allow the cached next index's to be updated if L3
1781 // redirect is enabled, as it will have overwritten them
1782 if (!em->redirect_l3)
1784 // Cache common ethertypes directly
1785 if (ethertype == ETHERNET_TYPE_IP4)
1787 l3_next->input_next_ip4 = next_index;
1789 else if (ethertype == ETHERNET_TYPE_IP6)
1791 l3_next->input_next_ip6 = next_index;
1793 else if (ethertype == ETHERNET_TYPE_MPLS)
1795 l3_next->input_next_mpls = next_index;
/*
 * Init function: set up the three ethernet input nodes, the L3 next-node
 * table, and the vlan/qinq parsing pools.
 */
1802 static clib_error_t *
1803 ethernet_input_init (vlib_main_t * vm)
1805 ethernet_main_t *em = ðernet_main;
1806 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
1807 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
1809 ethernet_setup_node (vm, ethernet_input_node.index);
1810 ethernet_setup_node (vm, ethernet_input_type_node.index);
1811 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
1813 next_by_ethertype_init (&em->l3_next);
1815 // Initialize pools and vector for vlan parsing
1816 vec_validate (em->main_intfs, 10); // 10 main interfaces
1817 pool_alloc (em->vlan_pool, 10);
1818 pool_alloc (em->qinq_pool, 1);
/* index 0 of each pool is reserved so "0" can mean "no table allocated"
 * in ethernet_sw_interface_get_config */
1820 // The first vlan pool will always be reserved for an invalid table
1821 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
1822 // The first qinq pool will always be reserved for an invalid table
1823 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
1828 VLIB_INIT_FUNCTION (ethernet_input_init);
/*
 * Register a node to receive packets of a given ethertype.  The next-arc
 * index must be identical across all three input nodes, hence the ASSERTs.
 */
1831 ethernet_register_input_type (vlib_main_t * vm,
1832 ethernet_type_t type, u32 node_index)
1834 ethernet_main_t *em = ðernet_main;
1835 ethernet_type_info_t *ti;
/* make sure ethernet_init ran before touching type info */
1839 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
1841 clib_error_report (error);
1844 ti = ethernet_get_type_info (em, type);
1845 ti->node_index = node_index;
1846 ti->next_index = vlib_node_add_next (vm,
1847 ethernet_input_node.index, node_index);
1848 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1849 ASSERT (i == ti->next_index);
1851 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1852 ASSERT (i == ti->next_index);
1854 // Add the L3 node for this ethertype to the next nodes structure
1855 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
1857 // Call the registration functions for other nodes that want a mapping
1858 l2bvi_register_input_type (vm, type, node_index);
/* Register the node that receives all L2 (bridged/xconnect) packets;
 * the arc index must match on every ethernet input node. */
1862 ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
1864 ethernet_main_t *em = ðernet_main;
1868 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
1871 * Even if we never use these arcs, we have to align the next indices...
1873 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1875 ASSERT (i == em->l2_next);
1877 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1878 ASSERT (i == em->l2_next);
1881 // Register a next node for L3 redirect, and enable L3 redirect
1883 ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
1885 ethernet_main_t *em = ðernet_main;
1888 em->redirect_l3 = 1;
1889 em->redirect_l3_next = vlib_node_add_next (vm,
1890 ethernet_input_node.index,
1893 * Change the cached next nodes to the redirect node
/* overrides ip4/ip6/mpls fast-path next indices; once set,
 * next_by_ethertype_register will refuse to re-cache them */
1895 em->l3_next.input_next_ip4 = em->redirect_l3_next;
1896 em->l3_next.input_next_ip6 = em->redirect_l3_next;
1897 em->l3_next.input_next_mpls = em->redirect_l3_next;
1900 * Even if we never use these arcs, we have to align the next indices...
1902 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1904 ASSERT (i == em->redirect_l3_next);
1906 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1908 ASSERT (i == em->redirect_l3_next);
1913 * fd.io coding-style-patch-verification: ON
1916 * eval: (c-set-style "gnu")