/*
 * node.c: gre packet processing
 *
 * Copyright (c) 2012 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/gre/gre.h>
21 #include <vppinfra/sparse_vec.h>
/* Dispositions for decapsulated GRE payloads: error handling nodes
   (punt/drop) plus the payload input nodes. */
#define foreach_gre_input_next			\
_(PUNT, "error-punt")                           \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input")                       \
_(IP6_INPUT, "ip6-input")

/* NOTE(review): the typedef wrapper below was truncated in this
   extract and has been restored; grounded by the GRE_INPUT_NEXT_##s
   initializers and GRE_INPUT_N_NEXT used in the node registration --
   verify the enum tag name against the full file. */
typedef enum {
#define _(s,n) GRE_INPUT_NEXT_##s,
  foreach_gre_input_next
#undef _
  GRE_INPUT_N_NEXT,
} gre_input_next_t;
43 u8 * format_gre_rx_trace (u8 * s, va_list * args)
45 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
46 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
47 gre_rx_trace_t * t = va_arg (*args, gre_rx_trace_t *);
49 s = format (s, "GRE: tunnel %d len %d src %U dst %U",
50 t->tunnel_id, clib_net_to_host_u16(t->length),
51 format_ip4_address, &t->src.as_u8,
52 format_ip4_address, &t->dst.as_u8);
57 /* Sparse vector mapping gre protocol in network byte order
59 u16 * next_by_protocol;
60 } gre_input_runtime_t;
/*
 * gre-input node dispatch function.
 *
 * For each packet (handed over by ip4-local with the buffer's current
 * pointer still at the outer IPv4 header): save the outer src/dst
 * addresses into the buffer metadata, advance past the IPv4 and GRE
 * headers, and pick the next node from the protocol -> next-index
 * sparse vector.  Non-zero GRE versions are dropped as unsupported;
 * for IP payloads the outer addresses are mapped back to a tunnel so
 * per-interface RX counters and the FIB index can be set up.
 *
 * NOTE(review): this extract appears to have lines elided (return
 * type, bi0/bi1 declarations, from/to_next bookkeeping, several
 * braces).  Comments below describe only what the visible code
 * establishes.
 */
gre_input (vlib_main_t * vm,
           vlib_node_runtime_t * node,
           vlib_frame_t * from_frame)
  gre_main_t * gm = &gre_main;
  /* Per-node runtime carries the GRE-protocol -> next-index sparse vector. */
  gre_input_runtime_t * rt = (void *) node->runtime_data;
  __attribute__((unused)) u32 n_left_from, next_index, * from, * to_next;
  /* One-entry tunnel lookup cache, keyed by (outer dst << 32) | outer src. */
  u64 cached_tunnel_key = (u64) ~0;
  u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index;
  u32 cached_tunnel_fib_index = 0, tunnel_fib_index;
  /* Thread index for the per-thread combined interface counters. */
  u32 cpu_index = os_get_cpu_number();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      /* Dual loop: two packets in flight while at least four remain,
         so the next pair can be prefetched. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
          vlib_buffer_t * b0, * b1;
          gre_header_t * h0, * h1;
          u16 version0, version1;
          u32 i0, i1, next0, next1, protocol0, protocol1;
          ip4_header_t *ip0, *ip1;

          /* Prefetch next iteration. */
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            /* Only one GRE header's worth of payload is parsed here,
               so only that much is prefetched. */
            CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* ip4_local hands us the ip header, not the gre header */
          ip0 = vlib_buffer_get_current (b0);
          ip1 = vlib_buffer_get_current (b1);

          /* Save src + dst ip4 address, e.g. for mpls-o-gre */
          vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
          vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
          vnet_buffer(b1)->gre.src = ip1->src_address.as_u32;
          vnet_buffer(b1)->gre.dst = ip1->dst_address.as_u32;

          /* Step past the outer IPv4 header to reach the GRE header. */
          vlib_buffer_advance (b0, sizeof (*ip0));
          vlib_buffer_advance (b1, sizeof (*ip1));

          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);

          /* Index sparse array with network byte order. */
          protocol0 = h0->protocol;
          protocol1 = h1->protocol;
          sparse_vec_index2 (rt->next_by_protocol, protocol0, protocol1,
          next0 = vec_elt(rt->next_by_protocol, i0);
          next1 = vec_elt(rt->next_by_protocol, i1);

          /* SPARSE_VEC_INVALID_INDEX means no handler was registered
             for this GRE protocol value. */
          b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
          b1->error = node->errors[i1 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];

          /* Only GRE version 0 is accepted; any other version forces
             the drop disposition below. */
          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          version1 = clib_net_to_host_u16 (h1->flags_and_version);
          verr1 = version1 & GRE_VERSION_MASK;

          b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
          next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;
          b1->error = verr1 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
          next1 = verr1 ? GRE_INPUT_NEXT_DROP : next1;

          /* RPF check for ip4/ip6 input */
          if (PREDICT_FALSE(next0 == GRE_INPUT_NEXT_IP4_INPUT
                            || next0 == GRE_INPUT_NEXT_IP6_INPUT))
              u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
                         (u64)(vnet_buffer(b0)->gre.src);

              /* NOTE(review): cached_tunnel_key is never visibly
                 updated after a lookup, so this one-entry cache may
                 never hit -- verify against the full file. */
              if (cached_tunnel_key != key)
                  vnet_hw_interface_t * hi;
                  ip4_main_t * ip4m = &ip4_main;
                  p = hash_get (gm->tunnel_by_key, key);
                      /* Unknown (dst, src) pair: not one of our tunnels. */
                      next0 = GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
                      t = pool_elt_at_index (gm->tunnels, p[0]);
                      hi = vnet_get_hw_interface (gm->vnet_main,
                      tunnel_sw_if_index = hi->sw_if_index;
                      tunnel_fib_index = vec_elt (ip4m->fib_index_by_sw_if_index,

                      cached_tunnel_sw_if_index = tunnel_sw_if_index;
                      cached_tunnel_fib_index = tunnel_fib_index;
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                  tunnel_fib_index = cached_tunnel_fib_index;

              /* Bump the tunnel interface's RX counters. */
              u32 len = vlib_buffer_length_in_chain (vm, b0);
              vnet_interface_main_t *im = &gm->vnet_main->interface_main;
              vlib_increment_combined_counter (im->combined_sw_if_counters
                                               + VNET_INTERFACE_COUNTER_RX,
              /* The TX sw_if_index slot is loaded with the tunnel's FIB
                 index -- presumably consumed as the lookup FIB by the
                 ip4/ip6 input path; verify downstream usage. */
              vnet_buffer(b0)->sw_if_index[VLIB_TX] = tunnel_fib_index;

          /* Same tunnel-lookup path for the second packet. */
          if (PREDICT_FALSE(next1 == GRE_INPUT_NEXT_IP4_INPUT
                            || next1 == GRE_INPUT_NEXT_IP6_INPUT))
              u64 key = ((u64)(vnet_buffer(b1)->gre.dst) << 32) |
                         (u64)(vnet_buffer(b1)->gre.src);

              if (cached_tunnel_key != key)
                  vnet_hw_interface_t * hi;
                  ip4_main_t * ip4m = &ip4_main;
                  p = hash_get (gm->tunnel_by_key, key);
                      next1 = GRE_INPUT_NEXT_DROP;
                      b1->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
                      t = pool_elt_at_index (gm->tunnels, p[0]);
                      hi = vnet_get_hw_interface (gm->vnet_main,
                      tunnel_sw_if_index = hi->sw_if_index;
                      tunnel_fib_index = vec_elt (ip4m->fib_index_by_sw_if_index,

                      cached_tunnel_sw_if_index = tunnel_sw_if_index;
                      cached_tunnel_fib_index = tunnel_fib_index;
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                  tunnel_fib_index = cached_tunnel_fib_index;

              u32 len = vlib_buffer_length_in_chain (vm, b1);
              vnet_interface_main_t *im = &gm->vnet_main->interface_main;
              vlib_increment_combined_counter (im->combined_sw_if_counters
                                               + VNET_INTERFACE_COUNTER_RX,
              vnet_buffer(b1)->sw_if_index[VLIB_TX] = tunnel_fib_index;

          /* Record trace entries (outer addresses + IPv4 length,
             still in network byte order) when tracing is enabled. */
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
              gre_rx_trace_t *tr = vlib_add_trace (vm, node,
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
              gre_rx_trace_t *tr = vlib_add_trace (vm, node,
              tr->length = ip1->length;
              tr->src.as_u32 = ip1->src_address.as_u32;
              tr->dst.as_u32 = ip1->dst_address.as_u32;

          /* Finally strip the GRE header so the payload node sees its
             own header at the current pointer. */
          vlib_buffer_advance (b0, sizeof (*h0));
          vlib_buffer_advance (b1, sizeof (*h1));

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);

      /* Single-packet loop for the remainder; same logic as above. */
      while (n_left_from > 0 && n_left_to_next > 0)
          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);

          vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
          vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*ip0));

          h0 = vlib_buffer_get_current (b0);

          i0 = sparse_vec_index (rt->next_by_protocol, h0->protocol);
          next0 = vec_elt(rt->next_by_protocol, i0);

          node->errors[i0 == SPARSE_VEC_INVALID_INDEX
                       ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];

          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
          next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;

          /* For IP payload we need to find source interface
             so we can increase counters and help forward node to
          if (PREDICT_FALSE(next0 == GRE_INPUT_NEXT_IP4_INPUT
                            || next0 == GRE_INPUT_NEXT_IP6_INPUT))
              u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
                         (u64)(vnet_buffer(b0)->gre.src);

              /* NOTE(review): same never-updated cache key as in the
                 dual loop above -- verify against the full file. */
              if (cached_tunnel_key != key)
                  vnet_hw_interface_t * hi;
                  ip4_main_t * ip4m = &ip4_main;
                  p = hash_get (gm->tunnel_by_key, key);
                      next0 = GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
                      t = pool_elt_at_index (gm->tunnels, p[0]);
                      hi = vnet_get_hw_interface (gm->vnet_main,
                      tunnel_sw_if_index = hi->sw_if_index;
                      tunnel_fib_index = vec_elt (ip4m->fib_index_by_sw_if_index,

                      cached_tunnel_sw_if_index = tunnel_sw_if_index;
                      cached_tunnel_fib_index = tunnel_fib_index;
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                  tunnel_fib_index = cached_tunnel_fib_index;

              u32 len = vlib_buffer_length_in_chain (vm, b0);
              vnet_interface_main_t *im = &gm->vnet_main->interface_main;
              vlib_increment_combined_counter (im->combined_sw_if_counters
                                               + VNET_INTERFACE_COUNTER_RX,
              vnet_buffer(b0)->sw_if_index[VLIB_TX] = tunnel_fib_index;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
              gre_rx_trace_t *tr = vlib_add_trace (vm, node,
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*h0));

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* Counted for every packet that entered the node, including those
     dropped above. */
  vlib_node_increment_counter (vm, gre_input_node.index,
                               GRE_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
/* Human-readable strings for the gre error counters, one per
   gre_error_t value, generated via the gre_error X-macro.
   NOTE(review): the table's terminating lines are not visible in
   this extract -- verify against the full file. */
static char * gre_error_strings[] = {
#define gre_error(n,s) s,
411 VLIB_REGISTER_NODE (gre_input_node) = {
412 .function = gre_input,
414 /* Takes a vector of packets. */
415 .vector_size = sizeof (u32),
417 .runtime_data_bytes = sizeof (gre_input_runtime_t),
419 .n_errors = GRE_N_ERROR,
420 .error_strings = gre_error_strings,
422 .n_next_nodes = GRE_INPUT_N_NEXT,
424 #define _(s,n) [GRE_INPUT_NEXT_##s] = n,
425 foreach_gre_input_next
429 .format_buffer = format_gre_header_with_length,
430 .format_trace = format_gre_rx_trace,
431 .unformat_buffer = unformat_gre_header,
434 VLIB_NODE_FUNCTION_MULTIARCH (gre_input_node, gre_input)
437 gre_register_input_protocol (vlib_main_t * vm,
438 gre_protocol_t protocol,
441 gre_main_t * em = &gre_main;
442 gre_protocol_info_t * pi;
443 gre_input_runtime_t * rt;
447 clib_error_t * error = vlib_call_init_function (vm, gre_input_init);
449 clib_error_report (error);
452 pi = gre_get_protocol_info (em, protocol);
453 pi->node_index = node_index;
454 pi->next_index = vlib_node_add_next (vm,
455 gre_input_node.index,
458 /* Setup gre protocol -> next index sparse vector mapping. */
459 rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
460 n = sparse_vec_validate (rt->next_by_protocol,
461 clib_host_to_net_u16 (protocol));
462 n[0] = pi->next_index;
466 gre_setup_node (vlib_main_t * vm, u32 node_index)
468 vlib_node_t * n = vlib_get_node (vm, node_index);
469 pg_node_t * pn = pg_get_node (node_index);
471 n->format_buffer = format_gre_header_with_length;
472 n->unformat_buffer = unformat_gre_header;
473 pn->unformat_edit = unformat_pg_gre_header;
476 static clib_error_t * gre_input_init (vlib_main_t * vm)
478 gre_input_runtime_t * rt;
479 vlib_node_t *ip4_input, *ip6_input, *mpls_unicast_input;
482 clib_error_t * error;
483 error = vlib_call_init_function (vm, gre_init);
485 clib_error_report (error);
488 gre_setup_node (vm, gre_input_node.index);
490 rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
492 rt->next_by_protocol = sparse_vec_new
493 (/* elt bytes */ sizeof (rt->next_by_protocol[0]),
494 /* bits in index */ BITS (((gre_header_t *) 0)->protocol));
496 /* These could be moved to the supported protocol input node defn's */
497 ip4_input = vlib_get_node_by_name (vm, (u8 *)"ip4-input");
499 ip6_input = vlib_get_node_by_name (vm, (u8 *)"ip6-input");
501 mpls_unicast_input = vlib_get_node_by_name (vm, (u8 *)"mpls-gre-input");
502 ASSERT(mpls_unicast_input);
504 gre_register_input_protocol (vm, GRE_PROTOCOL_ip4,
507 gre_register_input_protocol (vm, GRE_PROTOCOL_ip6,
510 gre_register_input_protocol (vm, GRE_PROTOCOL_mpls_unicast,
511 mpls_unicast_input->index);
513 ip4_register_protocol (IP_PROTOCOL_GRE, gre_input_node.index);
518 VLIB_INIT_FUNCTION (gre_input_init);