4 * Copyright (c) 2012 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/gre/gre.h>
20 #include <vnet/adj/adj.h>
/* NOTE(review): this chunk is an extraction with interior lines elided;
 * the leading integer on each line is the original file line number,
 * not code.  The union member below overlays a combined ip4+GRE header
 * view; its opening "typedef union {" line is not visible here. */
26 ip4_and_gre_header_t ip4_and_gre;
29 } ip4_and_gre_union_t;

/* Tx trace record — the struct's typedef lines and field declarations
 * are elided from this extraction; only its comments are visible. */
32 /* Packet trace structure */
34 /* Tunnel-id / index in tunnel vector */
40 /* tunnel ip4 addresses */
/* Format a gre_tx_trace_t for "show trace": tunnel index, length
 * (stored in net byte order, converted for display) and the ip4
 * src/dst addresses.  NOTE(review): braces and the trailing
 * "return s;" are elided in this extraction. */
45 u8 * format_gre_tx_trace (u8 * s, va_list * args)
47 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
48 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
49 gre_tx_trace_t * t = va_arg (*args, gre_tx_trace_t *);
51 s = format (s, "GRE: tunnel %d len %d src %U dst %U",
52 t->tunnel_id, clib_net_to_host_u16 (t->length),
53 format_ip4_address, &t->src.as_u8,
54 format_ip4_address, &t->dst.as_u8);
/* Format a GRE protocol id: symbolic name when the protocol is
 * registered, raw hex value otherwise.  NOTE(review): the
 * "if (pi)" / "else" lines selecting between the two formats are
 * elided in this extraction. */
58 u8 * format_gre_protocol (u8 * s, va_list * args)
60 gre_protocol_t p = va_arg (*args, u32);
61 gre_main_t * gm = &gre_main;
62 gre_protocol_info_t * pi = gre_get_protocol_info (gm, p);
65 s = format (s, "%s", pi->name);
67 s = format (s, "0x%04x", p);
/* Format a GRE header, optionally followed by its payload.
 *
 * If max_header_bytes is non-zero but smaller than the GRE header
 * itself, only "truncated" can be reported.  Otherwise print the
 * protocol and, when payload bytes remain beyond the header, hand
 * them to the per-protocol node's registered buffer formatter.
 * NOTE(review): braces and the trailing "return s;" are elided in
 * this extraction; leading integers are original line numbers. */
72 u8 * format_gre_header_with_length (u8 * s, va_list * args)
74 gre_main_t * gm = &gre_main;
75 gre_header_t * h = va_arg (*args, gre_header_t *);
76 u32 max_header_bytes = va_arg (*args, u32);
77 gre_protocol_t p = clib_net_to_host_u16 (h->protocol);
78 uword indent, header_bytes;
80 header_bytes = sizeof (h[0]);
81 if (max_header_bytes != 0 && header_bytes > max_header_bytes)
82 return format (s, "gre header truncated");
84 indent = format_get_indent (s);
86 s = format (s, "GRE %U", format_gre_protocol, p);
/* BUGFIX: condition previously repeated "header_bytes > max_header_bytes",
 * which can never hold here (that case already returned "truncated"
 * above), so the payload formatter was unreachable.  Payload exists
 * when the header is strictly smaller than the bytes available. */
88 if (max_header_bytes != 0 && header_bytes < max_header_bytes)
90 gre_protocol_info_t * pi = gre_get_protocol_info (gm, p);
91 vlib_node_t * node = vlib_get_node (gm->vlib_main, pi->node_index);
92 if (node->format_buffer)
93 s = format (s, "\n%U%U",
94 format_white_space, indent,
95 node->format_buffer, (void *) (h + 1),
96 max_header_bytes - header_bytes);
/* Format a GRE header with no length limit: delegate to
 * format_gre_header_with_length with max_header_bytes = 0. */
102 u8 * format_gre_header (u8 * s, va_list * args)
104 gre_header_t * h = va_arg (*args, gre_header_t *);
105 return format (s, "%U", format_gre_header_with_length, h, 0);
108 /* Returns gre protocol as an int in host byte order. */
/* unformat function: look up a symbolic protocol name in the
 * name->index hash and store the protocol number (host byte order)
 * through *result.  NOTE(review): the "uword" return-type line, the
 * declaration of "i", braces and the success/failure returns are
 * elided in this extraction. */
110 unformat_gre_protocol_host_byte_order (unformat_input_t * input,
113 u16 * result = va_arg (*args, u16 *);
114 gre_main_t * gm = &gre_main;
118 if (unformat_user (input, unformat_vlib_number_by_name,
119 gm->protocol_info_by_name, &i))
121 gre_protocol_info_t * pi = vec_elt_at_index (gm->protocol_infos, i);
122 *result = pi->protocol;
/* unformat wrapper: parse the protocol in host byte order, then
 * convert *result to network byte order in place.  NOTE(review): the
 * failure return between the parse and the conversion is elided in
 * this extraction. */
130 unformat_gre_protocol_net_byte_order (unformat_input_t * input,
133 u16 * result = va_arg (*args, u16 *);
134 if (! unformat_user (input, unformat_gre_protocol_host_byte_order, result))
136 *result = clib_host_to_net_u16 ((u16) *result);
/* unformat a whole GRE header: parse the protocol, fill a stack
 * gre_header_t, then append its bytes to the caller's byte vector.
 * NOTE(review): the declaration of the parsed protocol variable and
 * the function's return statements are elided in this extraction;
 * "p" is later reused as the u8 * destination from vec_add2. */
141 unformat_gre_header (unformat_input_t * input, va_list * args)
143 u8 ** result = va_arg (*args, u8 **);
144 gre_header_t _h, * h = &_h;
147 if (! unformat (input, "%U",
148 unformat_gre_protocol_host_byte_order, &p))
151 h->protocol = clib_host_to_net_u16 (p);
153 /* Add header to result. */
156 u32 n_bytes = sizeof (h[0]);
158 vec_add2 (*result, p, n_bytes);
159 clib_memcpy (p, h, n_bytes);
/* set_rewrite callback for the GRE hw interface class.
 * Deliberately produces no rewrite (see the conundrum note below);
 * the full ip4+gre encap is built in the fast path instead.  The
 * template-building code is compiled out behind
 * THINGS_WORKED_AS_ONE_MIGHT_LIKE and kept only as documentation. */
165 static uword gre_set_rewrite (vnet_main_t * vnm,
170 uword max_rewrite_bytes)
173 * Conundrum: packets from tun/tap destined for the tunnel
174 * actually have this rewrite applied. Transit packets do not.
175 * To make the two cases equivalent, don't generate a
176 * rewrite here, build the entire header in the fast path.
180 #ifdef THINGS_WORKED_AS_ONE_MIGHT_LIKE
/* Dead code below: would map the L3 packet type to a GRE protocol
 * and fill an ip4+gre template into the rewrite buffer. */
181 ip4_and_gre_header_t * h = rewrite;
182 gre_protocol_t protocol;
184 if (max_rewrite_bytes < sizeof (h[0]))
188 #define _(a,b) case VNET_L3_PACKET_TYPE_##a: protocol = GRE_PROTOCOL_##b; break
196 memset (h, 0, sizeof (*h));
197 h->ip4.ip_version_and_header_length = 0x45;
199 h->ip4.protocol = IP_PROTOCOL_GRE;
200 h->gre.protocol = clib_host_to_net_u16 (protocol);
202 return sizeof (h[0]);
/* TX node function for L3 GRE tunnel interfaces.
 * For each buffer: fix up the ip4 total-length and checksum of the
 * encap that the midchain adjacency already prepended, then send the
 * buffer to the DPO the midchain adjacency is stacked on.
 * NOTE(review): this extraction elides several interior lines
 * (next_index/b0/ip0 declarations, buffer dequeue bookkeeping, the
 * length-field assignment target, enqueue's final arguments and
 * closing braces); leading integers are original line numbers. */
207 gre_interface_tx (vlib_main_t * vm,
208 vlib_node_runtime_t * node,
209 vlib_frame_t * frame)
211 gre_main_t * gm = &gre_main;
213 u32 * from, * to_next, n_left_from, n_left_to_next;
214 vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
215 gre_tunnel_t *t = pool_elt_at_index (gm->tunnels, rd->dev_instance);
217 /* Vector of buffer / pkt indices we're supposed to process */
218 from = vlib_frame_vector_args (frame);
220 /* Number of buffers / pkts */
221 n_left_from = frame->n_vectors;
223 /* Speculatively send the first buffer to the last disposition we used */
224 next_index = node->cached_next_index;
226 while (n_left_from > 0)
228 /* set up to enqueue to our disposition with index = next_index */
229 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
235 while (n_left_from > 0 && n_left_to_next > 0)
237 u32 bi0, adj_index0, next0;
238 const ip_adjacency_t * adj0;
239 const dpo_id_t *dpo0;
250 b0 = vlib_get_buffer(vm, bi0);
251 ip0 = vlib_buffer_get_current (b0);
253 /* Fixup the checksum and len fields in the GRE tunnel encap
254 * that was applied at the midchain node */
256 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
257 ip0->checksum = ip4_header_checksum (ip0);
259 /* Follow the DPO on which the midchain is stacked */
260 adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
261 adj0 = adj_get(adj_index0);
262 dpo0 = &adj0->sub_type.midchain.next_dpo;
263 next0 = dpo0->dpoi_next_node;
/* Swap in the stacked DPO's index so the next node resolves it. */
264 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
266 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
268 gre_tx_trace_t *tr = vlib_add_trace (vm, node,
270 tr->tunnel_id = t - gm->tunnels;
271 tr->length = ip0->length;
272 tr->src.as_u32 = ip0->src_address.as_u32;
273 tr->dst.as_u32 = ip0->dst_address.as_u32;
276 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
277 to_next, n_left_to_next,
281 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* NOTE(review): encap counter is bumped on gre_input_node — looks
 * intentional (shared counter index) but worth confirming. */
284 vlib_node_increment_counter (vm, gre_input_node.index,
285 GRE_ERROR_PKTS_ENCAP, frame->n_vectors);
287 return frame->n_vectors;
/* TX node function for L2 (transparent-ethernet) GRE tunnel
 * interfaces.  Each buffer is pointed at the tunnel's ethernet-link
 * adjacency; the midchain applies the encap downstream, so the trace
 * records the tunnel's own src/dst addresses.
 * NOTE(review): this extraction elides several interior lines
 * (next_index/b0 declarations, buffer dequeue bookkeeping, enqueue's
 * final arguments and closing braces); leading integers are original
 * line numbers. */
291 gre_l2_interface_tx (vlib_main_t * vm,
292 vlib_node_runtime_t * node,
293 vlib_frame_t * frame)
295 gre_main_t * gm = &gre_main;
297 u32 * from, * to_next, n_left_from, n_left_to_next;
298 vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
299 const gre_tunnel_t *gt = pool_elt_at_index (gm->tunnels, rd->dev_instance);
301 /* Vector of buffer / pkt indices we're supposed to process */
302 from = vlib_frame_vector_args (frame);
304 /* Number of buffers / pkts */
305 n_left_from = frame->n_vectors;
307 /* Speculatively send the first buffer to the last disposition we used */
308 next_index = node->cached_next_index;
310 while (n_left_from > 0)
312 /* set up to enqueue to our disposition with index = next_index */
313 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
319 while (n_left_from > 0 && n_left_to_next > 0)
331 b0 = vlib_get_buffer(vm, bi0);
333 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = gt->adj_index[FIB_LINK_ETHERNET];
335 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
337 gre_tx_trace_t *tr = vlib_add_trace (vm, node,
339 tr->tunnel_id = gt - gm->tunnels;
340 tr->length = vlib_buffer_length_in_chain (vm, b0);
341 tr->src.as_u32 = gt->tunnel_src.as_u32;
/* BUGFIX: dst was copied from tunnel_src (copy/paste error), so
 * packet traces showed the source address twice; use tunnel_dst. */
342 tr->dst.as_u32 = gt->tunnel_dst.as_u32;
345 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
346 to_next, n_left_to_next,
350 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
353 vlib_node_increment_counter (vm, gre_input_node.index,
354 GRE_ERROR_PKTS_ENCAP, frame->n_vectors);
356 return frame->n_vectors;
/* Admin up/down handler shared by both GRE device classes: mirror
 * the admin flag onto the hw link state, but only for sw interfaces
 * that map to a GRE tunnel.  NOTE(review): the early "not ours"
 * returns, the declarations of ti/t, and braces are elided in this
 * extraction. */
359 static clib_error_t *
360 gre_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
362 gre_main_t * gm = &gre_main;
363 vnet_hw_interface_t * hi;
367 hi = vnet_get_hw_interface (vnm, hw_if_index);
369 if (NULL == gm->tunnel_index_by_sw_if_index ||
370 hi->sw_if_index >= vec_len(gm->tunnel_index_by_sw_if_index))
373 ti = gm->tunnel_index_by_sw_if_index[hi->sw_if_index];
376 /* not one of ours */
379 t = pool_elt_at_index(gm->tunnels, ti);
381 if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
382 vnet_hw_interface_set_flags (vnm, hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);
384 vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
388 return /* no error */ 0;
/* Device-name formatter: "gre<instance>". */
391 static u8 * format_gre_tunnel_name (u8 * s, va_list * args)
393 u32 dev_instance = va_arg (*args, u32);
394 return format (s, "gre%d", dev_instance);
/* "show hardware" formatter for L3 GRE tunnel devices (verbose flag
 * is accepted but unused). */
397 static u8 * format_gre_device (u8 * s, va_list * args)
399 u32 dev_instance = va_arg (*args, u32);
400 CLIB_UNUSED (int verbose) = va_arg (*args, int);
402 s = format (s, "GRE tunnel: id %d\n", dev_instance);
/* "show hardware" formatter for L2 GRE tunnel devices (verbose flag
 * is accepted but unused). */
406 static u8 * format_gre_l2_device (u8 * s, va_list * args)
408 u32 dev_instance = va_arg (*args, u32);
409 CLIB_UNUSED (int verbose) = va_arg (*args, int);
411 s = format (s, "GRE L2-tunnel: id %d\n", dev_instance);
/* Device class registration for L3 GRE tunnel interfaces.
 * NOTE(review): the closing "};" and the multiarch macro's second
 * argument are elided in this extraction. */
415 VNET_DEVICE_CLASS (gre_device_class) = {
416 .name = "GRE tunnel device",
417 .format_device_name = format_gre_tunnel_name,
418 .format_device = format_gre_device,
419 .format_tx_trace = format_gre_tx_trace,
420 .tx_function = gre_interface_tx,
421 .admin_up_down_function = gre_interface_admin_up_down,
427 VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_device_class,
/* Device class registration for L2 (transparent-ethernet) GRE tunnel
 * interfaces.  NOTE(review): the closing "};" and the multiarch
 * macro's second argument are elided in this extraction. */
430 VNET_DEVICE_CLASS (gre_l2_device_class) = {
431 .name = "GRE L2 tunnel device",
432 .format_device_name = format_gre_tunnel_name,
433 .format_device = format_gre_l2_device,
434 .format_tx_trace = format_gre_tx_trace,
435 .tx_function = gre_l2_interface_tx,
436 .admin_up_down_function = gre_interface_admin_up_down,
442 VLIB_DEVICE_TX_FUNCTION_MULTIARCH (gre_l2_device_class,
/* HW interface class: GRE header format/unformat hooks plus the
 * (intentionally no-op) rewrite builder.  NOTE(review): the .name
 * member and closing "};" are elided in this extraction. */
446 VNET_HW_INTERFACE_CLASS (gre_hw_interface_class) = {
448 .format_header = format_gre_header_with_length,
449 .unformat_header = unformat_gre_header,
450 .set_rewrite = gre_set_rewrite,
/* Register one GRE protocol: append an info record to the vector and
 * index it by both protocol number and symbolic name.  next_index /
 * node_index start unset (~0) until an input node registers.
 * NOTE(review): the declaration of "i" and braces are elided. */
453 static void add_protocol (gre_main_t * gm,
454 gre_protocol_t protocol,
455 char * protocol_name)
457 gre_protocol_info_t * pi;
460 vec_add2 (gm->protocol_infos, pi, 1);
461 i = pi - gm->protocol_infos;
463 pi->name = protocol_name;
464 pi->protocol = protocol;
465 pi->next_index = pi->node_index = ~0;
467 hash_set (gm->protocol_info_by_protocol, protocol, i);
468 hash_set_mem (gm->protocol_info_by_name, pi->name, i);
/* Module init: zero gre_main, run the ip init functions, hook the
 * GRE ip-protocol formatters, create the lookup hashes, register the
 * known protocols (the foreach_gre_protocol expansion invoking the
 * "_" macro is elided in this extraction) and finally chain to
 * gre_input_init. */
471 static clib_error_t * gre_init (vlib_main_t * vm)
473 gre_main_t * gm = &gre_main;
474 clib_error_t * error;
475 ip_main_t * im = &ip_main;
476 ip_protocol_info_t * pi;
478 memset (gm, 0, sizeof (gm[0]));
480 gm->vnet_main = vnet_get_main();
482 if ((error = vlib_call_init_function (vm, ip_main_init)))
485 if ((error = vlib_call_init_function (vm, ip4_lookup_init)))
488 /* Set up the ip packet generator */
489 pi = ip_get_protocol_info (im, IP_PROTOCOL_GRE);
490 pi->format_header = format_gre_header;
491 pi->unformat_pg_edit = unformat_pg_gre_header;
493 gm->protocol_info_by_name = hash_create_string (0, sizeof (uword));
494 gm->protocol_info_by_protocol = hash_create (0, sizeof (uword));
495 gm->tunnel_by_key = hash_create (0, sizeof (uword));
497 #define _(n,s) add_protocol (gm, GRE_PROTOCOL_##s, #s);
501 return vlib_call_init_function (vm, gre_input_init);
504 VLIB_INIT_FUNCTION (gre_init);
/* Accessor that ensures gre_init has run before handing out the main
 * struct.  NOTE(review): the function body continues past the last
 * line of this extraction (presumably "return &gre_main;"). */
506 gre_main_t * gre_get_main (vlib_main_t * vm)
508 vlib_call_init_function (vm, gre_init);