/*
 * node.c: gre packet processing
 *
 * Copyright (c) 2012 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/gre/gre.h>
#include <vnet/mpls/mpls.h>
#include <vppinfra/sparse_vec.h>

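/*
 * gre-input is the GRE decapsulation node.  ip4-local hands it packets with
 * the outer IPv4 header still current; the node records the outer source and
 * destination addresses, strips the IPv4 and GRE headers, demultiplexes on
 * the GRE protocol field via the sparse vector below, maps the outer address
 * pair back to a GRE tunnel interface (so the RX sw_if_index and per-tunnel
 * counters are correct), and hands the inner packet to ethernet-input,
 * ip4-input, ip6-input or mpls-input.  Packets with an unknown protocol, an
 * unsupported GRE version or no matching tunnel are dropped with a
 * corresponding error counter.
 */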
#define foreach_gre_input_next                  \
_(PUNT, "error-punt")                           \
_(DROP, "error-drop")                           \
_(ETHERNET_INPUT, "ethernet-input")             \
_(IP4_INPUT, "ip4-input")                       \
_(IP6_INPUT, "ip6-input")                       \
_(MPLS_INPUT, "mpls-input")

typedef enum {
#define _(s,n) GRE_INPUT_NEXT_##s,
  foreach_gre_input_next
#undef _
  GRE_INPUT_N_NEXT,
} gre_input_next_t;
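
/*
 * foreach_gre_input_next is expanded twice: once here to build the
 * gre_input_next_t enum and once in the VLIB_REGISTER_NODE block at the
 * bottom of this file to build the matching .next_nodes[] name table, so
 * the next-index enum and the node names cannot drift apart.
 */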

typedef struct {
  u32 tunnel_id;
  u32 length;
  ip4_address_t src;
  ip4_address_t dst;
} gre_rx_trace_t;

u8 * format_gre_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gre_rx_trace_t * t = va_arg (*args, gre_rx_trace_t *);

  s = format (s, "GRE: tunnel %d len %d src %U dst %U",
              t->tunnel_id, clib_net_to_host_u16(t->length),
              format_ip4_address, &t->src.as_u8,
              format_ip4_address, &t->dst.as_u8);
  return s;
}
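
/*
 * A traced packet shows up in "show trace" along the lines of
 *   GRE: tunnel 12 len 110 src 10.0.0.1 dst 10.0.0.2
 * (values illustrative): tunnel is the sw_if_index of the GRE interface,
 * len the outer IPv4 total length, src/dst the outer addresses.
 */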

typedef struct {
  /* Sparse vector mapping gre protocol in network byte order
     to next index. */
  u16 * next_by_protocol;
} gre_input_runtime_t;
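
/*
 * next_by_protocol is sized for the full 16-bit GRE protocol space but only
 * holds entries for protocols that have been registered via
 * gre_register_input_protocol() below; a lookup for anything else yields
 * SPARSE_VEC_INVALID_INDEX, which the input loops translate into the
 * "unknown protocol" error.
 */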

static uword
gre_input (vlib_main_t * vm,
           vlib_node_runtime_t * node,
           vlib_frame_t * from_frame)
{
  gre_main_t * gm = &gre_main;
  gre_input_runtime_t * rt = (void *) node->runtime_data;
  __attribute__((unused)) u32 n_left_from, next_index, * from, * to_next;
  u64 cached_tunnel_key = (u64) ~0;
  u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index = 0;

  u32 cpu_index = os_get_cpu_number();
  u32 len;
  vnet_interface_main_t *im = &gm->vnet_main->interface_main;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

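      /*
       * Standard vlib dispatch pattern: the first loop below handles packets
       * two at a time, prefetching the buffer headers and the start of the
       * packet data for the next pair to hide memory latency; the second
       * loop mops up the remainder one packet at a time with the same logic.
       */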
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          gre_header_t * h0, * h1;
          u16 version0, version1;
          int verr0, verr1;
          u32 i0, i1, next0, next1, protocol0, protocol1;
          ip4_header_t *ip0, *ip1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (h1[0]), LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          /* ip4_local hands us the ip header, not the gre header */
          ip0 = vlib_buffer_get_current (b0);
          ip1 = vlib_buffer_get_current (b1);

          /* Save src + dst ip4 address, e.g. for mpls-o-gre */
          vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
          vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;
          vnet_buffer(b1)->gre.src = ip1->src_address.as_u32;
          vnet_buffer(b1)->gre.dst = ip1->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*ip0));
          vlib_buffer_advance (b1, sizeof (*ip1));

          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);

          /* Index sparse array with network byte order. */
          protocol0 = h0->protocol;
          protocol1 = h1->protocol;
          sparse_vec_index2 (rt->next_by_protocol, protocol0, protocol1,
                             &i0, &i1);
          next0 = vec_elt(rt->next_by_protocol, i0);
          next1 = vec_elt(rt->next_by_protocol, i1);

          b0->error = node->errors[i0 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];
          b1->error = node->errors[i1 == SPARSE_VEC_INVALID_INDEX ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];

          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          version1 = clib_net_to_host_u16 (h1->flags_and_version);
          verr1 = version1 & GRE_VERSION_MASK;

          b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
              : b0->error;
          next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;
          b1->error = verr1 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
              : b1->error;
          next1 = verr1 ? GRE_INPUT_NEXT_DROP : next1;

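          /*
           * Map the outer (dst, src) address pair back to the GRE tunnel it
           * belongs to.  The lookup key packs the outer destination address
           * into the upper 32 bits and the source address into the lower 32
           * bits, and must match the key under which the tunnel was hashed
           * into gm->tunnel_by_key when it was created.  A one-entry cache
           * (cached_tunnel_key / cached_tunnel_sw_if_index) avoids repeating
           * the hash lookup when consecutive packets belong to the same
           * tunnel; the same pattern is used for b1 and in the single-buffer
           * loop below.
           */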
          /* Payload is one we can forward: find the tunnel and fix up
             the RX interface. */
          if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT
                           || next0 == GRE_INPUT_NEXT_IP6_INPUT
                           || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
                           || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
            {
              u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
                         (u64)(vnet_buffer(b0)->gre.src);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (gm->tunnel_by_key, key);
                  if (!p)
                    {
                      next0 = GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop0;
                    }
                  t = pool_elt_at_index (gm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (gm->vnet_main,
                            t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;

                  /* Cache the key as well as the result so the cache
                     can hit on the next packet. */
                  cached_tunnel_key = key;
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
            }
          else
            {
                next0 = GRE_INPUT_NEXT_DROP;
                goto drop0;
            }
          len = vlib_buffer_length_in_chain (vm, b0);
          vlib_increment_combined_counter (im->combined_sw_if_counters
                                           + VNET_INTERFACE_COUNTER_RX,
                                           cpu_index,
                                           tunnel_sw_if_index,
                                           1 /* packets */,
                                           len /* bytes */);

          vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;

drop0:
          if (PREDICT_TRUE(next1 == GRE_INPUT_NEXT_IP4_INPUT
                           || next1 == GRE_INPUT_NEXT_IP6_INPUT
                           || next1 == GRE_INPUT_NEXT_ETHERNET_INPUT
                           || next1 == GRE_INPUT_NEXT_MPLS_INPUT))
            {
              u64 key = ((u64)(vnet_buffer(b1)->gre.dst) << 32) |
                         (u64)(vnet_buffer(b1)->gre.src);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (gm->tunnel_by_key, key);
                  if (!p)
                    {
                      next1 = GRE_INPUT_NEXT_DROP;
                      b1->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop1;
                    }
                  t = pool_elt_at_index (gm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (gm->vnet_main,
                            t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;

                  /* Cache both the key and the result. */
                  cached_tunnel_key = key;
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
            }
          else
            {
                next1 = GRE_INPUT_NEXT_DROP;
                goto drop1;
            }
          len = vlib_buffer_length_in_chain (vm, b1);
          vlib_increment_combined_counter (im->combined_sw_if_counters
                                           + VNET_INTERFACE_COUNTER_RX,
                                           cpu_index,
                                           tunnel_sw_if_index,
                                           1 /* packets */,
                                           len /* bytes */);

          vnet_buffer(b1)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;

drop1:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                   b0, sizeof (*tr));
              tr->tunnel_id = tunnel_sw_if_index;
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                   b1, sizeof (*tr));
              tr->tunnel_id = tunnel_sw_if_index;
              tr->length = ip1->length;
              tr->src.as_u32 = ip1->src_address.as_u32;
              tr->dst.as_u32 = ip1->dst_address.as_u32;
            }

          vlib_buffer_advance (b0, sizeof (*h0));
          vlib_buffer_advance (b1, sizeof (*h1));

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

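      /* Single-buffer loop: same processing as above for any remainder. */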
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          gre_header_t * h0;
          ip4_header_t * ip0;
          u16 version0;
          int verr0;
          u32 i0, next0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);

          vnet_buffer(b0)->gre.src = ip0->src_address.as_u32;
          vnet_buffer(b0)->gre.dst = ip0->dst_address.as_u32;

          vlib_buffer_advance (b0, sizeof (*ip0));

          h0 = vlib_buffer_get_current (b0);

          i0 = sparse_vec_index (rt->next_by_protocol, h0->protocol);
          next0 = vec_elt(rt->next_by_protocol, i0);

          b0->error =
              node->errors[i0 == SPARSE_VEC_INVALID_INDEX
                           ? GRE_ERROR_UNKNOWN_PROTOCOL : GRE_ERROR_NONE];

          version0 = clib_net_to_host_u16 (h0->flags_and_version);
          verr0 = version0 & GRE_VERSION_MASK;
          b0->error = verr0 ? node->errors[GRE_ERROR_UNSUPPORTED_VERSION]
              : b0->error;
          next0 = verr0 ? GRE_INPUT_NEXT_DROP : next0;

          /* For supported payloads, find the tunnel (RX) interface so
             per-tunnel counters are incremented and the downstream
             forwarding node picks the right FIB. */
          if (PREDICT_TRUE(next0 == GRE_INPUT_NEXT_IP4_INPUT
                           || next0 == GRE_INPUT_NEXT_IP6_INPUT
                           || next0 == GRE_INPUT_NEXT_ETHERNET_INPUT
                           || next0 == GRE_INPUT_NEXT_MPLS_INPUT))
            {
              u64 key = ((u64)(vnet_buffer(b0)->gre.dst) << 32) |
                         (u64)(vnet_buffer(b0)->gre.src);

              if (cached_tunnel_key != key)
                {
                  vnet_hw_interface_t * hi;
                  gre_tunnel_t * t;
                  uword * p;

                  p = hash_get (gm->tunnel_by_key, key);
                  if (!p)
                    {
                      next0 = GRE_INPUT_NEXT_DROP;
                      b0->error = node->errors[GRE_ERROR_NO_SUCH_TUNNEL];
                      goto drop;
                    }
                  t = pool_elt_at_index (gm->tunnels, p[0]);
                  hi = vnet_get_hw_interface (gm->vnet_main,
                            t->hw_if_index);
                  tunnel_sw_if_index = hi->sw_if_index;

                  /* Cache both the key and the result. */
                  cached_tunnel_key = key;
                  cached_tunnel_sw_if_index = tunnel_sw_if_index;
                }
              else
                {
                  tunnel_sw_if_index = cached_tunnel_sw_if_index;
                }
            }
          else
            {
                next0 = GRE_INPUT_NEXT_DROP;
                goto drop;
            }
          len = vlib_buffer_length_in_chain (vm, b0);
          vlib_increment_combined_counter (im->combined_sw_if_counters
                                           + VNET_INTERFACE_COUNTER_RX,
                                           cpu_index,
                                           tunnel_sw_if_index,
                                           1 /* packets */,
                                           len /* bytes */);

          vnet_buffer(b0)->sw_if_index[VLIB_RX] = tunnel_sw_if_index;

drop:
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gre_rx_trace_t *tr = vlib_add_trace (vm, node,
                                                   b0, sizeof (*tr));
              tr->tunnel_id = tunnel_sw_if_index;
              tr->length = ip0->length;
              tr->src.as_u32 = ip0->src_address.as_u32;
              tr->dst.as_u32 = ip0->dst_address.as_u32;
            }

          vlib_buffer_advance (b0, sizeof (*h0));

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, gre_input_node.index,
                               GRE_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}

static char * gre_error_strings[] = {
#define gre_error(n,s) s,
#include "error.def"
#undef gre_error
};
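
/*
 * error.def lists each GRE error as gre_error (SYMBOL, "message");
 * expanding it here with gre_error(n,s) -> s produces the message strings
 * in the same order as the GRE_ERROR_* enum (built elsewhere from the same
 * list), which is what lets the node->errors[GRE_ERROR_...] indexing above
 * and the .error_strings table below line up.
 */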

VLIB_REGISTER_NODE (gre_input_node) = {
  .function = gre_input,
  .name = "gre-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .runtime_data_bytes = sizeof (gre_input_runtime_t),

  .n_errors = GRE_N_ERROR,
  .error_strings = gre_error_strings,

  .n_next_nodes = GRE_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [GRE_INPUT_NEXT_##s] = n,
    foreach_gre_input_next
#undef _
  },

  .format_buffer = format_gre_header_with_length,
  .format_trace = format_gre_rx_trace,
  .unformat_buffer = unformat_gre_header,
};

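/*
 * As elsewhere in vlib, VLIB_NODE_FUNCTION_MULTIARCH arranges for
 * CPU-feature-specific variants of gre_input to be built and the most
 * suitable one to be selected at runtime when multiarch support is enabled.
 */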
VLIB_NODE_FUNCTION_MULTIARCH (gre_input_node, gre_input)

void
gre_register_input_protocol (vlib_main_t * vm,
                             gre_protocol_t protocol,
                             u32 node_index)
{
  gre_main_t * em = &gre_main;
  gre_protocol_info_t * pi;
  gre_input_runtime_t * rt;
  u16 * n;

  {
    clib_error_t * error = vlib_call_init_function (vm, gre_input_init);
    if (error)
      clib_error_report (error);
  }

  pi = gre_get_protocol_info (em, protocol);
  pi->node_index = node_index;
  pi->next_index = vlib_node_add_next (vm,
                                       gre_input_node.index,
                                       node_index);

  /* Setup gre protocol -> next index sparse vector mapping. */
  rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
  n = sparse_vec_validate (rt->next_by_protocol,
                           clib_host_to_net_u16 (protocol));
  n[0] = pi->next_index;
}
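
/*
 * Typical usage is what gre_input_init() does below, e.g.
 *
 *   gre_register_input_protocol (vm, GRE_PROTOCOL_ip4, ip4_input->index);
 *
 * which means "when the GRE protocol field says IPv4, hand the
 * decapsulated packet to ip4-input".  Other features can call this to
 * claim additional GRE protocol values for their own graph nodes.
 */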

static void
gre_setup_node (vlib_main_t * vm, u32 node_index)
{
  vlib_node_t * n = vlib_get_node (vm, node_index);
  pg_node_t * pn = pg_get_node (node_index);

  n->format_buffer = format_gre_header_with_length;
  n->unformat_buffer = unformat_gre_header;
  pn->unformat_edit = unformat_pg_gre_header;
}

static clib_error_t * gre_input_init (vlib_main_t * vm)
{
  gre_input_runtime_t * rt;
  vlib_node_t *ethernet_input, *ip4_input, *ip6_input, *mpls_unicast_input;

  {
    clib_error_t * error;
    error = vlib_call_init_function (vm, gre_init);
    if (error)
      clib_error_report (error);
  }

  gre_setup_node (vm, gre_input_node.index);

  rt = vlib_node_get_runtime_data (vm, gre_input_node.index);

  rt->next_by_protocol = sparse_vec_new
    (/* elt bytes */ sizeof (rt->next_by_protocol[0]),
     /* bits in index */ BITS (((gre_header_t *) 0)->protocol));

  /* These could be moved to the supported protocol input node defn's */
  ethernet_input = vlib_get_node_by_name (vm, (u8 *)"ethernet-input");
  ASSERT(ethernet_input);
  ip4_input = vlib_get_node_by_name (vm, (u8 *)"ip4-input");
  ASSERT(ip4_input);
  ip6_input = vlib_get_node_by_name (vm, (u8 *)"ip6-input");
  ASSERT(ip6_input);
  mpls_unicast_input = vlib_get_node_by_name (vm, (u8 *)"mpls-input");
  ASSERT(mpls_unicast_input);

  gre_register_input_protocol (vm, GRE_PROTOCOL_teb,
                               ethernet_input->index);

  gre_register_input_protocol (vm, GRE_PROTOCOL_ip4,
                               ip4_input->index);

  gre_register_input_protocol (vm, GRE_PROTOCOL_ip6,
                               ip6_input->index);

  gre_register_input_protocol (vm, GRE_PROTOCOL_mpls_unicast,
                               mpls_unicast_input->index);

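  /*
   * Register for IP protocol 47 (GRE) so the IPv4 local-delivery path hands
   * GRE-over-IPv4 packets to gre-input.  As the comment in gre_input()
   * notes, those packets arrive with the outer IP header still current,
   * which is why the node reads it before advancing past it.
   */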
  ip4_register_protocol (IP_PROTOCOL_GRE, gre_input_node.index);

  return 0;
}

VLIB_INIT_FUNCTION (gre_input_init);