/*
 * decap.c: vxlan tunnel decap packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan/vxlan.h>
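
/*
 * vxlan4-input / vxlan6-input sit behind UDP local dispatch: packets
 * arrive here with current_data already pointing at the VXLAN header.
 * Each packet is matched to a tunnel by (outer source address, VNI)
 * and, on success, handed to l2-input with the tunnel's sw_if_index as
 * the RX interface so L2 learning and forwarding see the tunnel
 * interface.
 */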
vlib_node_registration_t vxlan4_input_node;
vlib_node_registration_t vxlan6_input_node;

typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
} vxlan_rx_trace_t;

static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_rx_trace_t * t = va_arg (*args, vxlan_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
                  t->tunnel_index, t->vni, t->next_index, t->error);
    }
  else
    {
      s = format (s, "VXLAN decap error - tunnel for vni %d does not exist",
                  t->vni);
    }
  return s;
}
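
/*
 * Shared worker for the IPv4 and IPv6 decap nodes.  is_ip4 is a
 * compile-time constant in each instantiation, so the unused branches
 * vanish when this is inlined into vxlan4_input / vxlan6_input.  The
 * loop follows the usual vlib pattern: a dual-packet main loop with
 * prefetch, then a single-packet cleanup loop.  A one-entry cache of
 * the last (source, VNI) key avoids a hash lookup when consecutive
 * packets belong to the same tunnel, and RX counters are batched per
 * tunnel rather than bumped per packet.
 */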
always_inline uword
vxlan_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame,
             char is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  vxlan4_tunnel_key_t last_key4;
  vxlan6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 cpu_index = os_get_cpu_number();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    memset (&last_key6, 0xff, sizeof (last_key6));
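
  /* Poison the key cache: no configured tunnel is keyed on an all-ones
     source address, so the first packet always takes the hash-lookup
     path.  The IPv4 key packs into a u64 and is compared in one shot;
     the IPv6 key is compared with memcmp below. */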

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          vxlan_header_t * vxlan0, * vxlan1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          vxlan_tunnel_t * t0, * t1;
          vxlan4_tunnel_key_t key4_0, key4_1;
          vxlan6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
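          /* The prefetch above warms the buffer headers and first two
             cache lines of data for the next pair (from[2], from[3])
             while b0/b1 are processed; the n_left_from >= 4 bound
             guarantees those slots exist. */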
115
116           bi0 = from[0];
117           bi1 = from[1];
118           to_next[0] = bi0;
119           to_next[1] = bi1;
120           from += 2;
121           to_next += 2;
122           n_left_to_next -= 2;
123           n_left_from -= 2;
124
125           b0 = vlib_get_buffer (vm, bi0);
126           b1 = vlib_get_buffer (vm, bi1);
127
128           /* udp leaves current_data pointing at the vxlan header */
129           vxlan0 = vlib_buffer_get_current (b0);
130           vxlan1 = vlib_buffer_get_current (b1);
131
132           next0 = next1 = VXLAN_INPUT_NEXT_L2_INPUT;
133
134           if (is_ip4) {
135             vlib_buffer_advance
136               (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
137             vlib_buffer_advance
138               (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
139             ip4_0 = vlib_buffer_get_current (b0);
140             ip4_1 = vlib_buffer_get_current (b1);
141           } else {
142             vlib_buffer_advance
143               (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
144             vlib_buffer_advance
145               (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
146             ip6_0 = vlib_buffer_get_current (b0);
147             ip6_1 = vlib_buffer_get_current (b1);
148           }
149
150           /* pop (ip, udp, vxlan) */
151           if (is_ip4) {
152             vlib_buffer_advance
153               (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
154             vlib_buffer_advance
155               (b1, sizeof(*ip4_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
156           } else {
157             vlib_buffer_advance
158               (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
159             vlib_buffer_advance
160               (b1, sizeof(*ip6_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
161           }
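
          /* Net effect of the advances: current_data started at the
             VXLAN header, was rewound to expose the outer IP header
             (for the source address), and now points at the inner
             Ethernet frame. */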

          tunnel_index0 = ~0;
          error0 = 0;

          tunnel_index1 = ~0;
          error1 = 0;

          if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
            {
              error0 = VXLAN_ERROR_BAD_FLAGS;
              next0 = VXLAN_INPUT_NEXT_DROP;
              goto trace0;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace0;
                  }

                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace0;
                  }

                clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          }

          t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
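          /* When the tunnel changes, the current packet is backed out
             of the running totals, the accumulated batch is flushed
             against the previous tunnel, and the counters restart at
             this packet.  stats_sw_if_index persists across frames via
             node->runtime_data[0]. */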

        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }

          if (PREDICT_FALSE (vxlan1->flags != VXLAN_FLAGS_I))
            {
              error1 = VXLAN_ERROR_BAD_FLAGS;
              next1 = VXLAN_INPUT_NEXT_DROP;
              goto trace1;
            }

          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.vni = vxlan1->vni_reserved;

            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);

                if (p1 == 0)
                  {
                    error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next1 = VXLAN_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.vni = vxlan1->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);

                if (p1 == 0)
                  {
                    error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next1 = VXLAN_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                clib_memcpy (&last_key6, &key6_1, sizeof(key6_1));
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
          }

          t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);

          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }

        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->vni = vnet_get_vni (vxlan1);
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          vxlan_header_t * vxlan0;
          uword * p0;
          u32 tunnel_index0;
          vxlan_tunnel_t * t0;
          vxlan4_tunnel_key_t key4_0;
          vxlan6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the vxlan header */
          vxlan0 = vlib_buffer_get_current (b0);

          next0 = VXLAN_INPUT_NEXT_L2_INPUT;

          if (is_ip4) {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
          } else {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
          }

          /* pop (ip, udp, vxlan) */
          if (is_ip4) {
            vlib_buffer_advance
              (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          } else {
            vlib_buffer_advance
              (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          }

          tunnel_index0 = ~0;
          error0 = 0;

          if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
            {
              error0 = VXLAN_ERROR_BAD_FLAGS;
              next0 = VXLAN_INPUT_NEXT_DROP;
              goto trace00;
            }

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace00;
                  }

                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace00;
                  }

                clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          }

          t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;

          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel rx stats are kept? */
  vlib_node_increment_counter (vm, is_ip4 ?
                               vxlan4_input_node.index : vxlan6_input_node.index,
                               VXLAN_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
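
/*
 * For reference, the lookup keys above are assumed to follow the usual
 * vxlan.h layout - a sketch, not authoritative:
 *
 *   typedef CLIB_PACKED (struct {
 *     union {
 *       struct {
 *         u32 src;        // outer IPv4 source address
 *         u32 vni;        // VNI word as on the wire (vni_reserved)
 *       };
 *       u64 as_u64;       // whole key, enabling the single-compare cache
 *     };
 *   }) vxlan4_tunnel_key_t;
 *
 * The IPv6 key carries a full ip6_address_t plus the VNI word, which is
 * why it is cached with clib_memcpy and compared with memcmp.
 */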

static uword
vxlan4_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 0);
}

static char * vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
#undef vxlan_error
};
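
/*
 * The strings come from vxlan_error.def via an X-macro.  A sketch of
 * the expected .def contents, assuming the standard VPP error
 * definition pattern:
 *
 *   vxlan_error (DECAPSULATED, "good packets decapsulated")
 *   vxlan_error (NO_SUCH_TUNNEL, "no such tunnel packets")
 *   ...
 *
 * vxlan.h expands the same file with a different vxlan_error() macro to
 * build the matching VXLAN_ERROR_* enum, so strings and enum values
 * stay in sync by construction.
 */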

VLIB_REGISTER_NODE (vxlan4_input_node) = {
  .function = vxlan4_input,
  .name = "vxlan4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

//temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};
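
/*
 * The next-node table is generated from foreach_vxlan_input_next in
 * vxlan.h.  A sketch, assuming the two arcs this node actually uses:
 *
 *   #define foreach_vxlan_input_next \
 *     _(DROP, "error-drop")          \
 *     _(L2_INPUT, "l2-input")
 *
 * which the _(s,n) expansion above turns into
 * [VXLAN_INPUT_NEXT_DROP] = "error-drop", and so on.
 */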

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input)

VLIB_REGISTER_NODE (vxlan6_input_node) = {
  .function = vxlan6_input,
  .name = "vxlan6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

//temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input)
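
/*
 * VLIB_NODE_FUNCTION_MULTIARCH emits CPU-variant builds of the node
 * function (e.g. with and without newer SIMD extensions, depending on
 * the build configuration) and selects the best variant at runtime.
 */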