/*
 * Repair Doxygen build infrastructure
 * [vpp.git] / src / plugins / ioam / lib-vxlan-gpe / ioam_decap.c
 */
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vppinfra/error.h>
16 #include <vppinfra/hash.h>
17 #include <vnet/vnet.h>
18 #include <vnet/ip/ip.h>
19 #include <vnet/ethernet/ethernet.h>
20 #include <vnet/vxlan-gpe/vxlan_gpe.h>
21 #include <vnet/vxlan-gpe/vxlan_gpe.h>
22 #include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_packet.h>
23 #include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>
24 #include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam_util.h>
25
/* Statistics (not really errors) */
/* X-macro list of per-node counters; expanded below into both the
 * human-readable string table and the matching error enum. */
#define foreach_vxlan_gpe_decap_ioam_v4_error    \
_(DECAPSULATED, "good packets decapsulated")

/* Counter description strings, index-aligned with the enum below. */
static char *vxlan_gpe_decap_ioam_v4_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_decap_ioam_v4_error
#undef _
};

/* Error/counter indices for this node; the trailing N_ERROR member is
 * the total count used at node registration time. */
typedef enum
{
#define _(sym,str) VXLAN_GPE_DECAP_IOAM_V4_ERROR_##sym,
  foreach_vxlan_gpe_decap_ioam_v4_error
#undef _
    VXLAN_GPE_DECAP_IOAM_V4_N_ERROR,
} vxlan_gpe_decap_ioam_v4_error_t;
43
44
45 always_inline void
46 vxlan_gpe_decap_ioam_v4_two_inline (vlib_main_t * vm,
47                                     vlib_node_runtime_t * node,
48                                     vxlan_gpe_main_t * ngm,
49                                     vlib_buffer_t * b0, vlib_buffer_t * b1,
50                                     u32 * next0, u32 * next1)
51 {
52   vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;
53
54   next0[0] = next1[0] = hm->decap_v4_next_override;
55   vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b0, &next0[0],
56                                             VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
57                                             0 /* use_adj */ );
58   vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b1, &next0[1],
59                                             VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
60                                             0 /* use_adj */ );
61 }
62
63
64
/**
 * @brief Node function: apply iOAM decap processing to a frame of
 * VXLAN-GPE packets before they are popped.
 *
 * Standard VPP dual/single-loop dispatch: buffers are rewound to expose
 * the outer IP4/UDP/VXLAN-GPE headers, handed to the shared iOAM
 * handler (which picks the next node — pop on success, drop on error),
 * then enqueued to the chosen next frame.
 *
 * NOTE(review): the is_ipv6 argument is never read — both loops rewind
 * by sizeof (ip4_header_t) unconditionally.  Presumably the IPv6 path
 * is unimplemented; confirm before relying on is_ipv6 = 1.
 *
 * @return number of vectors processed (frame is fully consumed).
 */
static uword
vxlan_gpe_decap_ioam (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * from_frame, u8 is_ipv6)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop: process two buffers per iteration while at least
       * four remain, so the next pair can be prefetched. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;

          next0 = next1 = hm->decap_v4_next_override;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);


          /* Rewind to the outer IP4/UDP/VXLAN-GPE headers so the iOAM
           * handler can see the full encapsulation. */
          vlib_buffer_advance (b0,
                               -(word) (sizeof (udp_header_t) +
                                        sizeof (ip4_header_t) +
                                        sizeof (vxlan_gpe_header_t)));
          vlib_buffer_advance (b1,
                               -(word) (sizeof (udp_header_t) +
                                        sizeof (ip4_header_t) +
                                        sizeof (vxlan_gpe_header_t)));

          /* May rewrite next0/next1 to DROP on iOAM error. */
          vxlan_gpe_decap_ioam_v4_two_inline (vm, node, ngm, b0, b1,
                                              &next0, &next1);


          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0,
                                           next1);

          /* NOTE(review): only b0 is checked for tracing here (b1 is
           * not), and the trace record is added but never filled in —
           * looks incomplete; confirm against the single-loop path. */
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                              sizeof (*tr));
            }
        }

      /* Single loop: drain the remaining buffers one at a time. */
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0 = hm->decap_v4_next_override;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);


          /* Rewind to the outer IP4/UDP/VXLAN-GPE headers (see dual
           * loop). */
          vlib_buffer_advance (b0,
                               -(word) (sizeof (udp_header_t) +
                                        sizeof (ip4_header_t) +
                                        sizeof (vxlan_gpe_header_t)));

          next0 = hm->decap_v4_next_override;
          vxlan_gpe_encap_decap_ioam_v4_one_inline (vm, node, b0,
                                                    &next0,
                                                    VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP,
                                                    0 /* use_adj */ );

          /* NOTE(review): trace record added but never populated —
           * same as the dual loop; confirm intended. */
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_ioam_v4_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                              sizeof (*tr));
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return from_frame->n_vectors;
}
185
186
187 static uword
188 vxlan_gpe_decap_ioam_v4 (vlib_main_t * vm,
189                          vlib_node_runtime_t * node,
190                          vlib_frame_t * from_frame)
191 {
192   return vxlan_gpe_decap_ioam (vm, node, from_frame, 0);
193 }
194
195
196 /* *INDENT-OFF* */
/* *INDENT-OFF* */
/* Graph node registration: internal node sitting between VXLAN-GPE
 * decap and pop, with two next nodes (continue to pop, or drop). */
VLIB_REGISTER_NODE (vxlan_gpe_decap_ioam_v4_node) = {
  .function = vxlan_gpe_decap_ioam_v4,
  .name = "vxlan-gpe-decap-ioam-v4",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_ioam_v4_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(vxlan_gpe_decap_ioam_v4_error_strings),
  .error_strings = vxlan_gpe_decap_ioam_v4_error_strings,

  .n_next_nodes = VXLAN_GPE_DECAP_IOAM_V4_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_DECAP_IOAM_V4_NEXT_POP] = "vxlan-gpe-pop-ioam-v4",
    [VXLAN_GPE_DECAP_IOAM_V4_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
215
216
217 /*
218  * fd.io coding-style-patch-verification: ON
219  *
220  * Local Variables:
221  * eval: (c-set-style "gnu")
222  * End:
223  */