src/plugins/adl/ip6_allowlist.c
/*
 * Copyright (c) 2016,2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <plugins/adl/adl.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/dpo/load_balance.h>

typedef struct {
  u32 next_index;
  u32 sw_if_index;
} ip6_adl_allowlist_trace_t;

/* packet trace format function */
static u8 * format_ip6_adl_allowlist_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip6_adl_allowlist_trace_t * t = va_arg (*args, ip6_adl_allowlist_trace_t *);

  s = format (s, "IP6_ADL_ALLOWLIST: sw_if_index %d, next index %d",
              t->sw_if_index, t->next_index);
  return s;
}

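/* Node counters: ALLOWED is bumped once per frame with the number of
 * packets that passed the check; DROPPED is recorded per buffer via
 * b->error when the source lookup does not hit a receive route. */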
#define foreach_ip6_adl_allowlist_error         \
_(ALLOWED, "ip6 allowlist allowed")             \
_(DROPPED, "ip6 allowlist dropped")

typedef enum {
#define _(sym,str) IP6_ADL_ALLOWLIST_ERROR_##sym,
  foreach_ip6_adl_allowlist_error
#undef _
  IP6_ADL_ALLOWLIST_N_ERROR,
} ip6_adl_allowlist_error_t;

static char * ip6_adl_allowlist_error_strings[] = {
#define _(sym,string) string,
  foreach_ip6_adl_allowlist_error
#undef _
};

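/*
 * ip6-adl-allowlist node: for each packet, look up the IPv6 source
 * address in the FIB named by the per-interface ADL config.  A packet
 * is allowed only when that lookup resolves to a receive (DPO_RECEIVE)
 * route; otherwise it is sent to error-drop.  The first loop below is
 * the usual VPP dual-buffer pattern with prefetch; the second mops up
 * remaining packets one at a time.
 */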
VLIB_NODE_FN (ip6_adl_allowlist_node) (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  adl_feature_type_t next_index;
  adl_main_t *cm = &adl_main;
  vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
  u32 thread_index = vm->thread_index;
  u32 allowed_packets;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  allowed_packets = n_left_from;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;
          ip6_header_t * ip0, * ip1;
          adl_config_main_t * ccm0, * ccm1;
          adl_config_data_t * c0, * c1;
          u32 lb_index0, lb_index1;
          const load_balance_t * lb0, *lb1;
          const dpo_id_t *dpo0, *dpo1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];

          ip0 = vlib_buffer_get_current (b0);

          ccm0 = cm->adl_config_mains + VNET_ADL_IP6;

          c0 = vnet_get_config_data
              (&ccm0->config_main,
               &adl_buffer (b0)->adl.current_config_index,
               &next0,
               sizeof (c0[0]));

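          /* Allowlist check: the packet passes only if its source address
           * resolves to a receive (local) route in the configured FIB. */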
          lb_index0 = ip6_fib_table_fwding_lookup (c0->fib_index,
                                                    &ip0->src_address);
          lb0 = load_balance_get (lb_index0);
          dpo0 = load_balance_get_bucket_i(lb0, 0);

          if (PREDICT_FALSE(dpo0->dpoi_type != DPO_RECEIVE))
            {
              b0->error = node->errors[IP6_ADL_ALLOWLIST_ERROR_DROPPED];
              allowed_packets--;
              next0 = RX_ADL_DROP;
            }

          b1 = vlib_get_buffer (vm, bi1);
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];

          ip1 = vlib_buffer_get_current (b1);

          ccm1 = cm->adl_config_mains + VNET_ADL_IP6;

          c1 = vnet_get_config_data
              (&ccm1->config_main,
               &adl_buffer (b1)->adl.current_config_index,
               &next1,
               sizeof (c1[0]));

          lb_index1 = ip6_fib_table_fwding_lookup (c1->fib_index,
                                                    &ip1->src_address);

          lb1 = load_balance_get (lb_index1);
          dpo1 = load_balance_get_bucket_i(lb1, 0);

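          /* Account both packets against the matched load-balance "via"
           * counters; the byte count includes the ethernet header. */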
          vlib_increment_combined_counter
              (vcm, thread_index, lb_index0, 1,
               vlib_buffer_length_in_chain (vm, b0)
               + sizeof(ethernet_header_t));

          vlib_increment_combined_counter
              (vcm, thread_index, lb_index1, 1,
               vlib_buffer_length_in_chain (vm, b1)
               + sizeof(ethernet_header_t));

          if (PREDICT_FALSE(dpo1->dpoi_type != DPO_RECEIVE))
            {
              b1->error = node->errors[IP6_ADL_ALLOWLIST_ERROR_DROPPED];
              allowed_packets--;
              next1 = RX_ADL_DROP;
            }

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              ip6_adl_allowlist_trace_t *t =
                 vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
            }

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b1->flags & VLIB_BUFFER_IS_TRACED)))
            {
              ip6_adl_allowlist_trace_t *t =
                 vlib_add_trace (vm, node, b1, sizeof (*t));
              t->sw_if_index = sw_if_index1;
              t->next_index = next1;
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

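      /* Single-buffer loop: same lookup, counting and drop logic as the
       * dual-buffer path above. */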
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;
          ip6_header_t * ip0;
          adl_config_main_t *ccm0;
          adl_config_data_t *c0;
          u32 lb_index0;
          const load_balance_t * lb0;
          const dpo_id_t *dpo0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = adl_buffer(b0)->sw_if_index[VLIB_RX];

          ip0 = vlib_buffer_get_current (b0);

          ccm0 = cm->adl_config_mains + VNET_ADL_IP6;

          c0 = vnet_get_config_data
              (&ccm0->config_main,
               &adl_buffer (b0)->adl.current_config_index,
               &next0,
               sizeof (c0[0]));

          lb_index0 = ip6_fib_table_fwding_lookup (c0->fib_index,
                                                    &ip0->src_address);

          lb0 = load_balance_get (lb_index0);
          dpo0 = load_balance_get_bucket_i(lb0, 0);

          vlib_increment_combined_counter
              (vcm, thread_index, lb_index0, 1,
               vlib_buffer_length_in_chain (vm, b0)
               + sizeof(ethernet_header_t));

          if (PREDICT_FALSE(dpo0->dpoi_type != DPO_RECEIVE))
            {
              b0->error = node->errors[IP6_ADL_ALLOWLIST_ERROR_DROPPED];
              allowed_packets--;
              next0 = RX_ADL_DROP;
            }

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              ip6_adl_allowlist_trace_t *t =
                 vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, node->node_index,
                               IP6_ADL_ALLOWLIST_ERROR_ALLOWED,
                               allowed_packets);
  return frame->n_vectors;
}

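/* Node registration: allowed packets continue along the configured RX
 * feature arc (e.g. to ip6-input); failures take RX_ADL_DROP to
 * error-drop. */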
VLIB_REGISTER_NODE (ip6_adl_allowlist_node) = {
  .name = "ip6-adl-allowlist",
  .vector_size = sizeof (u32),
  .format_trace = format_ip6_adl_allowlist_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ip6_adl_allowlist_error_strings),
  .error_strings = ip6_adl_allowlist_error_strings,

  .n_next_nodes = ADL_RX_N_FEATURES,

  /* edit / add dispositions here */
  .next_nodes = {
    [IP4_RX_ADL_ALLOWLIST] = "ip4-adl-allowlist",
    [IP6_RX_ADL_ALLOWLIST] = "ip6-adl-allowlist",
    [DEFAULT_RX_ADL_ALLOWLIST] = "default-adl-allowlist",
    [IP4_RX_ADL_INPUT] = "ip4-input",
    [IP6_RX_ADL_INPUT] = "ip6-input",
    [DEFAULT_RX_ADL_INPUT] = "ethernet-input",
    [RX_ADL_DROP] = "error-drop",
  },
};

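/* Nothing to set up at init time; the node is wired in by the
 * registration above, and per-interface enablement is handled
 * elsewhere in the plugin. */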
static clib_error_t *
ip6_allowlist_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_allowlist_init);