nat: static mappings in flow hash
[vpp.git] / src / plugins / nat / nat44-ed / nat44_ed_inlines.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief NAT44-ED inline functions
 */

#ifndef __included_nat44_ed_inlines_h__
#define __included_nat44_ed_inlines_h__

#include <float.h>
#include <vppinfra/clib.h>
#include <vnet/fib/ip4_fib.h>

#include <nat/lib/log.h>
#include <nat/nat44-ed/nat44_ed.h>

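/* The 16-byte bihash key packs the full 6-tuple of a flow:
 *   key[0] = remote address (bits 63:32) | local address (bits 31:0)
 *   key[1] = remote port (63:48) | local port (47:32) | fib index (31:8)
 *            | IP protocol (7:0)
 */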
always_inline void
init_ed_k (clib_bihash_kv_16_8_t *kv, u32 l_addr, u16 l_port, u32 r_addr,
           u16 r_port, u32 fib_index, ip_protocol_t proto)
{
  kv->key[0] = (u64) r_addr << 32 | l_addr;
  kv->key[1] =
    (u64) r_port << 48 | (u64) l_port << 32 | fib_index << 8 | proto;
}

always_inline void
init_ed_kv (clib_bihash_kv_16_8_t *kv, u32 l_addr, u16 l_port, u32 r_addr,
            u16 r_port, u32 fib_index, u8 proto, u32 thread_index,
            u32 session_index)
{
  init_ed_k (kv, l_addr, l_port, r_addr, r_port, fib_index, proto);
  kv->value = (u64) thread_index << 32 | session_index;
}

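/* Static mapping (sm) entries share the flow hash with dynamic sessions.
 * They zero the unused half of the 6-tuple and store the static mapping
 * index (rather than a session index) in the value. */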
always_inline void
nat44_ed_sm_init_i2o_kv (clib_bihash_kv_16_8_t *kv, u32 addr, u16 port,
                         u32 fib_index, u8 proto, u32 sm_index)
{
  init_ed_kv (kv, addr, port, 0, 0, fib_index, proto, 0, sm_index);
}

always_inline void
nat44_ed_sm_init_o2i_kv (clib_bihash_kv_16_8_t *kv, u32 e_addr, u16 e_port,
                         u32 fib_index, u8 proto, u32 sm_index)
{
  init_ed_kv (kv, 0, 0, e_addr, e_port, fib_index, proto, 0, sm_index);
}

always_inline void
nat44_ed_sm_init_i2o_k (clib_bihash_kv_16_8_t *kv, u32 addr, u16 port,
                        u32 fib_index, u8 proto)
{
  nat44_ed_sm_init_i2o_kv (kv, addr, port, fib_index, proto, 0);
}

always_inline void
nat44_ed_sm_init_o2i_k (clib_bihash_kv_16_8_t *kv, u32 e_addr, u16 e_port,
                        u32 fib_index, u8 proto)
{
  nat44_ed_sm_init_o2i_kv (kv, e_addr, e_port, fib_index, proto, 0);
}

always_inline u32
ed_value_get_thread_index (clib_bihash_kv_16_8_t *value)
{
  return value->value >> 32;
}

always_inline u32
ed_value_get_session_index (clib_bihash_kv_16_8_t *value)
{
  return value->value & ~(u32) 0;
}

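/* Inverse of init_ed_k: unpack selected fields from a flow hash key.
 * Output pointers may be NULL when a field is not needed. */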
always_inline void
split_ed_kv (clib_bihash_kv_16_8_t *kv, ip4_address_t *l_addr,
             ip4_address_t *r_addr, u8 *proto, u32 *fib_index, u16 *l_port,
             u16 *r_port)
{
  if (l_addr)
    {
      l_addr->as_u32 = kv->key[0] & (u32) ~0;
    }
  if (r_addr)
    {
      r_addr->as_u32 = kv->key[0] >> 32;
    }
  if (r_port)
    {
      *r_port = kv->key[1] >> 48;
    }
  if (l_port)
    {
      *l_port = (kv->key[1] >> 32) & (u16) ~0;
    }
  if (fib_index)
    {
      *fib_index = (kv->key[1] >> 8) & ((1 << 24) - 1);
    }
  if (proto)
    {
      *proto = kv->key[1] & (u8) ~0;
    }
}

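/* Derive session lookup keys from an ICMP packet. For echo request/reply
 * the outer header is used directly. For ICMP error messages the key is
 * built from the embedded (inner) packet, with source and destination
 * swapped, because the error travels in the direction opposite to the
 * flow that triggered it. */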
static_always_inline int
nat_get_icmp_session_lookup_values (vlib_buffer_t *b, ip4_header_t *ip0,
                                    ip4_address_t *lookup_saddr,
                                    u16 *lookup_sport,
                                    ip4_address_t *lookup_daddr,
                                    u16 *lookup_dport, u8 *lookup_protocol)
{
  icmp46_header_t *icmp0;
  icmp_echo_header_t *echo0, *inner_echo0 = 0;
  ip4_header_t *inner_ip0 = 0;
  void *l4_header = 0;
  icmp46_header_t *inner_icmp0;

  icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
  echo0 = (icmp_echo_header_t *) (icmp0 + 1);

  // avoid warning about unused variables in caller by setting to bogus values
  *lookup_sport = 0;
  *lookup_dport = 0;

  if (!icmp_type_is_error_message (
        vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))
    {
      *lookup_protocol = IP_PROTOCOL_ICMP;
      lookup_saddr->as_u32 = ip0->src_address.as_u32;
      *lookup_sport = vnet_buffer (b)->ip.reass.l4_src_port;
      lookup_daddr->as_u32 = ip0->dst_address.as_u32;
      *lookup_dport = vnet_buffer (b)->ip.reass.l4_dst_port;
    }
  else
    {
      inner_ip0 = (ip4_header_t *) (echo0 + 1);
      l4_header = ip4_next_header (inner_ip0);
      *lookup_protocol = inner_ip0->protocol;
      lookup_saddr->as_u32 = inner_ip0->dst_address.as_u32;
      lookup_daddr->as_u32 = inner_ip0->src_address.as_u32;
      switch (inner_ip0->protocol)
        {
        case IP_PROTOCOL_ICMP:
          inner_icmp0 = (icmp46_header_t *) l4_header;
          inner_echo0 = (icmp_echo_header_t *) (inner_icmp0 + 1);
          *lookup_sport = inner_echo0->identifier;
          *lookup_dport = inner_echo0->identifier;
          break;
        case IP_PROTOCOL_UDP:
        case IP_PROTOCOL_TCP:
          *lookup_sport = ((tcp_udp_header_t *) l4_header)->dst_port;
          *lookup_dport = ((tcp_udp_header_t *) l4_header)->src_port;
          break;
        default:
          return NAT_IN2OUT_ED_ERROR_UNSUPPORTED_PROTOCOL;
        }
    }
  return 0;
}

always_inline u32
nat44_session_get_timeout (snat_main_t *sm, snat_session_t *s)
{
  switch (s->proto)
    {
    case IP_PROTOCOL_ICMP:
      /* fallthrough */
    case IP_PROTOCOL_ICMP6:
      return sm->timeouts.icmp;
    case IP_PROTOCOL_UDP:
      return sm->timeouts.udp;
    case IP_PROTOCOL_TCP:
      {
        if (s->state)
          return sm->timeouts.tcp.transitory;
        else
          return sm->timeouts.tcp.established;
      }
    default:
      return sm->timeouts.udp;
    }

  return 0;
}

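/* A fib_index beyond the configured limit vector falls back to index 0,
 * i.e. the default per-VRF session limit applies. */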
static_always_inline u8
nat44_ed_maximum_sessions_exceeded (snat_main_t *sm, u32 fib_index,
                                    u32 thread_index)
{
  u32 translations;
  translations = pool_elts (sm->per_thread_data[thread_index].sessions);
  if (vec_len (sm->max_translations_per_fib) <= fib_index)
    fib_index = 0;
  return translations >= sm->max_translations_per_fib[fib_index];
}

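/* Each session sits on one of several per-protocol LRU lists. New TCP
 * sessions start on the transitory list; they move to the established
 * list only once the TCP state machine sees the handshake complete. */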
static_always_inline int
nat_ed_lru_insert (snat_main_per_thread_data_t *tsm, snat_session_t *s,
                   f64 now, u8 proto)
{
  dlist_elt_t *lru_list_elt;
  pool_get (tsm->lru_pool, lru_list_elt);
  s->lru_index = lru_list_elt - tsm->lru_pool;
  switch (proto)
    {
    case IP_PROTOCOL_UDP:
      s->lru_head_index = tsm->udp_lru_head_index;
      break;
    case IP_PROTOCOL_TCP:
      s->lru_head_index = tsm->tcp_trans_lru_head_index;
      break;
    case IP_PROTOCOL_ICMP:
      s->lru_head_index = tsm->icmp_lru_head_index;
      break;
    default:
      s->lru_head_index = tsm->unk_proto_lru_head_index;
      break;
    }
  clib_dlist_addtail (tsm->lru_pool, s->lru_head_index, s->lru_index);
  lru_list_elt->value = s - tsm->sessions;
  s->last_lru_update = now;
  return 1;
}

static_always_inline void
nat_6t_flow_to_ed_k (clib_bihash_kv_16_8_t *kv, nat_6t_flow_t *f)
{
  init_ed_k (kv, f->match.saddr.as_u32, f->match.sport, f->match.daddr.as_u32,
             f->match.dport, f->match.fib_index, f->match.proto);
}

static_always_inline void
nat_6t_flow_to_ed_kv (clib_bihash_kv_16_8_t *kv, nat_6t_flow_t *f,
                      u32 thread_idx, u32 session_idx)
{
  init_ed_kv (kv, f->match.saddr.as_u32, f->match.sport, f->match.daddr.as_u32,
              f->match.dport, f->match.fib_index, f->match.proto, thread_idx,
              session_idx);
}

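/* Add or delete the in2out flow of a session in the flow hash. On add,
 * the L3/L4 checksum deltas for the flow rewrite are precomputed so the
 * data plane does not have to recompute full checksums per packet. */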
static_always_inline int
nat_ed_ses_i2o_flow_hash_add_del (snat_main_t *sm, u32 thread_idx,
                                  snat_session_t *s, int is_add)
{
  snat_main_per_thread_data_t *tsm =
    vec_elt_at_index (sm->per_thread_data, thread_idx);
  clib_bihash_kv_16_8_t kv;
  if (0 == is_add)
    {
      nat_6t_flow_to_ed_k (&kv, &s->i2o);
    }
  else
    {
      nat_6t_flow_to_ed_kv (&kv, &s->i2o, thread_idx, s - tsm->sessions);
      nat_6t_l3_l4_csum_calc (&s->i2o);
    }

  ASSERT (thread_idx == s->thread_index);
  return clib_bihash_add_del_16_8 (&sm->flow_hash, &kv, is_add);
}

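/* Same as above for the out2in flow, with one extra check: a dynamic
 * session must not shadow a static mapping, so the add is refused when a
 * static mapping already claims the out2in destination tuple. */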
static_always_inline int
nat_ed_ses_o2i_flow_hash_add_del (snat_main_t *sm, u32 thread_idx,
                                  snat_session_t *s, int is_add)
{
  snat_main_per_thread_data_t *tsm =
    vec_elt_at_index (sm->per_thread_data, thread_idx);
  clib_bihash_kv_16_8_t kv;
  if (0 == is_add)
    {
      nat_6t_flow_to_ed_k (&kv, &s->o2i);
    }
  else
    {
      nat_6t_flow_to_ed_kv (&kv, &s->o2i, thread_idx, s - tsm->sessions);
      if (!(s->flags & SNAT_SESSION_FLAG_STATIC_MAPPING))
        {
          if (nat44_ed_sm_o2i_lookup (sm, s->o2i.match.daddr,
                                      s->o2i.match.dport, 0,
                                      s->o2i.match.proto))
            {
              return -1;
            }
        }
      nat_6t_l3_l4_csum_calc (&s->o2i);
    }
  ASSERT (thread_idx == s->thread_index);
  return clib_bihash_add_del_16_8 (&sm->flow_hash, &kv, is_add);
}

always_inline void
nat_ed_session_delete (snat_main_t *sm, snat_session_t *ses, u32 thread_index,
                       int lru_delete
                       /* delete from global LRU list */)
{
  snat_main_per_thread_data_t *tsm =
    vec_elt_at_index (sm->per_thread_data, thread_index);

  if (lru_delete)
    {
      clib_dlist_remove (tsm->lru_pool, ses->lru_index);
    }
  pool_put_index (tsm->lru_pool, ses->lru_index);
  if (nat_ed_ses_i2o_flow_hash_add_del (sm, thread_index, ses, 0))
    nat_elog_warn (sm, "flow hash del failed");
  if (nat_ed_ses_o2i_flow_hash_add_del (sm, thread_index, ses, 0))
    nat_elog_warn (sm, "flow hash del failed");
  pool_put (tsm->sessions, ses);
  vlib_set_simple_counter (&sm->total_sessions, thread_index, 0,
                           pool_elts (tsm->sessions));
}

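/* Try to reclaim the oldest session on the given LRU list. If it has
 * timed out (or its TCP close timestamp has passed) it is freed and 1 is
 * returned; otherwise it is put back at the head and 0 is returned. */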
static_always_inline int
nat_lru_free_one_with_head (snat_main_t *sm, int thread_index, f64 now,
                            u32 head_index)
{
  snat_session_t *s = NULL;
  dlist_elt_t *oldest_elt;
  f64 sess_timeout_time;
  u32 oldest_index;
  snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
  oldest_index = clib_dlist_remove_head (tsm->lru_pool, head_index);
  if (~0 != oldest_index)
    {
      oldest_elt = pool_elt_at_index (tsm->lru_pool, oldest_index);
      s = pool_elt_at_index (tsm->sessions, oldest_elt->value);

      sess_timeout_time =
        s->last_heard + (f64) nat44_session_get_timeout (sm, s);
      if (now >= sess_timeout_time ||
          (s->tcp_closed_timestamp && now >= s->tcp_closed_timestamp))
        {
          nat44_ed_free_session_data (sm, s, thread_index, 0);
          nat_ed_session_delete (sm, s, thread_index, 0);
          return 1;
        }
      else
        {
          clib_dlist_addhead (tsm->lru_pool, head_index, oldest_index);
        }
    }
  return 0;
}

static_always_inline int
nat_lru_free_one (snat_main_t *sm, int thread_index, f64 now)
{
  snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
  int rc = 0;
#define _(p)                                                                  \
  if ((rc = nat_lru_free_one_with_head (sm, thread_index, now,                \
                                        tsm->p##_lru_head_index)))            \
    {                                                                         \
      return rc;                                                              \
    }
  _ (tcp_trans);
  _ (udp);
  _ (unk_proto);
  _ (icmp);
  _ (tcp_estab);
#undef _
  return 0;
}

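/* Allocate a session for the calling thread: first attempt to reclaim one
 * expired session via the LRU lists, then take a pool entry and put it on
 * the LRU list matching its protocol. */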
static_always_inline snat_session_t *
nat_ed_session_alloc (snat_main_t *sm, u32 thread_index, f64 now, u8 proto)
{
  snat_session_t *s;
  snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];

  nat_lru_free_one (sm, thread_index, now);

  pool_get (tsm->sessions, s);
  clib_memset (s, 0, sizeof (*s));

  nat_ed_lru_insert (tsm, s, now, proto);

  s->ha_last_refreshed = now;
  vlib_set_simple_counter (&sm->total_sessions, thread_index, 0,
                           pool_elts (tsm->sessions));
#if CLIB_ASSERT_ENABLE
  s->thread_index = thread_index;
#endif
  return s;
}

// slow path
static_always_inline void
per_vrf_sessions_cleanup (u32 thread_index)
{
  snat_main_t *sm = &snat_main;
  snat_main_per_thread_data_t *tsm =
    vec_elt_at_index (sm->per_thread_data, thread_index);
  per_vrf_sessions_t *per_vrf_sessions;
  u32 *to_free = 0, *i;

  vec_foreach (per_vrf_sessions, tsm->per_vrf_sessions_vec)
    {
      if (per_vrf_sessions->expired && per_vrf_sessions->ses_count == 0)
        {
          vec_add1 (to_free, per_vrf_sessions - tsm->per_vrf_sessions_vec);
        }
    }

  if (vec_len (to_free))
    {
      vec_foreach (i, to_free)
        {
          vec_del1 (tsm->per_vrf_sessions_vec, *i);
        }
    }

  vec_free (to_free);
}

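/* Register a session against the (rx, tx) FIB pair it belongs to. An
 * existing, non-expired registration matching the pair in either
 * direction is reused; otherwise a new one is created. The registration
 * keeps a session count so expired VRF registrations can be cleaned up
 * once the count drops to zero. */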
// slow path
static_always_inline void
per_vrf_sessions_register_session (snat_session_t *s, u32 thread_index)
{
  snat_main_t *sm = &snat_main;
  snat_main_per_thread_data_t *tsm =
    vec_elt_at_index (sm->per_thread_data, thread_index);
  per_vrf_sessions_t *per_vrf_sessions;

  per_vrf_sessions_cleanup (thread_index);

  // s->per_vrf_sessions_index == ~0 ... reuse of old session

  vec_foreach (per_vrf_sessions, tsm->per_vrf_sessions_vec)
    {
      // ignore already expired registrations
      if (per_vrf_sessions->expired)
        continue;

      if ((s->in2out.fib_index == per_vrf_sessions->rx_fib_index) &&
          (s->out2in.fib_index == per_vrf_sessions->tx_fib_index))
        {
          goto done;
        }
      if ((s->in2out.fib_index == per_vrf_sessions->tx_fib_index) &&
          (s->out2in.fib_index == per_vrf_sessions->rx_fib_index))
        {
          goto done;
        }
    }

  // create a new registration
  vec_add2 (tsm->per_vrf_sessions_vec, per_vrf_sessions, 1);
  clib_memset (per_vrf_sessions, 0, sizeof (*per_vrf_sessions));

  per_vrf_sessions->rx_fib_index = s->in2out.fib_index;
  per_vrf_sessions->tx_fib_index = s->out2in.fib_index;

done:
  s->per_vrf_sessions_index = per_vrf_sessions - tsm->per_vrf_sessions_vec;
  per_vrf_sessions->ses_count++;
}

// fast path
static_always_inline void
per_vrf_sessions_unregister_session (snat_session_t *s, u32 thread_index)
{
  snat_main_t *sm = &snat_main;
  snat_main_per_thread_data_t *tsm;
  per_vrf_sessions_t *per_vrf_sessions;

  ASSERT (s->per_vrf_sessions_index != ~0);

  tsm = vec_elt_at_index (sm->per_thread_data, thread_index);
  per_vrf_sessions =
    vec_elt_at_index (tsm->per_vrf_sessions_vec, s->per_vrf_sessions_index);

  ASSERT (per_vrf_sessions->ses_count != 0);

  per_vrf_sessions->ses_count--;
  s->per_vrf_sessions_index = ~0;
}

// fast path
static_always_inline u8
per_vrf_sessions_is_expired (snat_session_t *s, u32 thread_index)
{
  snat_main_t *sm = &snat_main;
  snat_main_per_thread_data_t *tsm;
  per_vrf_sessions_t *per_vrf_sessions;

  ASSERT (s->per_vrf_sessions_index != ~0);

  tsm = vec_elt_at_index (sm->per_thread_data, thread_index);
  per_vrf_sessions =
    vec_elt_at_index (tsm->per_vrf_sessions_vec, s->per_vrf_sessions_index);
  return per_vrf_sessions->expired;
}

static_always_inline void
nat_6t_flow_init (nat_6t_flow_t *f, u32 thread_idx, ip4_address_t saddr,
                  u16 sport, ip4_address_t daddr, u16 dport, u32 fib_index,
                  u8 proto, u32 session_idx)
{
  clib_memset (f, 0, sizeof (*f));
  f->match.saddr = saddr;
  f->match.sport = sport;
  f->match.daddr = daddr;
  f->match.dport = dport;
  f->match.proto = proto;
  f->match.fib_index = fib_index;
}

static_always_inline void
nat_6t_i2o_flow_init (snat_main_t *sm, u32 thread_idx, snat_session_t *s,
                      ip4_address_t saddr, u16 sport, ip4_address_t daddr,
                      u16 dport, u32 fib_index, u8 proto)
{
  snat_main_per_thread_data_t *tsm =
    vec_elt_at_index (sm->per_thread_data, thread_idx);
  nat_6t_flow_init (&s->i2o, thread_idx, saddr, sport, daddr, dport, fib_index,
                    proto, s - tsm->sessions);
}

static_always_inline void
nat_6t_o2i_flow_init (snat_main_t *sm, u32 thread_idx, snat_session_t *s,
                      ip4_address_t saddr, u16 sport, ip4_address_t daddr,
                      u16 dport, u32 fib_index, u8 proto)
{
  snat_main_per_thread_data_t *tsm =
    vec_elt_at_index (sm->per_thread_data, thread_idx);
  nat_6t_flow_init (&s->o2i, thread_idx, saddr, sport, daddr, dport, fib_index,
                    proto, s - tsm->sessions);
}

static_always_inline int
nat_6t_t_eq (nat_6t_t *t1, nat_6t_t *t2)
{
  return t1->as_u64[0] == t2->as_u64[0] && t1->as_u64[1] == t2->as_u64[1];
}

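/* Common pre-NAT feature node body: for every buffer, remember the next
 * node on the feature arc in buffer metadata and steer the packet to
 * def_next (the NAT node proper). Packets are processed two at a time,
 * prefetching the following pair. */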
static inline uword
nat_pre_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                        vlib_frame_t *frame, u32 def_next)
{
  u32 n_left_from, *from;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from >= 2)
    {
      u32 next0, next1;
      u32 arc_next0, arc_next1;
      vlib_buffer_t *b0, *b1;

      b0 = *b;
      b++;
      b1 = *b;
      b++;

      /* Prefetch next iteration. */
      if (PREDICT_TRUE (n_left_from >= 4))
        {
          vlib_buffer_t *p2, *p3;

          p2 = *b;
          p3 = *(b + 1);

          vlib_prefetch_buffer_header (p2, LOAD);
          vlib_prefetch_buffer_header (p3, LOAD);

          clib_prefetch_load (p2->data);
          clib_prefetch_load (p3->data);
        }

      next0 = def_next;
      next1 = def_next;

      vnet_feature_next (&arc_next0, b0);
      vnet_feature_next (&arc_next1, b1);

      vnet_buffer2 (b0)->nat.arc_next = arc_next0;
      vnet_buffer2 (b1)->nat.arc_next = arc_next1;

      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
        {
          if (b0->flags & VLIB_BUFFER_IS_TRACED)
            {
              nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
              t->next_index = next0;
              t->arc_next_index = arc_next0;
            }
          if (b1->flags & VLIB_BUFFER_IS_TRACED)
            {
              nat_pre_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
              t->next_index = next1;
              t->arc_next_index = arc_next1;
            }
        }

      n_left_from -= 2;
      next[0] = next0;
      next[1] = next1;
      next += 2;
    }

  while (n_left_from > 0)
    {
      u32 next0;
      u32 arc_next0;
      vlib_buffer_t *b0;

      b0 = *b;
      b++;

      next0 = def_next;
      vnet_feature_next (&arc_next0, b0);
      vnet_buffer2 (b0)->nat.arc_next = arc_next0;

      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
                         (b0->flags & VLIB_BUFFER_IS_TRACED)))
        {
          nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
          t->next_index = next0;
          t->arc_next_index = arc_next0;
        }

      n_left_from--;
      next[0] = next0;
      next++;
    }
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

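/* Pick a random port in [min, max]. If the raw 16-bit sample already
 * falls in range it is used as-is; otherwise the wider sample is folded
 * into the range with a modulo. */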
static_always_inline u16
snat_random_port (u16 min, u16 max)
{
  snat_main_t *sm = &snat_main;
  u32 rwide;
  u16 r;

  rwide = random_u32 (&sm->random_seed);
  r = rwide & 0xFFFF;
  if (r >= min && r <= max)
    return r;

  return min + (rwide % (max - min + 1));
}

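/* Check whether ip4_addr is configured on sw_if_index0. The per-node
 * runtime caches the address set of the last interface seen, so the
 * common case is a single hash lookup. */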
always_inline u8
is_interface_addr (snat_main_t *sm, vlib_node_runtime_t *node,
                   u32 sw_if_index0, u32 ip4_addr)
{
  snat_runtime_t *rt = (snat_runtime_t *) node->runtime_data;
  u8 ip4_addr_exists;

  if (PREDICT_FALSE (rt->cached_sw_if_index != sw_if_index0))
    {
      ip_lookup_main_t *lm = &sm->ip4_main->lookup_main;
      ip_interface_address_t *ia;
      ip4_address_t *a;

      rt->cached_sw_if_index = ~0;
      hash_free (rt->cached_presence_by_ip4_address);

      foreach_ip_interface_address (
        lm, ia, sw_if_index0, 1 /* honor unnumbered */, ({
          a = ip_interface_address_get_address (lm, ia);
          hash_set (rt->cached_presence_by_ip4_address, a->as_u32, 1);
          rt->cached_sw_if_index = sw_if_index0;
        }));

      if (rt->cached_sw_if_index == ~0)
        return 0;
    }

  ip4_addr_exists = !!hash_get (rt->cached_presence_by_ip4_address, ip4_addr);
  if (PREDICT_FALSE (ip4_addr_exists))
    return 1;
  else
    return 0;
}

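/* Track the TCP state machine from in2out packets: RST/SYN/FIN/ACK flags
 * drive the session state bits, and a session reported closed by
 * nat44_is_ses_closed is stamped with a transitory close timestamp.
 * Finally the session is moved to the transitory or established LRU list
 * to match its new state. */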
always_inline void
nat44_set_tcp_session_state_i2o (snat_main_t *sm, f64 now, snat_session_t *ses,
                                 vlib_buffer_t *b, u32 thread_index)
{
  snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
  u8 tcp_flags = vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags;
  u32 tcp_ack_number = vnet_buffer (b)->ip.reass.tcp_ack_number;
  u32 tcp_seq_number = vnet_buffer (b)->ip.reass.tcp_seq_number;
  if ((ses->state == 0) && (tcp_flags & TCP_FLAG_RST))
    ses->state = NAT44_SES_RST;
  if ((ses->state == NAT44_SES_RST) && !(tcp_flags & TCP_FLAG_RST))
    ses->state = 0;
  if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_SYN) &&
      (ses->state & NAT44_SES_O2I_SYN))
    ses->state = 0;
  if (tcp_flags & TCP_FLAG_SYN)
    ses->state |= NAT44_SES_I2O_SYN;
  if (tcp_flags & TCP_FLAG_FIN)
    {
      ses->i2o_fin_seq = clib_net_to_host_u32 (tcp_seq_number);
      ses->state |= NAT44_SES_I2O_FIN;
    }
  if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_O2I_FIN))
    {
      if (clib_net_to_host_u32 (tcp_ack_number) > ses->o2i_fin_seq)
        {
          ses->state |= NAT44_SES_O2I_FIN_ACK;
          if (nat44_is_ses_closed (ses))
            { // if session is now closed, save the timestamp
              ses->tcp_closed_timestamp = now + sm->timeouts.tcp.transitory;
              ses->last_lru_update = now;
            }
        }
    }

  // move the session to proper LRU
  if (ses->state)
    {
      ses->lru_head_index = tsm->tcp_trans_lru_head_index;
    }
  else
    {
      ses->lru_head_index = tsm->tcp_estab_lru_head_index;
    }
  clib_dlist_remove (tsm->lru_pool, ses->lru_index);
  clib_dlist_addtail (tsm->lru_pool, ses->lru_head_index, ses->lru_index);
}

always_inline void
nat44_set_tcp_session_state_o2i (snat_main_t *sm, f64 now, snat_session_t *ses,
                                 u8 tcp_flags, u32 tcp_ack_number,
                                 u32 tcp_seq_number, u32 thread_index)
{
  snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
  if ((ses->state == 0) && (tcp_flags & TCP_FLAG_RST))
    ses->state = NAT44_SES_RST;
  if ((ses->state == NAT44_SES_RST) && !(tcp_flags & TCP_FLAG_RST))
    ses->state = 0;
  if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_SYN) &&
      (ses->state & NAT44_SES_O2I_SYN))
    ses->state = 0;
  if (tcp_flags & TCP_FLAG_SYN)
    ses->state |= NAT44_SES_O2I_SYN;
  if (tcp_flags & TCP_FLAG_FIN)
    {
      ses->o2i_fin_seq = clib_net_to_host_u32 (tcp_seq_number);
      ses->state |= NAT44_SES_O2I_FIN;
    }
  if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_FIN))
    {
      if (clib_net_to_host_u32 (tcp_ack_number) > ses->i2o_fin_seq)
        ses->state |= NAT44_SES_I2O_FIN_ACK;
      if (nat44_is_ses_closed (ses))
        { // if session is now closed, save the timestamp
          ses->tcp_closed_timestamp = now + sm->timeouts.tcp.transitory;
          ses->last_lru_update = now;
        }
    }
  // move the session to proper LRU
  if (ses->state)
    {
      ses->lru_head_index = tsm->tcp_trans_lru_head_index;
    }
  else
    {
      ses->lru_head_index = tsm->tcp_estab_lru_head_index;
    }
  clib_dlist_remove (tsm->lru_pool, ses->lru_index);
  clib_dlist_addtail (tsm->lru_pool, ses->lru_head_index, ses->lru_index);
}

always_inline void
nat44_session_update_counters (snat_session_t *s, f64 now, uword bytes,
                               u32 thread_index)
{
  s->last_heard = now;
  s->total_pkts++;
  s->total_bytes += bytes;
}

/** \brief Session LRU list maintenance */
always_inline void
nat44_session_update_lru (snat_main_t *sm, snat_session_t *s, u32 thread_index)
{
  /* don't update too often - timeout is in magnitude of seconds anyway */
  if (s->last_heard > s->last_lru_update + 1)
    {
      clib_dlist_remove (sm->per_thread_data[thread_index].lru_pool,
                         s->lru_index);
      clib_dlist_addtail (sm->per_thread_data[thread_index].lru_pool,
                          s->lru_head_index, s->lru_index);
      s->last_lru_update = s->last_heard;
    }
}

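/* Protocols natively handled by NAT44-ED; anything else takes the
 * unknown-protocol path. */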
static_always_inline int
nat44_ed_is_unk_proto (u8 proto)
{
  static const int lookup_table[256] = {
    [IP_PROTOCOL_TCP] = 1,
    [IP_PROTOCOL_UDP] = 1,
    [IP_PROTOCOL_ICMP] = 1,
    [IP_PROTOCOL_ICMP6] = 1,
  };

  return !lookup_table[proto];
}

#endif /* __included_nat44_ed_inlines_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */