/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <srv6-ad-flow/ad-flow.h>
23 /****************************** Packet tracing ******************************/
28 } srv6_ad_flow_localsid_trace_t;
33 ip6_address_t src, dst;
34 } srv6_ad_flow_rewrite_trace_t;
37 format_srv6_ad_flow_localsid_trace (u8 *s, va_list *args)
39 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
40 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
41 srv6_ad_flow_localsid_trace_t *t =
42 va_arg (*args, srv6_ad_flow_localsid_trace_t *);
44 return format (s, "SRv6-AD-Flow-localsid: localsid_index %d",
49 format_srv6_ad_flow_rewrite_trace (u8 *s, va_list *args)
51 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
52 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
53 srv6_ad_flow_rewrite_trace_t *t =
54 va_arg (*args, srv6_ad_flow_rewrite_trace_t *);
56 if (PREDICT_FALSE (t->error != 0))
58 return format (s, "SRv6-AD-Flow-rewrite: cache is empty");
61 return format (s, "SRv6-AD-Flow-rewrite: src %U dst %U", format_ip6_address,
62 &t->src, format_ip6_address, &t->dst);
65 /**************************** Nodes registration *****************************/
67 vlib_node_registration_t srv6_ad4_flow_rewrite_node;
68 vlib_node_registration_t srv6_ad6_flow_rewrite_node;
70 /****************************** Packet counters ******************************/
72 #define foreach_srv6_ad_flow_rewrite_counter \
73 _ (PROCESSED, "srv6-ad-flow rewritten packets") \
74 _ (NO_RW, "(Error) No header for rewriting.")
78 #define _(sym, str) SRV6_AD_FLOW_REWRITE_COUNTER_##sym,
79 foreach_srv6_ad_flow_rewrite_counter
81 SRV6_AD_FLOW_REWRITE_N_COUNTERS,
82 } srv6_ad_flow_rewrite_counters;
84 static char *srv6_ad_flow_rewrite_counter_strings[] = {
85 #define _(sym, string) string,
86 foreach_srv6_ad_flow_rewrite_counter
90 /******************************** Next nodes *********************************/
94 SRV6_AD_FLOW_LOCALSID_NEXT_ERROR,
95 SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE4,
96 SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE6,
97 SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS,
98 SRV6_AD_FLOW_LOCALSID_NEXT_PUNT,
99 SRV6_AD_FLOW_LOCALSID_N_NEXT,
100 } srv6_ad_flow_localsid_next_t;
104 SRV6_AD_FLOW_REWRITE_NEXT_ERROR,
105 SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP,
106 SRV6_AD_FLOW_REWRITE_N_NEXT,
107 } srv6_ad_flow_rewrite_next_t;
109 /***************************** Inline functions ******************************/
111 static_always_inline int
112 ad_flow_lru_insert (srv6_ad_flow_localsid_t *ls, srv6_ad_flow_entry_t *e,
115 dlist_elt_t *lru_list_elt;
116 pool_get (ls->lru_pool, lru_list_elt);
117 e->lru_index = lru_list_elt - ls->lru_pool;
118 clib_dlist_addtail (ls->lru_pool, ls->lru_head_index, e->lru_index);
119 lru_list_elt->value = e - ls->cache;
120 e->last_lru_update = now;
125 ad_flow_entry_update_lru (srv6_ad_flow_localsid_t *ls, srv6_ad_flow_entry_t *e)
127 /* don't update too often - timeout is in magnitude of seconds anyway */
128 if (e->last_heard > e->last_lru_update + 1)
130 clib_dlist_remove (ls->lru_pool, e->lru_index);
131 clib_dlist_addtail (ls->lru_pool, ls->lru_head_index, e->lru_index);
132 e->last_lru_update = e->last_heard;
137 ad_flow_entry_delete (srv6_ad_flow_localsid_t *ls, srv6_ad_flow_entry_t *e,
140 clib_bihash_kv_40_8_t kv;
142 if (ls->inner_type == AD_TYPE_IP4)
144 kv.key[0] = ((u64) e->key.s_addr.ip4.as_u32 << 32) |
145 (u64) e->key.d_addr.ip4.as_u32;
146 kv.key[1] = ((u64) e->key.s_port << 16) | ((u64) e->key.d_port);
153 kv.key[0] = e->key.s_addr.ip6.as_u64[0];
154 kv.key[1] = e->key.s_addr.ip6.as_u64[1];
155 kv.key[2] = e->key.d_addr.ip6.as_u64[0];
156 kv.key[3] = e->key.d_addr.ip6.as_u64[1];
157 kv.key[4] = ((u64) e->key.s_port << 16) | ((u64) e->key.d_port);
160 clib_bihash_add_del_40_8 (&ls->ftable, &kv, 0);
162 vec_free (e->rw_data);
166 clib_dlist_remove (ls->lru_pool, e->lru_index);
168 pool_put_index (ls->lru_pool, e->lru_index);
169 pool_put (ls->cache, e);
172 static_always_inline int
173 ad_flow_lru_free_one (srv6_ad_flow_localsid_t *ls, f64 now)
175 srv6_ad_flow_entry_t *e = NULL;
176 dlist_elt_t *oldest_elt;
177 f64 entry_timeout_time;
179 oldest_index = clib_dlist_remove_head (ls->lru_pool, ls->lru_head_index);
180 if (~0 != oldest_index)
182 oldest_elt = pool_elt_at_index (ls->lru_pool, oldest_index);
183 e = pool_elt_at_index (ls->cache, oldest_elt->value);
185 entry_timeout_time = e->last_heard + (f64) SRV6_AD_CACHE_TIMEOUT;
186 if (now >= entry_timeout_time)
188 ad_flow_entry_delete (ls, e, 0);
193 clib_dlist_addhead (ls->lru_pool, ls->lru_head_index, oldest_index);
199 static_always_inline srv6_ad_flow_entry_t *
200 ad_flow_entry_alloc (srv6_ad_flow_localsid_t *ls, f64 now)
202 srv6_ad_flow_entry_t *e;
204 ad_flow_lru_free_one (ls, now);
206 pool_get (ls->cache, e);
207 clib_memset (e, 0, sizeof *e);
209 ad_flow_lru_insert (ls, e, now);
215 ad_flow_value_get_session_index (clib_bihash_kv_40_8_t *value)
217 return value->value & ~(u32) 0;
221 ad_flow_is_idle_entry_cb (clib_bihash_kv_40_8_t *kv, void *arg)
223 srv6_ad_is_idle_entry_ctx_t *ctx = arg;
224 srv6_ad_flow_entry_t *e;
225 u64 entry_timeout_time;
226 srv6_ad_flow_localsid_t *ls = ctx->ls;
228 e = pool_elt_at_index (ls->cache, ad_flow_value_get_session_index (kv));
229 entry_timeout_time = e->last_heard + (f64) SRV6_AD_CACHE_TIMEOUT;
230 if (ctx->now >= entry_timeout_time)
232 ad_flow_entry_delete (ls, e, 1);
238 /****************************** Local SID node *******************************/
241 * @brief Function doing SRH processing for AD behavior
243 static_always_inline int
244 end_ad_flow_walk_expect_first_hdr (vlib_main_t *vm, vlib_buffer_t *b,
245 ip6_ext_header_t *first_hdr,
246 u8 first_hdr_type, u8 expected_hdr_type,
247 u32 *encap_length, u8 **found_hdr)
249 if (PREDICT_TRUE (first_hdr_type == expected_hdr_type))
251 *found_hdr = (void *) first_hdr;
255 u8 ext_hdr_type = first_hdr_type;
256 ip6_ext_header_t *ext_hdr = first_hdr;
258 if (!ip6_ext_hdr (ext_hdr_type))
264 u32 ext_hdr_length = ip6_ext_header_len (ext_hdr);
265 if (!vlib_object_within_buffer_data (vm, b, ext_hdr, ext_hdr_length))
270 *encap_length += ext_hdr_length;
271 ext_hdr_type = ext_hdr->next_hdr;
273 while (ext_hdr_type != expected_hdr_type && ip6_ext_hdr (ext_hdr_type))
275 ext_hdr = ip6_ext_next_header (ext_hdr);
276 ext_hdr_length = ip6_ext_header_len (ext_hdr);
277 if (!vlib_object_within_buffer_data (vm, b, ext_hdr, ext_hdr_length))
282 *encap_length += ext_hdr_length;
283 ext_hdr_type = ext_hdr->next_hdr;
286 if (ext_hdr_type != expected_hdr_type)
292 *found_hdr = ip6_ext_next_header (ext_hdr);
299 * @brief Function doing SRH processing for per-flow AD behavior (IPv6 inner
302 static_always_inline void
303 end_ad_flow_processing_v6 (vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip,
304 srv6_ad_flow_localsid_t *ls_mem, u32 *next,
305 vlib_combined_counter_main_t **cnt, u32 *cnt_idx,
308 ip6_sr_main_t *srm = &sr_main;
309 srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
310 ip6_address_t *new_dst;
311 u32 encap_length = sizeof (ip6_header_t);
312 ip6_sr_header_t *srh;
313 clib_bihash_40_8_t *h = &ls_mem->ftable;
314 ip6_header_t *ulh = NULL;
315 u16 src_port = 0, dst_port = 0;
316 srv6_ad_flow_entry_t *e = NULL;
317 clib_bihash_kv_40_8_t kv, value;
318 srv6_ad_is_idle_entry_ctx_t ctx;
320 /* Find SRH in the extension header chain */
321 end_ad_flow_walk_expect_first_hdr (vm, b, (void *) (ip + 1), ip->protocol,
322 IP_PROTOCOL_IPV6_ROUTE, &encap_length,
325 /* Punt the packet if no SRH or SRH with SL = 0 */
326 if (PREDICT_FALSE (srh == NULL || srh->type != ROUTING_HEADER_TYPE_SR ||
327 srh->segments_left == 0))
329 *next = SRV6_AD_FLOW_LOCALSID_NEXT_PUNT;
330 *cnt = &(sm->sid_punt_counters);
331 *cnt_idx = ls_mem->index;
335 /* Decrement Segments Left and update Destination Address */
336 srh->segments_left -= 1;
337 new_dst = (ip6_address_t *) (srh->segments) + srh->segments_left;
338 ip->dst_address.as_u64[0] = new_dst->as_u64[0];
339 ip->dst_address.as_u64[1] = new_dst->as_u64[1];
341 /* Compute the total encapsulation size and determine ULH type */
342 encap_length += ip6_ext_header_len ((ip6_ext_header_t *) srh);
344 /* Find the inner IPv6 header (ULH) */
345 int ret = end_ad_flow_walk_expect_first_hdr (
346 vm, b, ip6_ext_next_header ((ip6_ext_header_t *) srh), srh->protocol,
347 IP_PROTOCOL_IPV6, &encap_length, (u8 **) &ulh);
349 if (PREDICT_FALSE (ulh == NULL))
351 if (ret == -1) /* Bypass the NF if ULH is not of expected type */
353 *next = SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS;
354 *cnt = &(sm->sid_bypass_counters);
355 *cnt_idx = ls_mem->index;
359 *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
360 *cnt = &(srm->sr_ls_invalid_counters);
365 /* Compute flow hash on ULH */
366 if (PREDICT_TRUE (ulh->protocol == IP_PROTOCOL_UDP ||
367 ulh->protocol == IP_PROTOCOL_TCP))
369 udp_header_t *ulh_l4_hdr = (udp_header_t *) (ulh + 1);
370 src_port = ulh_l4_hdr->src_port;
371 dst_port = ulh_l4_hdr->dst_port;
374 kv.key[0] = ulh->src_address.as_u64[0];
375 kv.key[1] = ulh->src_address.as_u64[1];
376 kv.key[2] = ulh->dst_address.as_u64[0];
377 kv.key[3] = ulh->dst_address.as_u64[1];
378 kv.key[4] = ((u64) src_port << 16) | ((u64) dst_port);
380 /* Lookup flow in hashtable */
381 if (!clib_bihash_search_40_8 (h, &kv, &value))
383 e = pool_elt_at_index (ls_mem->cache,
384 ad_flow_value_get_session_index (&value));
389 if (pool_elts (ls_mem->cache) >= ls_mem->cache_size)
391 if (!ad_flow_lru_free_one (ls_mem, now))
393 *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
394 *cnt = &(sm->sid_cache_full_counters);
395 *cnt_idx = ls_mem->index;
400 e = ad_flow_entry_alloc (ls_mem, now);
402 e->key.s_addr.ip6.as_u64[0] = ulh->src_address.as_u64[0];
403 e->key.s_addr.ip6.as_u64[1] = ulh->src_address.as_u64[1];
404 e->key.d_addr.ip6.as_u64[0] = ulh->dst_address.as_u64[0];
405 e->key.d_addr.ip6.as_u64[1] = ulh->dst_address.as_u64[1];
406 e->key.s_port = src_port;
407 e->key.d_port = dst_port;
408 e->key.proto = ulh->protocol;
410 kv.value = (u64) (e - ls_mem->cache);
414 clib_bihash_add_or_overwrite_stale_40_8 (h, &kv,
415 ad_flow_is_idle_entry_cb, &ctx);
419 /* Cache encapsulation headers */
420 if (PREDICT_FALSE (encap_length > e->rw_len))
422 vec_validate (e->rw_data, encap_length - 1);
424 clib_memcpy_fast (e->rw_data, ip, encap_length);
425 e->rw_len = encap_length;
428 ad_flow_entry_update_lru (ls_mem, e);
430 /* Decapsulate the packet */
431 vlib_buffer_advance (b, encap_length);
434 *next = SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE6;
436 /* Set Xconnect adjacency to VNF */
437 vnet_buffer (b)->ip.adj_index[VLIB_TX] = ls_mem->nh_adj;
440 static_always_inline void
441 end_ad_flow_processing_v4 (vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip,
442 srv6_ad_flow_localsid_t *ls_mem, u32 *next,
443 vlib_combined_counter_main_t **cnt, u32 *cnt_idx,
446 ip6_sr_main_t *srm = &sr_main;
447 srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
448 ip6_address_t *new_dst;
449 u32 encap_length = sizeof (ip6_header_t);
450 ip6_sr_header_t *srh;
451 clib_bihash_40_8_t *h = &ls_mem->ftable;
452 ip4_header_t *ulh = NULL;
453 u16 src_port = 0, dst_port = 0;
454 srv6_ad_flow_entry_t *e = NULL;
455 clib_bihash_kv_40_8_t kv, value;
456 srv6_ad_is_idle_entry_ctx_t ctx;
458 /* Find SRH in the extension header chain */
459 end_ad_flow_walk_expect_first_hdr (vm, b, (void *) (ip + 1), ip->protocol,
460 IP_PROTOCOL_IPV6_ROUTE, &encap_length,
463 /* Punt the packet if no SRH or SRH with SL = 0 */
464 if (PREDICT_FALSE (srh == NULL || srh->type != ROUTING_HEADER_TYPE_SR ||
465 srh->segments_left == 0))
467 *next = SRV6_AD_FLOW_LOCALSID_NEXT_PUNT;
468 *cnt = &(sm->sid_punt_counters);
469 *cnt_idx = ls_mem->index;
473 /* Decrement Segments Left and update Destination Address */
474 srh->segments_left -= 1;
475 new_dst = (ip6_address_t *) (srh->segments) + srh->segments_left;
476 ip->dst_address.as_u64[0] = new_dst->as_u64[0];
477 ip->dst_address.as_u64[1] = new_dst->as_u64[1];
479 /* Add SRH length to the total encapsulation size */
480 encap_length += ip6_ext_header_len ((ip6_ext_header_t *) srh);
482 /* Find the inner IPv6 header (ULH) */
483 int ret = end_ad_flow_walk_expect_first_hdr (
484 vm, b, ip6_ext_next_header ((ip6_ext_header_t *) srh), srh->protocol,
485 IP_PROTOCOL_IP_IN_IP, &encap_length, (u8 **) &ulh);
487 if (PREDICT_FALSE (ulh == NULL))
489 if (ret == -1) /* Bypass the NF if ULH is not of expected type */
491 *next = SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS;
492 *cnt = &(sm->sid_bypass_counters);
493 *cnt_idx = ls_mem->index;
497 *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
498 *cnt = &(srm->sr_ls_invalid_counters);
503 /* Compute flow hash on ULH */
504 if (PREDICT_TRUE (ulh->protocol == IP_PROTOCOL_UDP ||
505 ulh->protocol == IP_PROTOCOL_TCP))
507 udp_header_t *ulh_l4_hdr = (udp_header_t *) (ulh + 1);
508 src_port = ulh_l4_hdr->src_port;
509 dst_port = ulh_l4_hdr->dst_port;
512 kv.key[0] = *((u64 *) &ulh->address_pair);
513 kv.key[1] = ((u64) src_port << 16) | ((u64) dst_port);
518 /* Lookup flow in hashtable */
519 if (!clib_bihash_search_40_8 (h, &kv, &value))
521 e = pool_elt_at_index (ls_mem->cache,
522 ad_flow_value_get_session_index (&value));
527 if (pool_elts (ls_mem->cache) >= ls_mem->cache_size)
529 if (!ad_flow_lru_free_one (ls_mem, now))
531 *next = SRV6_AD_FLOW_LOCALSID_NEXT_ERROR;
532 *cnt = &(sm->sid_cache_full_counters);
533 *cnt_idx = ls_mem->index;
538 e = ad_flow_entry_alloc (ls_mem, now);
540 e->key.s_addr.ip4 = ulh->src_address;
541 e->key.d_addr.ip4 = ulh->dst_address;
542 e->key.s_port = src_port;
543 e->key.d_port = dst_port;
544 e->key.proto = ulh->protocol;
546 kv.value = (u64) (e - ls_mem->cache);
550 clib_bihash_add_or_overwrite_stale_40_8 (h, &kv,
551 ad_flow_is_idle_entry_cb, &ctx);
555 /* Cache encapsulation headers */
556 if (PREDICT_FALSE (encap_length > e->rw_len))
558 vec_validate (e->rw_data, encap_length - 1);
560 clib_memcpy_fast (e->rw_data, ip, encap_length);
561 e->rw_len = encap_length;
564 ad_flow_entry_update_lru (ls_mem, e);
566 /* Decapsulate the packet */
567 vlib_buffer_advance (b, encap_length);
570 *next = SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE4;
572 /* Set Xconnect adjacency to VNF */
573 vnet_buffer (b)->ip.adj_index[VLIB_TX] = ls_mem->nh_adj;
577 * @brief SRv6 AD Localsid graph node
580 srv6_ad_flow_localsid_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
583 ip6_sr_main_t *srm = &sr_main;
584 f64 now = vlib_time_now (vm);
585 u32 n_left_from, next_index, *from, *to_next, n_left_to_next;
586 u32 thread_index = vm->thread_index;
588 from = vlib_frame_vector_args (frame);
589 n_left_from = frame->n_vectors;
590 next_index = node->cached_next_index;
592 while (n_left_from > 0)
594 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
596 /* TODO: Dual/quad loop */
598 while (n_left_from > 0 && n_left_to_next > 0)
602 ip6_header_t *ip0 = 0;
603 ip6_sr_localsid_t *ls0;
604 srv6_ad_flow_localsid_t *ls_mem0;
606 vlib_combined_counter_main_t *cnt0 = &(srm->sr_ls_valid_counters);
616 b0 = vlib_get_buffer (vm, bi0);
617 ip0 = vlib_buffer_get_current (b0);
619 /* Retrieve local SID context based on IP DA (adj) */
620 ls0 = pool_elt_at_index (srm->localsids,
621 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
623 cnt_idx0 = ls0 - srm->localsids;
625 /* Retrieve local SID's plugin memory */
626 ls_mem0 = ls0->plugin_mem;
629 if (ls_mem0->inner_type == AD_TYPE_IP6)
630 end_ad_flow_processing_v6 (vm, b0, ip0, ls_mem0, &next0, &cnt0,
633 end_ad_flow_processing_v4 (vm, b0, ip0, ls_mem0, &next0, &cnt0,
636 /* Trace packet (if enabled) */
637 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
639 srv6_ad_flow_localsid_trace_t *tr =
640 vlib_add_trace (vm, node, b0, sizeof *tr);
641 tr->localsid_index = ls_mem0->index;
644 /* Increment the appropriate per-SID counter */
645 vlib_increment_combined_counter (
646 cnt0, thread_index, cnt_idx0, 1,
647 vlib_buffer_length_in_chain (vm, b0));
649 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
650 n_left_to_next, bi0, next0);
652 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
655 return frame->n_vectors;
658 VLIB_REGISTER_NODE (srv6_ad_flow_localsid_node) = {
659 .function = srv6_ad_flow_localsid_fn,
660 .name = "srv6-ad-flow-localsid",
661 .vector_size = sizeof (u32),
662 .format_trace = format_srv6_ad_flow_localsid_trace,
663 .type = VLIB_NODE_TYPE_INTERNAL,
664 .n_next_nodes = SRV6_AD_FLOW_LOCALSID_N_NEXT,
666 [SRV6_AD_FLOW_LOCALSID_NEXT_PUNT] = "ip6-local",
667 [SRV6_AD_FLOW_LOCALSID_NEXT_BYPASS] = "ip6-lookup",
668 [SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE4] = "ip4-rewrite",
669 [SRV6_AD_FLOW_LOCALSID_NEXT_REWRITE6] = "ip6-rewrite",
670 [SRV6_AD_FLOW_LOCALSID_NEXT_ERROR] = "error-drop",
674 /****************************** Rewriting node *******************************/
677 * @brief Graph node for applying a SR policy into an IPv6 packet.
681 srv6_ad4_flow_rewrite_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
684 ip6_sr_main_t *srm = &sr_main;
685 srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
686 u32 n_left_from, next_index, *from, *to_next;
689 from = vlib_frame_vector_args (frame);
690 n_left_from = frame->n_vectors;
691 next_index = node->cached_next_index;
693 while (n_left_from > 0)
697 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
699 /* TODO: Dual/quad loop */
701 while (n_left_from > 0 && n_left_to_next > 0)
705 ip4_header_t *ip0_encap = 0;
706 ip6_header_t *ip0 = 0;
707 ip6_sr_localsid_t *ls0;
708 srv6_ad_flow_localsid_t *ls0_mem;
709 srv6_ad_flow_entry_t *s0;
710 u32 next0 = SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP;
720 b0 = vlib_get_buffer (vm, bi0);
721 ip0_encap = vlib_buffer_get_current (b0);
722 ls0 = pool_elt_at_index (
724 sm->sw_iface_localsid4[vnet_buffer (b0)->sw_if_index[VLIB_RX]]);
725 ls0_mem = ls0->plugin_mem;
727 if (PREDICT_FALSE (ls0_mem == NULL))
729 next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
730 b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
734 clib_bihash_kv_40_8_t kv0, value0;
736 /* Compute flow hash */
738 if (PREDICT_TRUE (ip0_encap->protocol == IP_PROTOCOL_UDP ||
739 ip0_encap->protocol == IP_PROTOCOL_TCP))
741 udp_header_t *udp0 = (udp_header_t *) (ip0_encap + 1);
743 ((u64) udp0->src_port << 16) | ((u64) udp0->dst_port);
746 kv0.key[0] = *((u64 *) &ip0_encap->address_pair);
752 /* Lookup flow in hashtable */
753 if (clib_bihash_search_40_8 (&ls0_mem->ftable, &kv0, &value0) <
757 next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
758 b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
763 s0 = pool_elt_at_index (
764 ls0_mem->cache, ad_flow_value_get_session_index (&value0));
766 ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >=
767 (s0->rw_len + b0->current_data));
769 clib_memcpy_fast (((u8 *) ip0_encap) - s0->rw_len,
770 s0->rw_data, s0->rw_len);
771 vlib_buffer_advance (b0, -(word) s0->rw_len);
773 ip0 = vlib_buffer_get_current (b0);
775 /* Update inner IPv4 TTL and checksum */
779 ip0_encap->checksum + clib_host_to_net_u16 (0x0100);
780 checksum0 += checksum0 >= 0xffff;
781 ip0_encap->checksum = checksum0;
783 /* Update outer IPv6 length (in case it has changed) */
784 new_l0 = s0->rw_len - sizeof (ip6_header_t) +
785 clib_net_to_host_u16 (ip0_encap->length);
786 ip0->payload_length = clib_host_to_net_u16 (new_l0);
790 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
791 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
793 srv6_ad_flow_rewrite_trace_t *tr =
794 vlib_add_trace (vm, node, b0, sizeof *tr);
797 if (next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR)
803 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
804 sizeof tr->src.as_u8);
805 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
806 sizeof tr->dst.as_u8);
810 /* Increment per-SID AD rewrite counters */
811 vlib_increment_combined_counter (
812 ((next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR) ?
813 &(sm->rw_invalid_counters) :
814 &(sm->rw_valid_counters)),
815 vm->thread_index, ls0_mem->index, 1,
816 vlib_buffer_length_in_chain (vm, b0));
818 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
819 n_left_to_next, bi0, next0);
824 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
827 /* Update counters */
828 vlib_node_increment_counter (vm, srv6_ad4_flow_rewrite_node.index,
829 SRV6_AD_FLOW_REWRITE_COUNTER_PROCESSED,
832 return frame->n_vectors;
835 VLIB_REGISTER_NODE (srv6_ad4_flow_rewrite_node) = {
836 .function = srv6_ad4_flow_rewrite_fn,
837 .name = "srv6-ad4-flow-rewrite",
838 .vector_size = sizeof (u32),
839 .format_trace = format_srv6_ad_flow_rewrite_trace,
840 .type = VLIB_NODE_TYPE_INTERNAL,
841 .n_errors = SRV6_AD_FLOW_REWRITE_N_COUNTERS,
842 .error_strings = srv6_ad_flow_rewrite_counter_strings,
843 .n_next_nodes = SRV6_AD_FLOW_REWRITE_N_NEXT,
845 [SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP] = "ip6-lookup",
846 [SRV6_AD_FLOW_REWRITE_NEXT_ERROR] = "error-drop",
851 * @brief Graph node for applying a SR policy into an IPv6 packet.
855 srv6_ad6_flow_rewrite_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
858 ip6_sr_main_t *srm = &sr_main;
859 srv6_ad_flow_main_t *sm = &srv6_ad_flow_main;
860 u32 n_left_from, next_index, *from, *to_next;
863 from = vlib_frame_vector_args (frame);
864 n_left_from = frame->n_vectors;
865 next_index = node->cached_next_index;
867 while (n_left_from > 0)
871 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
873 /* TODO: Dual/quad loop */
875 while (n_left_from > 0 && n_left_to_next > 0)
879 ip6_header_t *ip0 = 0, *ip0_encap = 0;
880 ip6_sr_localsid_t *ls0;
881 srv6_ad_flow_localsid_t *ls0_mem;
882 srv6_ad_flow_entry_t *s0;
883 u32 next0 = SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP;
893 b0 = vlib_get_buffer (vm, bi0);
894 ip0_encap = vlib_buffer_get_current (b0);
895 ls0 = pool_elt_at_index (
897 sm->sw_iface_localsid6[vnet_buffer (b0)->sw_if_index[VLIB_RX]]);
898 ls0_mem = ls0->plugin_mem;
900 if (PREDICT_FALSE (ls0_mem == NULL))
902 next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
903 b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
907 /* ############################################# */
908 clib_bihash_kv_40_8_t kv0, value0;
910 /* Compute flow hash */
912 if (PREDICT_TRUE (ip0_encap->protocol == IP_PROTOCOL_UDP ||
913 ip0_encap->protocol == IP_PROTOCOL_TCP))
915 udp_header_t *udp0 = (udp_header_t *) (ip0_encap + 1);
917 ((u64) udp0->src_port << 16) | ((u64) udp0->dst_port);
920 kv0.key[0] = ip0_encap->src_address.as_u64[0];
921 kv0.key[1] = ip0_encap->src_address.as_u64[1];
922 kv0.key[2] = ip0_encap->dst_address.as_u64[0];
923 kv0.key[3] = ip0_encap->dst_address.as_u64[1];
926 /* Lookup flow in hashtable */
927 if (clib_bihash_search_40_8 (&ls0_mem->ftable, &kv0, &value0))
930 next0 = SRV6_AD_FLOW_REWRITE_NEXT_ERROR;
931 b0->error = node->errors[SRV6_AD_FLOW_REWRITE_COUNTER_NO_RW];
936 s0 = pool_elt_at_index (
937 ls0_mem->cache, ad_flow_value_get_session_index (&value0));
940 ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >=
941 (s0->rw_len + b0->current_data));
943 clib_memcpy_fast (((u8 *) ip0_encap) - s0->rw_len,
944 s0->rw_data, s0->rw_len);
945 vlib_buffer_advance (b0, -(word) s0->rw_len);
947 ip0 = vlib_buffer_get_current (b0);
949 /* Update inner IPv6 hop limit */
950 ip0_encap->hop_limit -= 1;
952 /* Update outer IPv6 length (in case it has changed) */
953 new_l0 = s0->rw_len +
954 clib_net_to_host_u16 (ip0_encap->payload_length);
955 ip0->payload_length = clib_host_to_net_u16 (new_l0);
959 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
960 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
962 srv6_ad_flow_rewrite_trace_t *tr =
963 vlib_add_trace (vm, node, b0, sizeof *tr);
966 if (next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR)
972 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
973 sizeof tr->src.as_u8);
974 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
975 sizeof tr->dst.as_u8);
979 /* Increment per-SID AD rewrite counters */
980 vlib_increment_combined_counter (
981 ((next0 == SRV6_AD_FLOW_REWRITE_NEXT_ERROR) ?
982 &(sm->rw_invalid_counters) :
983 &(sm->rw_valid_counters)),
984 vm->thread_index, ls0_mem->index, 1,
985 vlib_buffer_length_in_chain (vm, b0));
987 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
988 n_left_to_next, bi0, next0);
993 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
996 /* Update counters */
997 vlib_node_increment_counter (vm, srv6_ad6_flow_rewrite_node.index,
998 SRV6_AD_FLOW_REWRITE_COUNTER_PROCESSED,
1001 return frame->n_vectors;
1004 VLIB_REGISTER_NODE (srv6_ad6_flow_rewrite_node) = {
1005 .function = srv6_ad6_flow_rewrite_fn,
1006 .name = "srv6-ad6-flow-rewrite",
1007 .vector_size = sizeof (u32),
1008 .format_trace = format_srv6_ad_flow_rewrite_trace,
1009 .type = VLIB_NODE_TYPE_INTERNAL,
1010 .n_errors = SRV6_AD_FLOW_REWRITE_N_COUNTERS,
1011 .error_strings = srv6_ad_flow_rewrite_counter_strings,
1012 .n_next_nodes = SRV6_AD_FLOW_REWRITE_N_NEXT,
1014 [SRV6_AD_FLOW_REWRITE_NEXT_LOOKUP] = "ip6-lookup",
1015 [SRV6_AD_FLOW_REWRITE_NEXT_ERROR] = "error-drop",
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */