2 * sr_policy_rewrite.c: ipv6 sr policy creation
4 * Copyright (c) 2016 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
20 * @brief SR policy creation and application
22 * Create an SR policy.
23 * An SR policy can be either of 'default' type or 'spray' type
24 * An SR policy has attached a list of SID lists.
25 * In case the SR policy is a default one it will load balance among them.
26 * An SR policy has associated a BindingSID.
27 * In case any packet arrives with IPv6 DA == BindingSID then the SR policy
28 * associated to such bindingSID will be applied to such packet.
30 * SR policies can be applied either by using IPv6 encapsulation or
31 * SRH insertion. Both methods can be found on this file.
33 * Traffic input usually is IPv6 packets. However it is possible to have
34 * IPv4 packets or L2 frames. (that are encapsulated into IPv6 with SRH)
 * This file provides the appropriate VPP graph nodes to do any of these
41 #include <vlib/vlib.h>
42 #include <vnet/vnet.h>
43 #include <vnet/srv6/sr.h>
44 #include <vnet/ip/ip.h>
45 #include <vnet/srv6/sr_packet.h>
46 #include <vnet/ip/ip6_packet.h>
47 #include <vnet/fib/ip6_fib.h>
48 #include <vnet/dpo/dpo.h>
49 #include <vnet/dpo/replicate_dpo.h>
51 #include <vppinfra/error.h>
52 #include <vppinfra/elog.h>
55 * @brief SR policy rewrite trace
59 ip6_address_t src, dst;
60 } sr_policy_rewrite_trace_t;
63 #define foreach_sr_policy_rewrite_next \
64 _(IP6_LOOKUP, "ip6-lookup") \
65 _(ERROR, "error-drop")
69 #define _(s,n) SR_POLICY_REWRITE_NEXT_##s,
70 foreach_sr_policy_rewrite_next
72 SR_POLICY_REWRITE_N_NEXT,
73 } sr_policy_rewrite_next_t;
75 /* SR rewrite errors */
76 #define foreach_sr_policy_rewrite_error \
77 _(INTERNAL_ERROR, "Segment Routing undefined error") \
78 _(BSID_ZERO, "BSID with SL = 0") \
79 _(COUNTER_TOTAL, "SR steered IPv6 packets") \
80 _(COUNTER_ENCAP, "SR: Encaps packets") \
81 _(COUNTER_INSERT, "SR: SRH inserted packets") \
82 _(COUNTER_BSID, "SR: BindingSID steered packets")
86 #define _(sym,str) SR_POLICY_REWRITE_ERROR_##sym,
87 foreach_sr_policy_rewrite_error
89 SR_POLICY_REWRITE_N_ERROR,
90 } sr_policy_rewrite_error_t;
92 static char *sr_policy_rewrite_error_strings[] = {
93 #define _(sym,string) string,
94 foreach_sr_policy_rewrite_error
99 * @brief Dynamically added SR SL DPO type
101 static dpo_type_t sr_pr_encaps_dpo_type;
102 static dpo_type_t sr_pr_insert_dpo_type;
103 static dpo_type_t sr_pr_bsid_encaps_dpo_type;
104 static dpo_type_t sr_pr_bsid_insert_dpo_type;
107 * @brief IPv6 SA for encapsulated packets
109 static ip6_address_t sr_pr_encaps_src;
111 /******************* SR rewrite set encaps IPv6 source addr *******************/
112 /* Note: This is temporal. We don't know whether to follow this path or
113 take the ip address of a loopback interface or even the OIF */
116 sr_set_source (ip6_address_t * address)
118 clib_memcpy_fast (&sr_pr_encaps_src, address, sizeof (sr_pr_encaps_src));
121 static clib_error_t *
122 set_sr_src_command_fn (vlib_main_t * vm, unformat_input_t * input,
123 vlib_cli_command_t * cmd)
125 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
128 (input, "addr %U", unformat_ip6_address, &sr_pr_encaps_src))
131 return clib_error_return (0, "No address specified");
133 return clib_error_return (0, "No address specified");
137 VLIB_CLI_COMMAND (set_sr_src_command, static) = {
138 .path = "set sr encaps source",
139 .short_help = "set sr encaps source addr <ip6_addr>",
140 .function = set_sr_src_command_fn,
144 /*********************** SR rewrite string computation ************************/
146 * @brief SR rewrite string computation for IPv6 encapsulation (inline)
148 * @param sl is a vector of IPv6 addresses composing the Segment List
150 * @return precomputed rewrite string for encapsulation
153 compute_rewrite_encaps (ip6_address_t * sl)
156 ip6_sr_header_t *srh;
157 ip6_address_t *addrp, *this_address;
158 u32 header_length = 0;
162 header_length += IPv6_DEFAULT_HEADER_LENGTH;
163 if (vec_len (sl) > 1)
165 header_length += sizeof (ip6_sr_header_t);
166 header_length += vec_len (sl) * sizeof (ip6_address_t);
169 vec_validate (rs, header_length - 1);
171 iph = (ip6_header_t *) rs;
172 iph->ip_version_traffic_class_and_flow_label =
173 clib_host_to_net_u32 (0 | ((6 & 0xF) << 28));
174 iph->src_address.as_u64[0] = sr_pr_encaps_src.as_u64[0];
175 iph->src_address.as_u64[1] = sr_pr_encaps_src.as_u64[1];
176 iph->payload_length = header_length - IPv6_DEFAULT_HEADER_LENGTH;
177 iph->protocol = IP_PROTOCOL_IPV6;
178 iph->hop_limit = IPv6_DEFAULT_HOP_LIMIT;
180 if (vec_len (sl) > 1)
182 srh = (ip6_sr_header_t *) (iph + 1);
183 iph->protocol = IP_PROTOCOL_IPV6_ROUTE;
184 srh->protocol = IP_PROTOCOL_IPV6;
185 srh->type = ROUTING_HEADER_TYPE_SR;
186 srh->segments_left = vec_len (sl) - 1;
187 srh->first_segment = vec_len (sl) - 1;
188 srh->length = ((sizeof (ip6_sr_header_t) +
189 (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1;
191 srh->reserved = 0x00;
192 addrp = srh->segments + vec_len (sl) - 1;
193 vec_foreach (this_address, sl)
195 clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
196 sizeof (ip6_address_t));
200 iph->dst_address.as_u64[0] = sl->as_u64[0];
201 iph->dst_address.as_u64[1] = sl->as_u64[1];
206 * @brief SR rewrite string computation for SRH insertion (inline)
208 * @param sl is a vector of IPv6 addresses composing the Segment List
210 * @return precomputed rewrite string for SRH insertion
213 compute_rewrite_insert (ip6_address_t * sl)
215 ip6_sr_header_t *srh;
216 ip6_address_t *addrp, *this_address;
217 u32 header_length = 0;
221 header_length += sizeof (ip6_sr_header_t);
222 header_length += (vec_len (sl) + 1) * sizeof (ip6_address_t);
224 vec_validate (rs, header_length - 1);
226 srh = (ip6_sr_header_t *) rs;
227 srh->type = ROUTING_HEADER_TYPE_SR;
228 srh->segments_left = vec_len (sl);
229 srh->first_segment = vec_len (sl);
230 srh->length = ((sizeof (ip6_sr_header_t) +
231 ((vec_len (sl) + 1) * sizeof (ip6_address_t))) / 8) - 1;
233 srh->reserved = 0x0000;
234 addrp = srh->segments + vec_len (sl);
235 vec_foreach (this_address, sl)
237 clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
238 sizeof (ip6_address_t));
245 * @brief SR rewrite string computation for SRH insertion with BSID (inline)
247 * @param sl is a vector of IPv6 addresses composing the Segment List
249 * @return precomputed rewrite string for SRH insertion with BSID
252 compute_rewrite_bsid (ip6_address_t * sl)
254 ip6_sr_header_t *srh;
255 ip6_address_t *addrp, *this_address;
256 u32 header_length = 0;
260 header_length += sizeof (ip6_sr_header_t);
261 header_length += vec_len (sl) * sizeof (ip6_address_t);
263 vec_validate (rs, header_length - 1);
265 srh = (ip6_sr_header_t *) rs;
266 srh->type = ROUTING_HEADER_TYPE_SR;
267 srh->segments_left = vec_len (sl) - 1;
268 srh->first_segment = vec_len (sl) - 1;
269 srh->length = ((sizeof (ip6_sr_header_t) +
270 (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1;
272 srh->reserved = 0x0000;
273 addrp = srh->segments + vec_len (sl) - 1;
274 vec_foreach (this_address, sl)
276 clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
277 sizeof (ip6_address_t));
283 /*************************** SR LB helper functions **************************/
285 * @brief Creates a Segment List and adds it to an SR policy
287 * Creates a Segment List and adds it to the SR policy. Notice that the SL are
288 * not necessarily unique. Hence there might be two Segment List within the
289 * same SR Policy with exactly the same segments and same weight.
291 * @param sr_policy is the SR policy where the SL will be added
292 * @param sl is a vector of IPv6 addresses composing the Segment List
293 * @param weight is the weight of the SegmentList (for load-balancing purposes)
294 * @param is_encap represents the mode (SRH insertion vs Encapsulation)
296 * @return pointer to the just created segment list
298 static inline ip6_sr_sl_t *
299 create_sl (ip6_sr_policy_t * sr_policy, ip6_address_t * sl, u32 weight,
302 ip6_sr_main_t *sm = &sr_main;
303 ip6_sr_sl_t *segment_list;
305 pool_get (sm->sid_lists, segment_list);
306 clib_memset (segment_list, 0, sizeof (*segment_list));
308 vec_add1 (sr_policy->segments_lists, segment_list - sm->sid_lists);
310 /* Fill in segment list */
311 segment_list->weight =
312 (weight != (u32) ~ 0 ? weight : SR_SEGMENT_LIST_WEIGHT_DEFAULT);
313 segment_list->segments = vec_dup (sl);
317 segment_list->rewrite = compute_rewrite_encaps (sl);
318 segment_list->rewrite_bsid = segment_list->rewrite;
322 segment_list->rewrite = compute_rewrite_insert (sl);
323 segment_list->rewrite_bsid = compute_rewrite_bsid (sl);
327 dpo_reset (&segment_list->bsid_dpo);
328 dpo_reset (&segment_list->ip6_dpo);
329 dpo_reset (&segment_list->ip4_dpo);
333 dpo_set (&segment_list->ip6_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP6,
334 segment_list - sm->sid_lists);
335 dpo_set (&segment_list->ip4_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP4,
336 segment_list - sm->sid_lists);
337 dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_encaps_dpo_type,
338 DPO_PROTO_IP6, segment_list - sm->sid_lists);
342 dpo_set (&segment_list->ip6_dpo, sr_pr_insert_dpo_type, DPO_PROTO_IP6,
343 segment_list - sm->sid_lists);
344 dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_insert_dpo_type,
345 DPO_PROTO_IP6, segment_list - sm->sid_lists);
352 * @brief Updates the Load Balancer after an SR Policy change
354 * @param sr_policy is the modified SR Policy
357 update_lb (ip6_sr_policy_t * sr_policy)
359 flow_hash_config_t fhc;
361 ip6_sr_sl_t *segment_list;
362 ip6_sr_main_t *sm = &sr_main;
363 load_balance_path_t path;
364 path.path_index = FIB_NODE_INDEX_INVALID;
365 load_balance_path_t *ip4_path_vector = 0;
366 load_balance_path_t *ip6_path_vector = 0;
367 load_balance_path_t *b_path_vector = 0;
369 /* In case LB does not exist, create it */
370 if (!dpo_id_is_valid (&sr_policy->bsid_dpo))
373 .fp_proto = FIB_PROTOCOL_IP6,
376 .ip6 = sr_policy->bsid,
380 /* Add FIB entry for BSID */
381 fhc = fib_table_get_flow_hash_config (sr_policy->fib_table,
384 dpo_set (&sr_policy->bsid_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6,
385 load_balance_create (0, DPO_PROTO_IP6, fhc));
387 dpo_set (&sr_policy->ip6_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6,
388 load_balance_create (0, DPO_PROTO_IP6, fhc));
390 /* Update FIB entry's to point to the LB DPO in the main FIB and hidden one */
391 fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6,
392 sr_policy->fib_table),
394 FIB_ENTRY_FLAG_EXCLUSIVE,
395 &sr_policy->bsid_dpo);
397 fib_table_entry_special_dpo_update (sm->fib_table_ip6,
400 FIB_ENTRY_FLAG_EXCLUSIVE,
401 &sr_policy->ip6_dpo);
403 if (sr_policy->is_encap)
405 dpo_set (&sr_policy->ip4_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP4,
406 load_balance_create (0, DPO_PROTO_IP4, fhc));
408 fib_table_entry_special_dpo_update (sm->fib_table_ip4,
411 FIB_ENTRY_FLAG_EXCLUSIVE,
412 &sr_policy->ip4_dpo);
417 /* Create the LB path vector */
418 //path_vector = vec_new(load_balance_path_t, vec_len(sr_policy->segments_lists));
419 vec_foreach (sl_index, sr_policy->segments_lists)
421 segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
422 path.path_dpo = segment_list->bsid_dpo;
423 path.path_weight = segment_list->weight;
424 vec_add1 (b_path_vector, path);
425 path.path_dpo = segment_list->ip6_dpo;
426 vec_add1 (ip6_path_vector, path);
427 if (sr_policy->is_encap)
429 path.path_dpo = segment_list->ip4_dpo;
430 vec_add1 (ip4_path_vector, path);
434 /* Update LB multipath */
435 load_balance_multipath_update (&sr_policy->bsid_dpo, b_path_vector,
436 LOAD_BALANCE_FLAG_NONE);
437 load_balance_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector,
438 LOAD_BALANCE_FLAG_NONE);
439 if (sr_policy->is_encap)
440 load_balance_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector,
441 LOAD_BALANCE_FLAG_NONE);
444 vec_free (b_path_vector);
445 vec_free (ip6_path_vector);
446 vec_free (ip4_path_vector);
451 * @brief Updates the Replicate DPO after an SR Policy change
453 * @param sr_policy is the modified SR Policy (type spray)
456 update_replicate (ip6_sr_policy_t * sr_policy)
459 ip6_sr_sl_t *segment_list;
460 ip6_sr_main_t *sm = &sr_main;
461 load_balance_path_t path;
462 path.path_index = FIB_NODE_INDEX_INVALID;
463 load_balance_path_t *b_path_vector = 0;
464 load_balance_path_t *ip6_path_vector = 0;
465 load_balance_path_t *ip4_path_vector = 0;
467 /* In case LB does not exist, create it */
468 if (!dpo_id_is_valid (&sr_policy->bsid_dpo))
470 dpo_set (&sr_policy->bsid_dpo, DPO_REPLICATE,
471 DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6));
473 dpo_set (&sr_policy->ip6_dpo, DPO_REPLICATE,
474 DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6));
476 /* Update FIB entry's DPO to point to SR without LB */
478 .fp_proto = FIB_PROTOCOL_IP6,
481 .ip6 = sr_policy->bsid,
484 fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6,
485 sr_policy->fib_table),
487 FIB_ENTRY_FLAG_EXCLUSIVE,
488 &sr_policy->bsid_dpo);
490 fib_table_entry_special_dpo_update (sm->fib_table_ip6,
493 FIB_ENTRY_FLAG_EXCLUSIVE,
494 &sr_policy->ip6_dpo);
496 if (sr_policy->is_encap)
498 dpo_set (&sr_policy->ip4_dpo, DPO_REPLICATE, DPO_PROTO_IP4,
499 replicate_create (0, DPO_PROTO_IP4));
501 fib_table_entry_special_dpo_update (sm->fib_table_ip4,
504 FIB_ENTRY_FLAG_EXCLUSIVE,
505 &sr_policy->ip4_dpo);
510 /* Create the replicate path vector */
511 path.path_weight = 1;
512 vec_foreach (sl_index, sr_policy->segments_lists)
514 segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
515 path.path_dpo = segment_list->bsid_dpo;
516 vec_add1 (b_path_vector, path);
517 path.path_dpo = segment_list->ip6_dpo;
518 vec_add1 (ip6_path_vector, path);
519 if (sr_policy->is_encap)
521 path.path_dpo = segment_list->ip4_dpo;
522 vec_add1 (ip4_path_vector, path);
526 /* Update replicate multipath */
527 replicate_multipath_update (&sr_policy->bsid_dpo, b_path_vector);
528 replicate_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector);
529 if (sr_policy->is_encap)
530 replicate_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector);
533 /******************************* SR rewrite API *******************************/
534 /* Three functions for handling sr policies:
538 * All of them are API. CLI function on sr_policy_command_fn */
541 * @brief Create a new SR policy
543 * @param bsid is the bindingSID of the SR Policy
544 * @param segments is a vector of IPv6 address composing the segment list
545 * @param weight is the weight of the sid list. optional.
546 * @param behavior is the behavior of the SR policy. (default//spray)
547 * @param fib_table is the VRF where to install the FIB entry for the BSID
548 * @param is_encap (bool) whether SR policy should behave as Encap/SRH Insertion
550 * @return 0 if correct, else error
553 sr_policy_add (ip6_address_t * bsid, ip6_address_t * segments,
554 u32 weight, u8 behavior, u32 fib_table, u8 is_encap)
556 ip6_sr_main_t *sm = &sr_main;
557 ip6_sr_policy_t *sr_policy = 0;
560 /* Search for existing keys (BSID) */
561 p = mhash_get (&sm->sr_policies_index_hash, bsid);
564 /* Add SR policy that already exists; complain */
568 /* Search collision in FIB entries */
569 /* Explanation: It might be possible that some other entity has already
570 * created a route for the BSID. This in theory is impossible, but in
571 * practise we could see it. Assert it and scream if needed */
573 .fp_proto = FIB_PROTOCOL_IP6,
580 /* Lookup the FIB index associated to the table selected */
581 u32 fib_index = fib_table_find (FIB_PROTOCOL_IP6,
582 (fib_table != (u32) ~ 0 ? fib_table : 0));
586 /* Lookup whether there exists an entry for the BSID */
587 fib_node_index_t fei = fib_table_lookup_exact_match (fib_index, &pfx);
588 if (FIB_NODE_INDEX_INVALID != fei)
589 return -12; //There is an entry for such lookup
591 /* Add an SR policy object */
592 pool_get (sm->sr_policies, sr_policy);
593 clib_memset (sr_policy, 0, sizeof (*sr_policy));
594 clib_memcpy_fast (&sr_policy->bsid, bsid, sizeof (ip6_address_t));
595 sr_policy->type = behavior;
596 sr_policy->fib_table = (fib_table != (u32) ~ 0 ? fib_table : 0); //Is default FIB 0 ?
597 sr_policy->is_encap = is_encap;
600 mhash_set (&sm->sr_policies_index_hash, bsid, sr_policy - sm->sr_policies,
603 /* Create a segment list and add the index to the SR policy */
604 create_sl (sr_policy, segments, weight, is_encap);
606 /* If FIB doesnt exist, create them */
607 if (sm->fib_table_ip6 == (u32) ~ 0)
609 sm->fib_table_ip6 = fib_table_create_and_lock (FIB_PROTOCOL_IP6,
611 "SRv6 steering of IP6 prefixes through BSIDs");
612 sm->fib_table_ip4 = fib_table_create_and_lock (FIB_PROTOCOL_IP6,
614 "SRv6 steering of IP4 prefixes through BSIDs");
617 /* Create IPv6 FIB for the BindingSID attached to the DPO of the only SL */
618 if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
619 update_lb (sr_policy);
620 else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
621 update_replicate (sr_policy);
626 * @brief Delete a SR policy
628 * @param bsid is the bindingSID of the SR Policy
629 * @param index is the index of the SR policy
631 * @return 0 if correct, else error
634 sr_policy_del (ip6_address_t * bsid, u32 index)
636 ip6_sr_main_t *sm = &sr_main;
637 ip6_sr_policy_t *sr_policy = 0;
638 ip6_sr_sl_t *segment_list;
644 p = mhash_get (&sm->sr_policies_index_hash, bsid);
646 sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
652 sr_policy = pool_elt_at_index (sm->sr_policies, index);
657 /* Remove BindingSID FIB entry */
659 .fp_proto = FIB_PROTOCOL_IP6,
662 .ip6 = sr_policy->bsid,
667 fib_table_entry_special_remove (fib_table_find (FIB_PROTOCOL_IP6,
668 sr_policy->fib_table),
669 &pfx, FIB_SOURCE_SR);
671 fib_table_entry_special_remove (sm->fib_table_ip6, &pfx, FIB_SOURCE_SR);
673 if (sr_policy->is_encap)
674 fib_table_entry_special_remove (sm->fib_table_ip4, &pfx, FIB_SOURCE_SR);
676 if (dpo_id_is_valid (&sr_policy->bsid_dpo))
678 dpo_reset (&sr_policy->bsid_dpo);
679 dpo_reset (&sr_policy->ip4_dpo);
680 dpo_reset (&sr_policy->ip6_dpo);
683 /* Clean SID Lists */
684 vec_foreach (sl_index, sr_policy->segments_lists)
686 segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
687 vec_free (segment_list->segments);
688 vec_free (segment_list->rewrite);
689 if (!sr_policy->is_encap)
690 vec_free (segment_list->rewrite_bsid);
691 pool_put_index (sm->sid_lists, *sl_index);
694 /* Remove SR policy entry */
695 mhash_unset (&sm->sr_policies_index_hash, &sr_policy->bsid, NULL);
696 pool_put (sm->sr_policies, sr_policy);
698 /* If FIB empty unlock it */
699 if (!pool_elts (sm->sr_policies) && !pool_elts (sm->steer_policies))
701 fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6, FIB_SOURCE_SR);
702 fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP6, FIB_SOURCE_SR);
703 sm->fib_table_ip6 = (u32) ~ 0;
704 sm->fib_table_ip4 = (u32) ~ 0;
711 * @brief Modify an existing SR policy
713 * The possible modifications are adding a new Segment List, modifying an
714 * existing Segment List (modify the weight only) and delete a given
715 * Segment List from the SR Policy.
717 * @param bsid is the bindingSID of the SR Policy
718 * @param index is the index of the SR policy
719 * @param fib_table is the VRF where to install the FIB entry for the BSID
720 * @param operation is the operation to perform (among the top ones)
721 * @param segments is a vector of IPv6 address composing the segment list
722 * @param sl_index is the index of the Segment List to modify/delete
723 * @param weight is the weight of the sid list. optional.
724 * @param is_encap Mode. Encapsulation or SRH insertion.
726 * @return 0 if correct, else error
729 sr_policy_mod (ip6_address_t * bsid, u32 index, u32 fib_table,
730 u8 operation, ip6_address_t * segments, u32 sl_index,
733 ip6_sr_main_t *sm = &sr_main;
734 ip6_sr_policy_t *sr_policy = 0;
735 ip6_sr_sl_t *segment_list;
736 u32 *sl_index_iterate;
741 p = mhash_get (&sm->sr_policies_index_hash, bsid);
743 sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
749 sr_policy = pool_elt_at_index (sm->sr_policies, index);
754 if (operation == 1) /* Add SR List to an existing SR policy */
756 /* Create the new SL */
758 create_sl (sr_policy, segments, weight, sr_policy->is_encap);
760 /* Create a new LB DPO */
761 if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
762 update_lb (sr_policy);
763 else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
764 update_replicate (sr_policy);
766 else if (operation == 2) /* Delete SR List from an existing SR policy */
768 /* Check that currently there are more than one SID list */
769 if (vec_len (sr_policy->segments_lists) == 1)
772 /* Check that the SR list does exist and is assigned to the sr policy */
773 vec_foreach (sl_index_iterate, sr_policy->segments_lists)
774 if (*sl_index_iterate == sl_index)
777 if (*sl_index_iterate != sl_index)
780 /* Remove the lucky SR list that is being kicked out */
781 segment_list = pool_elt_at_index (sm->sid_lists, sl_index);
782 vec_free (segment_list->segments);
783 vec_free (segment_list->rewrite);
784 if (!sr_policy->is_encap)
785 vec_free (segment_list->rewrite_bsid);
786 pool_put_index (sm->sid_lists, sl_index);
787 vec_del1 (sr_policy->segments_lists,
788 sl_index_iterate - sr_policy->segments_lists);
790 /* Create a new LB DPO */
791 if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
792 update_lb (sr_policy);
793 else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
794 update_replicate (sr_policy);
796 else if (operation == 3) /* Modify the weight of an existing SR List */
798 /* Find the corresponding SL */
799 vec_foreach (sl_index_iterate, sr_policy->segments_lists)
800 if (*sl_index_iterate == sl_index)
803 if (*sl_index_iterate != sl_index)
806 /* Change the weight */
807 segment_list = pool_elt_at_index (sm->sid_lists, sl_index);
808 segment_list->weight = weight;
811 if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
812 update_lb (sr_policy);
814 else /* Incorrect op. */
821 * @brief CLI for 'sr policies' command family
823 static clib_error_t *
824 sr_policy_command_fn (vlib_main_t * vm, unformat_input_t * input,
825 vlib_cli_command_t * cmd)
828 char is_del = 0, is_add = 0, is_mod = 0;
830 ip6_address_t bsid, next_address;
831 u32 sr_policy_index = (u32) ~ 0, sl_index = (u32) ~ 0;
832 u32 weight = (u32) ~ 0, fib_table = (u32) ~ 0;
833 ip6_address_t *segments = 0, *this_seg;
838 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
840 if (!is_add && !is_mod && !is_del && unformat (input, "add"))
842 else if (!is_add && !is_mod && !is_del && unformat (input, "del"))
844 else if (!is_add && !is_mod && !is_del && unformat (input, "mod"))
847 && unformat (input, "bsid %U", unformat_ip6_address, &bsid))
849 else if (!is_add && !policy_set
850 && unformat (input, "index %d", &sr_policy_index))
852 else if (unformat (input, "weight %d", &weight));
854 if (unformat (input, "next %U", unformat_ip6_address, &next_address))
856 vec_add2 (segments, this_seg, 1);
857 clib_memcpy_fast (this_seg->as_u8, next_address.as_u8,
860 else if (unformat (input, "add sl"))
862 else if (unformat (input, "del sl index %d", &sl_index))
864 else if (unformat (input, "mod sl index %d", &sl_index))
866 else if (fib_table == (u32) ~ 0
867 && unformat (input, "fib-table %d", &fib_table));
868 else if (unformat (input, "encap"))
870 else if (unformat (input, "insert"))
872 else if (unformat (input, "spray"))
878 if (!is_add && !is_mod && !is_del)
879 return clib_error_return (0, "Incorrect CLI");
882 return clib_error_return (0, "No SR policy BSID or index specified");
886 if (vec_len (segments) == 0)
887 return clib_error_return (0, "No Segment List specified");
888 rv = sr_policy_add (&bsid, segments, weight,
889 (is_spray ? SR_POLICY_TYPE_SPRAY :
890 SR_POLICY_TYPE_DEFAULT), fib_table, is_encap);
894 rv = sr_policy_del ((sr_policy_index != (u32) ~ 0 ? NULL : &bsid),
899 return clib_error_return (0, "No SL modification specified");
900 if (operation != 1 && sl_index == (u32) ~ 0)
901 return clib_error_return (0, "No Segment List index specified");
902 if (operation == 1 && vec_len (segments) == 0)
903 return clib_error_return (0, "No Segment List specified");
904 if (operation == 3 && weight == (u32) ~ 0)
905 return clib_error_return (0, "No new weight for the SL specified");
906 rv = sr_policy_mod ((sr_policy_index != (u32) ~ 0 ? NULL : &bsid),
907 sr_policy_index, fib_table, operation, segments,
919 return clib_error_return (0,
920 "There is already a FIB entry for the BindingSID address.\n"
921 "The SR policy could not be created.");
923 return clib_error_return (0, "The specified FIB table does not exist.");
925 return clib_error_return (0,
926 "The selected SR policy only contains ONE segment list. "
927 "Please remove the SR policy instead");
929 return clib_error_return (0,
930 "Could not delete the segment list. "
931 "It is not associated with that SR policy.");
933 return clib_error_return (0,
934 "Could not modify the segment list. "
935 "The given SL is not associated with such SR policy.");
937 return clib_error_return (0, "BUG: sr policy returns %d", rv);
943 VLIB_CLI_COMMAND (sr_policy_command, static) = {
945 .short_help = "sr policy [add||del||mod] [bsid 2001::1||index 5] "
946 "next A:: next B:: next C:: (weight 1) (fib-table 2) (encap|insert)",
948 "Manipulation of SR policies.\n"
949 "A Segment Routing policy may contain several SID lists. Each SID list has\n"
950 "an associated weight (default 1), which will result in wECMP (uECMP).\n"
951 "Segment Routing policies might be of type encapsulation or srh insertion\n"
952 "Each SR policy will be associated with a unique BindingSID.\n"
953 "A BindingSID is a locally allocated SegmentID. For every packet that arrives\n"
954 "with IPv6_DA:BSID such traffic will be steered into the SR policy.\n"
955 "The add command will create a SR policy with its first segment list (sl)\n"
956 "The mod command allows you to add, remove, or modify the existing segment lists\n"
957 "within an SR policy.\n"
958 "The del command allows you to delete a SR policy along with all its associated\n"
960 .function = sr_policy_command_fn,
965 * @brief CLI to display onscreen all the SR policies
967 static clib_error_t *
968 show_sr_policies_command_fn (vlib_main_t * vm, unformat_input_t * input,
969 vlib_cli_command_t * cmd)
971 ip6_sr_main_t *sm = &sr_main;
973 ip6_sr_sl_t *segment_list = 0;
974 ip6_sr_policy_t *sr_policy = 0;
975 ip6_sr_policy_t **vec_policies = 0;
980 vlib_cli_output (vm, "SR policies:");
983 pool_foreach (sr_policy, sm->sr_policies,
984 {vec_add1 (vec_policies, sr_policy); } );
987 vec_foreach_index (i, vec_policies)
989 sr_policy = vec_policies[i];
990 vlib_cli_output (vm, "[%u].-\tBSID: %U",
991 (u32) (sr_policy - sm->sr_policies),
992 format_ip6_address, &sr_policy->bsid);
993 vlib_cli_output (vm, "\tBehavior: %s",
994 (sr_policy->is_encap ? "Encapsulation" :
996 vlib_cli_output (vm, "\tType: %s",
998 SR_POLICY_TYPE_DEFAULT ? "Default" : "Spray"));
999 vlib_cli_output (vm, "\tFIB table: %u",
1000 (sr_policy->fib_table !=
1001 (u32) ~ 0 ? sr_policy->fib_table : 0));
1002 vlib_cli_output (vm, "\tSegment Lists:");
1003 vec_foreach (sl_index, sr_policy->segments_lists)
1006 s = format (s, "\t[%u].- ", *sl_index);
1007 segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
1008 s = format (s, "< ");
1009 vec_foreach (addr, segment_list->segments)
1011 s = format (s, "%U, ", format_ip6_address, addr);
1013 s = format (s, "\b\b > ");
1014 s = format (s, "weight: %u", segment_list->weight);
1015 vlib_cli_output (vm, " %s", s);
1017 vlib_cli_output (vm, "-----------");
1023 VLIB_CLI_COMMAND (show_sr_policies_command, static) = {
1024 .path = "show sr policies",
1025 .short_help = "show sr policies",
1026 .function = show_sr_policies_command_fn,
1030 /*************************** SR rewrite graph node ****************************/
1032 * @brief Trace for the SR Policy Rewrite graph node
1035 format_sr_policy_rewrite_trace (u8 * s, va_list * args)
1038 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1039 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1040 sr_policy_rewrite_trace_t *t = va_arg (*args, sr_policy_rewrite_trace_t *);
1043 (s, "SR-policy-rewrite: src %U dst %U",
1044 format_ip6_address, &t->src, format_ip6_address, &t->dst);
1050 * @brief IPv6 encapsulation processing as per RFC2473
1052 static_always_inline void
1053 encaps_processing_v6 (vlib_node_runtime_t * node,
1055 ip6_header_t * ip0, ip6_header_t * ip0_encap)
1059 ip0_encap->hop_limit -= 1;
1061 ip0->payload_length + sizeof (ip6_header_t) +
1062 clib_net_to_host_u16 (ip0_encap->payload_length);
1063 ip0->payload_length = clib_host_to_net_u16 (new_l0);
1064 ip0->ip_version_traffic_class_and_flow_label =
1065 ip0_encap->ip_version_traffic_class_and_flow_label;
1069 * @brief Graph node for applying a SR policy into an IPv6 packet. Encapsulation
1072 sr_policy_rewrite_encaps (vlib_main_t * vm, vlib_node_runtime_t * node,
1073 vlib_frame_t * from_frame)
1075 ip6_sr_main_t *sm = &sr_main;
1076 u32 n_left_from, next_index, *from, *to_next;
1078 from = vlib_frame_vector_args (from_frame);
1079 n_left_from = from_frame->n_vectors;
1081 next_index = node->cached_next_index;
1083 int encap_pkts = 0, bsid_pkts = 0;
1085 while (n_left_from > 0)
1089 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1092 while (n_left_from >= 8 && n_left_to_next >= 4)
1094 u32 bi0, bi1, bi2, bi3;
1095 vlib_buffer_t *b0, *b1, *b2, *b3;
1096 u32 next0, next1, next2, next3;
1097 next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1098 ip6_header_t *ip0, *ip1, *ip2, *ip3;
1099 ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
1100 ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1102 /* Prefetch next iteration. */
1104 vlib_buffer_t *p4, *p5, *p6, *p7;
1106 p4 = vlib_get_buffer (vm, from[4]);
1107 p5 = vlib_get_buffer (vm, from[5]);
1108 p6 = vlib_get_buffer (vm, from[6]);
1109 p7 = vlib_get_buffer (vm, from[7]);
1111 /* Prefetch the buffer header and packet for the N+2 loop iteration */
1112 vlib_prefetch_buffer_header (p4, LOAD);
1113 vlib_prefetch_buffer_header (p5, LOAD);
1114 vlib_prefetch_buffer_header (p6, LOAD);
1115 vlib_prefetch_buffer_header (p7, LOAD);
1117 CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1118 CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1119 CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1120 CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1123 to_next[0] = bi0 = from[0];
1124 to_next[1] = bi1 = from[1];
1125 to_next[2] = bi2 = from[2];
1126 to_next[3] = bi3 = from[3];
1130 n_left_to_next -= 4;
1132 b0 = vlib_get_buffer (vm, bi0);
1133 b1 = vlib_get_buffer (vm, bi1);
1134 b2 = vlib_get_buffer (vm, bi2);
1135 b3 = vlib_get_buffer (vm, bi3);
1138 pool_elt_at_index (sm->sid_lists,
1139 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1141 pool_elt_at_index (sm->sid_lists,
1142 vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1144 pool_elt_at_index (sm->sid_lists,
1145 vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1147 pool_elt_at_index (sm->sid_lists,
1148 vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1150 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1151 vec_len (sl0->rewrite));
1152 ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1153 vec_len (sl1->rewrite));
1154 ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1155 vec_len (sl2->rewrite));
1156 ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1157 vec_len (sl3->rewrite));
1159 ip0_encap = vlib_buffer_get_current (b0);
1160 ip1_encap = vlib_buffer_get_current (b1);
1161 ip2_encap = vlib_buffer_get_current (b2);
1162 ip3_encap = vlib_buffer_get_current (b3);
1164 clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1165 sl0->rewrite, vec_len (sl0->rewrite));
1166 clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
1167 sl1->rewrite, vec_len (sl1->rewrite));
1168 clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
1169 sl2->rewrite, vec_len (sl2->rewrite));
1170 clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
1171 sl3->rewrite, vec_len (sl3->rewrite));
1173 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1174 vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1175 vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1176 vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1178 ip0 = vlib_buffer_get_current (b0);
1179 ip1 = vlib_buffer_get_current (b1);
1180 ip2 = vlib_buffer_get_current (b2);
1181 ip3 = vlib_buffer_get_current (b3);
1183 encaps_processing_v6 (node, b0, ip0, ip0_encap);
1184 encaps_processing_v6 (node, b1, ip1, ip1_encap);
1185 encaps_processing_v6 (node, b2, ip2, ip2_encap);
1186 encaps_processing_v6 (node, b3, ip3, ip3_encap);
1188 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1190 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1192 sr_policy_rewrite_trace_t *tr =
1193 vlib_add_trace (vm, node, b0, sizeof (*tr));
1194 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1195 sizeof (tr->src.as_u8));
1196 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1197 sizeof (tr->dst.as_u8));
1200 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1202 sr_policy_rewrite_trace_t *tr =
1203 vlib_add_trace (vm, node, b1, sizeof (*tr));
1204 clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1205 sizeof (tr->src.as_u8));
1206 clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1207 sizeof (tr->dst.as_u8));
1210 if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1212 sr_policy_rewrite_trace_t *tr =
1213 vlib_add_trace (vm, node, b2, sizeof (*tr));
1214 clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1215 sizeof (tr->src.as_u8));
1216 clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1217 sizeof (tr->dst.as_u8));
1220 if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1222 sr_policy_rewrite_trace_t *tr =
1223 vlib_add_trace (vm, node, b3, sizeof (*tr));
1224 clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1225 sizeof (tr->src.as_u8));
1226 clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1227 sizeof (tr->dst.as_u8));
1232 vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1233 n_left_to_next, bi0, bi1, bi2, bi3,
1234 next0, next1, next2, next3);
1237 /* Single loop for potentially the last three packets */
1238 while (n_left_from > 0 && n_left_to_next > 0)
1242 ip6_header_t *ip0 = 0, *ip0_encap = 0;
1244 u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1251 n_left_to_next -= 1;
1252 b0 = vlib_get_buffer (vm, bi0);
1255 pool_elt_at_index (sm->sid_lists,
1256 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1257 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1258 vec_len (sl0->rewrite));
1260 ip0_encap = vlib_buffer_get_current (b0);
1262 clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1263 sl0->rewrite, vec_len (sl0->rewrite));
1264 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1266 ip0 = vlib_buffer_get_current (b0);
1268 encaps_processing_v6 (node, b0, ip0, ip0_encap);
1270 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1271 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1273 sr_policy_rewrite_trace_t *tr =
1274 vlib_add_trace (vm, node, b0, sizeof (*tr));
1275 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1276 sizeof (tr->src.as_u8));
1277 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1278 sizeof (tr->dst.as_u8));
1282 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1283 n_left_to_next, bi0, next0);
1286 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1289 /* Update counters */
1290 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1291 SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
1293 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1294 SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
1297 return from_frame->n_vectors;
1301 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_node) = {
1302 .function = sr_policy_rewrite_encaps,
1303 .name = "sr-pl-rewrite-encaps",
1304 .vector_size = sizeof (u32),
1305 .format_trace = format_sr_policy_rewrite_trace,
1306 .type = VLIB_NODE_TYPE_INTERNAL,
1307 .n_errors = SR_POLICY_REWRITE_N_ERROR,
1308 .error_strings = sr_policy_rewrite_error_strings,
1309 .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
1311 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
1312 foreach_sr_policy_rewrite_next
1319 * @brief IPv4 encapsulation processing as per RFC2473
1321 static_always_inline void
1322 encaps_processing_v4 (vlib_node_runtime_t * node,
1324 ip6_header_t * ip0, ip4_header_t * ip0_encap)
1327 ip6_sr_header_t *sr0;
1331 /* Inner IPv4: Decrement TTL & update checksum */
1332 ip0_encap->ttl -= 1;
1333 checksum0 = ip0_encap->checksum + clib_host_to_net_u16 (0x0100);
1334 checksum0 += checksum0 >= 0xffff;
1335 ip0_encap->checksum = checksum0;
1337 /* Outer IPv6: Update length, FL, proto */
1338 new_l0 = ip0->payload_length + clib_net_to_host_u16 (ip0_encap->length);
1339 ip0->payload_length = clib_host_to_net_u16 (new_l0);
1340 ip0->ip_version_traffic_class_and_flow_label =
1341 clib_host_to_net_u32 (0 | ((6 & 0xF) << 28) |
1342 ((ip0_encap->tos & 0xFF) << 20));
1343 if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1345 sr0 = (void *) (ip0 + 1);
1346 sr0->protocol = IP_PROTOCOL_IP_IN_IP;
1349 ip0->protocol = IP_PROTOCOL_IP_IN_IP;
1353 * @brief Graph node for applying a SR policy into an IPv4 packet. Encapsulation
1356 sr_policy_rewrite_encaps_v4 (vlib_main_t * vm, vlib_node_runtime_t * node,
1357 vlib_frame_t * from_frame)
1359 ip6_sr_main_t *sm = &sr_main;
1360 u32 n_left_from, next_index, *from, *to_next;
1362 from = vlib_frame_vector_args (from_frame);
1363 n_left_from = from_frame->n_vectors;
1365 next_index = node->cached_next_index;
1367 int encap_pkts = 0, bsid_pkts = 0;
1369 while (n_left_from > 0)
1373 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1376 while (n_left_from >= 8 && n_left_to_next >= 4)
1378 u32 bi0, bi1, bi2, bi3;
1379 vlib_buffer_t *b0, *b1, *b2, *b3;
1380 u32 next0, next1, next2, next3;
1381 next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1382 ip6_header_t *ip0, *ip1, *ip2, *ip3;
1383 ip4_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
1384 ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1386 /* Prefetch next iteration. */
1388 vlib_buffer_t *p4, *p5, *p6, *p7;
1390 p4 = vlib_get_buffer (vm, from[4]);
1391 p5 = vlib_get_buffer (vm, from[5]);
1392 p6 = vlib_get_buffer (vm, from[6]);
1393 p7 = vlib_get_buffer (vm, from[7]);
1395 /* Prefetch the buffer header and packet for the N+2 loop iteration */
1396 vlib_prefetch_buffer_header (p4, LOAD);
1397 vlib_prefetch_buffer_header (p5, LOAD);
1398 vlib_prefetch_buffer_header (p6, LOAD);
1399 vlib_prefetch_buffer_header (p7, LOAD);
1401 CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1402 CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1403 CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1404 CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1407 to_next[0] = bi0 = from[0];
1408 to_next[1] = bi1 = from[1];
1409 to_next[2] = bi2 = from[2];
1410 to_next[3] = bi3 = from[3];
1414 n_left_to_next -= 4;
1416 b0 = vlib_get_buffer (vm, bi0);
1417 b1 = vlib_get_buffer (vm, bi1);
1418 b2 = vlib_get_buffer (vm, bi2);
1419 b3 = vlib_get_buffer (vm, bi3);
1422 pool_elt_at_index (sm->sid_lists,
1423 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1425 pool_elt_at_index (sm->sid_lists,
1426 vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1428 pool_elt_at_index (sm->sid_lists,
1429 vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1431 pool_elt_at_index (sm->sid_lists,
1432 vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1433 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1434 vec_len (sl0->rewrite));
1435 ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1436 vec_len (sl1->rewrite));
1437 ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1438 vec_len (sl2->rewrite));
1439 ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1440 vec_len (sl3->rewrite));
1442 ip0_encap = vlib_buffer_get_current (b0);
1443 ip1_encap = vlib_buffer_get_current (b1);
1444 ip2_encap = vlib_buffer_get_current (b2);
1445 ip3_encap = vlib_buffer_get_current (b3);
1447 clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1448 sl0->rewrite, vec_len (sl0->rewrite));
1449 clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
1450 sl1->rewrite, vec_len (sl1->rewrite));
1451 clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
1452 sl2->rewrite, vec_len (sl2->rewrite));
1453 clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
1454 sl3->rewrite, vec_len (sl3->rewrite));
1456 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1457 vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1458 vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1459 vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1461 ip0 = vlib_buffer_get_current (b0);
1462 ip1 = vlib_buffer_get_current (b1);
1463 ip2 = vlib_buffer_get_current (b2);
1464 ip3 = vlib_buffer_get_current (b3);
1466 encaps_processing_v4 (node, b0, ip0, ip0_encap);
1467 encaps_processing_v4 (node, b1, ip1, ip1_encap);
1468 encaps_processing_v4 (node, b2, ip2, ip2_encap);
1469 encaps_processing_v4 (node, b3, ip3, ip3_encap);
1471 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1473 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1475 sr_policy_rewrite_trace_t *tr =
1476 vlib_add_trace (vm, node, b0, sizeof (*tr));
1477 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1478 sizeof (tr->src.as_u8));
1479 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1480 sizeof (tr->dst.as_u8));
1483 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1485 sr_policy_rewrite_trace_t *tr =
1486 vlib_add_trace (vm, node, b1, sizeof (*tr));
1487 clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1488 sizeof (tr->src.as_u8));
1489 clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1490 sizeof (tr->dst.as_u8));
1493 if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1495 sr_policy_rewrite_trace_t *tr =
1496 vlib_add_trace (vm, node, b2, sizeof (*tr));
1497 clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1498 sizeof (tr->src.as_u8));
1499 clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1500 sizeof (tr->dst.as_u8));
1503 if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1505 sr_policy_rewrite_trace_t *tr =
1506 vlib_add_trace (vm, node, b3, sizeof (*tr));
1507 clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1508 sizeof (tr->src.as_u8));
1509 clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1510 sizeof (tr->dst.as_u8));
1515 vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1516 n_left_to_next, bi0, bi1, bi2, bi3,
1517 next0, next1, next2, next3);
1520 /* Single loop for potentially the last three packets */
1521 while (n_left_from > 0 && n_left_to_next > 0)
1525 ip6_header_t *ip0 = 0;
1526 ip4_header_t *ip0_encap = 0;
1528 u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1535 n_left_to_next -= 1;
1536 b0 = vlib_get_buffer (vm, bi0);
1539 pool_elt_at_index (sm->sid_lists,
1540 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1541 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1542 vec_len (sl0->rewrite));
1544 ip0_encap = vlib_buffer_get_current (b0);
1546 clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1547 sl0->rewrite, vec_len (sl0->rewrite));
1548 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1550 ip0 = vlib_buffer_get_current (b0);
1552 encaps_processing_v4 (node, b0, ip0, ip0_encap);
1554 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1555 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1557 sr_policy_rewrite_trace_t *tr =
1558 vlib_add_trace (vm, node, b0, sizeof (*tr));
1559 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1560 sizeof (tr->src.as_u8));
1561 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1562 sizeof (tr->dst.as_u8));
1566 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1567 n_left_to_next, bi0, next0);
1570 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1573 /* Update counters */
1574 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1575 SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
1577 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1578 SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
1581 return from_frame->n_vectors;
1585 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_v4_node) = {
1586 .function = sr_policy_rewrite_encaps_v4,
1587 .name = "sr-pl-rewrite-encaps-v4",
1588 .vector_size = sizeof (u32),
1589 .format_trace = format_sr_policy_rewrite_trace,
1590 .type = VLIB_NODE_TYPE_INTERNAL,
1591 .n_errors = SR_POLICY_REWRITE_N_ERROR,
1592 .error_strings = sr_policy_rewrite_error_strings,
1593 .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
1595 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
1596 foreach_sr_policy_rewrite_next
1603 ip_flow_hash (void *data)
1605 ip4_header_t *iph = (ip4_header_t *) data;
1607 if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
1608 return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
1610 return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
1616 return (*((u64 *) m) & 0xffffffffffff);
1620 l2_flow_hash (vlib_buffer_t * b0)
1622 ethernet_header_t *eh;
1624 uword is_ip, eh_size;
1627 eh = vlib_buffer_get_current (b0);
1628 eh_type = clib_net_to_host_u16 (eh->type);
1629 eh_size = ethernet_buffer_header_size (b0);
1631 is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
1633 /* since we have 2 cache lines, use them */
1635 a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
1639 b = mac_to_u64 ((u8 *) eh->dst_address);
1640 c = mac_to_u64 ((u8 *) eh->src_address);
1641 hash_mix64 (a, b, c);
1647 * @brief Graph node for applying a SR policy into a L2 frame
1650 sr_policy_rewrite_encaps_l2 (vlib_main_t * vm, vlib_node_runtime_t * node,
1651 vlib_frame_t * from_frame)
1653 ip6_sr_main_t *sm = &sr_main;
1654 u32 n_left_from, next_index, *from, *to_next;
1656 from = vlib_frame_vector_args (from_frame);
1657 n_left_from = from_frame->n_vectors;
1659 next_index = node->cached_next_index;
1661 int encap_pkts = 0, bsid_pkts = 0;
1663 while (n_left_from > 0)
1667 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1670 while (n_left_from >= 8 && n_left_to_next >= 4)
1672 u32 bi0, bi1, bi2, bi3;
1673 vlib_buffer_t *b0, *b1, *b2, *b3;
1674 u32 next0, next1, next2, next3;
1675 next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1676 ethernet_header_t *en0, *en1, *en2, *en3;
1677 ip6_header_t *ip0, *ip1, *ip2, *ip3;
1678 ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
1679 ip6_sr_policy_t *sp0, *sp1, *sp2, *sp3;
1680 ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1682 /* Prefetch next iteration. */
1684 vlib_buffer_t *p4, *p5, *p6, *p7;
1686 p4 = vlib_get_buffer (vm, from[4]);
1687 p5 = vlib_get_buffer (vm, from[5]);
1688 p6 = vlib_get_buffer (vm, from[6]);
1689 p7 = vlib_get_buffer (vm, from[7]);
1691 /* Prefetch the buffer header and packet for the N+2 loop iteration */
1692 vlib_prefetch_buffer_header (p4, LOAD);
1693 vlib_prefetch_buffer_header (p5, LOAD);
1694 vlib_prefetch_buffer_header (p6, LOAD);
1695 vlib_prefetch_buffer_header (p7, LOAD);
1697 CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1698 CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1699 CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1700 CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1703 to_next[0] = bi0 = from[0];
1704 to_next[1] = bi1 = from[1];
1705 to_next[2] = bi2 = from[2];
1706 to_next[3] = bi3 = from[3];
1710 n_left_to_next -= 4;
1712 b0 = vlib_get_buffer (vm, bi0);
1713 b1 = vlib_get_buffer (vm, bi1);
1714 b2 = vlib_get_buffer (vm, bi2);
1715 b3 = vlib_get_buffer (vm, bi3);
1717 sp0 = pool_elt_at_index (sm->sr_policies,
1718 sm->sw_iface_sr_policies[vnet_buffer
1722 sp1 = pool_elt_at_index (sm->sr_policies,
1723 sm->sw_iface_sr_policies[vnet_buffer
1727 sp2 = pool_elt_at_index (sm->sr_policies,
1728 sm->sw_iface_sr_policies[vnet_buffer
1732 sp3 = pool_elt_at_index (sm->sr_policies,
1733 sm->sw_iface_sr_policies[vnet_buffer
1737 if (vec_len (sp0->segments_lists) == 1)
1738 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0];
1741 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
1742 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1743 sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash &
1744 (vec_len (sp0->segments_lists) - 1))];
1747 if (vec_len (sp1->segments_lists) == 1)
1748 vnet_buffer (b1)->ip.adj_index[VLIB_TX] = sp1->segments_lists[1];
1751 vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1);
1752 vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
1753 sp1->segments_lists[(vnet_buffer (b1)->ip.flow_hash &
1754 (vec_len (sp1->segments_lists) - 1))];
1757 if (vec_len (sp2->segments_lists) == 1)
1758 vnet_buffer (b2)->ip.adj_index[VLIB_TX] = sp2->segments_lists[2];
1761 vnet_buffer (b2)->ip.flow_hash = l2_flow_hash (b2);
1762 vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
1763 sp2->segments_lists[(vnet_buffer (b2)->ip.flow_hash &
1764 (vec_len (sp2->segments_lists) - 1))];
1767 if (vec_len (sp3->segments_lists) == 1)
1768 vnet_buffer (b3)->ip.adj_index[VLIB_TX] = sp3->segments_lists[3];
1771 vnet_buffer (b3)->ip.flow_hash = l2_flow_hash (b3);
1772 vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
1773 sp3->segments_lists[(vnet_buffer (b3)->ip.flow_hash &
1774 (vec_len (sp3->segments_lists) - 1))];
1778 pool_elt_at_index (sm->sid_lists,
1779 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1781 pool_elt_at_index (sm->sid_lists,
1782 vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1784 pool_elt_at_index (sm->sid_lists,
1785 vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1787 pool_elt_at_index (sm->sid_lists,
1788 vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1790 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1791 vec_len (sl0->rewrite));
1792 ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1793 vec_len (sl1->rewrite));
1794 ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1795 vec_len (sl2->rewrite));
1796 ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1797 vec_len (sl3->rewrite));
1799 en0 = vlib_buffer_get_current (b0);
1800 en1 = vlib_buffer_get_current (b1);
1801 en2 = vlib_buffer_get_current (b2);
1802 en3 = vlib_buffer_get_current (b3);
1804 clib_memcpy_fast (((u8 *) en0) - vec_len (sl0->rewrite),
1805 sl0->rewrite, vec_len (sl0->rewrite));
1806 clib_memcpy_fast (((u8 *) en1) - vec_len (sl1->rewrite),
1807 sl1->rewrite, vec_len (sl1->rewrite));
1808 clib_memcpy_fast (((u8 *) en2) - vec_len (sl2->rewrite),
1809 sl2->rewrite, vec_len (sl2->rewrite));
1810 clib_memcpy_fast (((u8 *) en3) - vec_len (sl3->rewrite),
1811 sl3->rewrite, vec_len (sl3->rewrite));
1813 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1814 vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1815 vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1816 vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1818 ip0 = vlib_buffer_get_current (b0);
1819 ip1 = vlib_buffer_get_current (b1);
1820 ip2 = vlib_buffer_get_current (b2);
1821 ip3 = vlib_buffer_get_current (b3);
1823 ip0->payload_length =
1824 clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t));
1825 ip1->payload_length =
1826 clib_host_to_net_u16 (b1->current_length - sizeof (ip6_header_t));
1827 ip2->payload_length =
1828 clib_host_to_net_u16 (b2->current_length - sizeof (ip6_header_t));
1829 ip3->payload_length =
1830 clib_host_to_net_u16 (b3->current_length - sizeof (ip6_header_t));
1832 if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1834 sr0 = (void *) (ip0 + 1);
1835 sr0->protocol = IP_PROTOCOL_IP6_NONXT;
1838 ip0->protocol = IP_PROTOCOL_IP6_NONXT;
1840 if (ip1->protocol == IP_PROTOCOL_IPV6_ROUTE)
1842 sr1 = (void *) (ip1 + 1);
1843 sr1->protocol = IP_PROTOCOL_IP6_NONXT;
1846 ip1->protocol = IP_PROTOCOL_IP6_NONXT;
1848 if (ip2->protocol == IP_PROTOCOL_IPV6_ROUTE)
1850 sr2 = (void *) (ip2 + 1);
1851 sr2->protocol = IP_PROTOCOL_IP6_NONXT;
1854 ip2->protocol = IP_PROTOCOL_IP6_NONXT;
1856 if (ip3->protocol == IP_PROTOCOL_IPV6_ROUTE)
1858 sr3 = (void *) (ip3 + 1);
1859 sr3->protocol = IP_PROTOCOL_IP6_NONXT;
1862 ip3->protocol = IP_PROTOCOL_IP6_NONXT;
1864 /* Which Traffic class and flow label do I set ? */
1865 //ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(0|((6&0xF)<<28)|((ip0_encap->tos&0xFF)<<20));
1867 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1869 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1871 sr_policy_rewrite_trace_t *tr =
1872 vlib_add_trace (vm, node, b0, sizeof (*tr));
1873 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1874 sizeof (tr->src.as_u8));
1875 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1876 sizeof (tr->dst.as_u8));
1879 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1881 sr_policy_rewrite_trace_t *tr =
1882 vlib_add_trace (vm, node, b1, sizeof (*tr));
1883 clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1884 sizeof (tr->src.as_u8));
1885 clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1886 sizeof (tr->dst.as_u8));
1889 if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1891 sr_policy_rewrite_trace_t *tr =
1892 vlib_add_trace (vm, node, b2, sizeof (*tr));
1893 clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1894 sizeof (tr->src.as_u8));
1895 clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1896 sizeof (tr->dst.as_u8));
1899 if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1901 sr_policy_rewrite_trace_t *tr =
1902 vlib_add_trace (vm, node, b3, sizeof (*tr));
1903 clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1904 sizeof (tr->src.as_u8));
1905 clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1906 sizeof (tr->dst.as_u8));
1911 vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1912 n_left_to_next, bi0, bi1, bi2, bi3,
1913 next0, next1, next2, next3);
1916 /* Single loop for potentially the last three packets */
1917 while (n_left_from > 0 && n_left_to_next > 0)
1921 ip6_header_t *ip0 = 0;
1922 ip6_sr_header_t *sr0;
1923 ethernet_header_t *en0;
1924 ip6_sr_policy_t *sp0;
1926 u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1933 n_left_to_next -= 1;
1934 b0 = vlib_get_buffer (vm, bi0);
1936 /* Find the SR policy */
1937 sp0 = pool_elt_at_index (sm->sr_policies,
1938 sm->sw_iface_sr_policies[vnet_buffer
1942 /* In case there is more than one SL, LB among them */
1943 if (vec_len (sp0->segments_lists) == 1)
1944 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0];
1947 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
1948 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1949 sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash &
1950 (vec_len (sp0->segments_lists) - 1))];
1953 pool_elt_at_index (sm->sid_lists,
1954 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1955 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1956 vec_len (sl0->rewrite));
1958 en0 = vlib_buffer_get_current (b0);
1960 clib_memcpy_fast (((u8 *) en0) - vec_len (sl0->rewrite),
1961 sl0->rewrite, vec_len (sl0->rewrite));
1963 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1965 ip0 = vlib_buffer_get_current (b0);
1967 ip0->payload_length =
1968 clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t));
1970 if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1972 sr0 = (void *) (ip0 + 1);
1973 sr0->protocol = IP_PROTOCOL_IP6_NONXT;
1976 ip0->protocol = IP_PROTOCOL_IP6_NONXT;
1978 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1979 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1981 sr_policy_rewrite_trace_t *tr =
1982 vlib_add_trace (vm, node, b0, sizeof (*tr));
1983 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1984 sizeof (tr->src.as_u8));
1985 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1986 sizeof (tr->dst.as_u8));
1990 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1991 n_left_to_next, bi0, next0);
1994 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1997 /* Update counters */
1998 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1999 SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2001 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
2002 SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2005 return from_frame->n_vectors;
2009 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_l2_node) = {
2010 .function = sr_policy_rewrite_encaps_l2,
2011 .name = "sr-pl-rewrite-encaps-l2",
2012 .vector_size = sizeof (u32),
2013 .format_trace = format_sr_policy_rewrite_trace,
2014 .type = VLIB_NODE_TYPE_INTERNAL,
2015 .n_errors = SR_POLICY_REWRITE_N_ERROR,
2016 .error_strings = sr_policy_rewrite_error_strings,
2017 .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2019 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2020 foreach_sr_policy_rewrite_next
2027 * @brief Graph node for applying a SR policy into a packet. SRH insertion.
2030 sr_policy_rewrite_insert (vlib_main_t * vm, vlib_node_runtime_t * node,
2031 vlib_frame_t * from_frame)
2033 ip6_sr_main_t *sm = &sr_main;
2034 u32 n_left_from, next_index, *from, *to_next;
2036 from = vlib_frame_vector_args (from_frame);
2037 n_left_from = from_frame->n_vectors;
2039 next_index = node->cached_next_index;
2041 int insert_pkts = 0, bsid_pkts = 0;
2043 while (n_left_from > 0)
2047 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2050 while (n_left_from >= 8 && n_left_to_next >= 4)
2052 u32 bi0, bi1, bi2, bi3;
2053 vlib_buffer_t *b0, *b1, *b2, *b3;
2054 u32 next0, next1, next2, next3;
2055 next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2056 ip6_header_t *ip0, *ip1, *ip2, *ip3;
2057 ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2058 ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2059 u16 new_l0, new_l1, new_l2, new_l3;
2061 /* Prefetch next iteration. */
2063 vlib_buffer_t *p4, *p5, *p6, *p7;
2065 p4 = vlib_get_buffer (vm, from[4]);
2066 p5 = vlib_get_buffer (vm, from[5]);
2067 p6 = vlib_get_buffer (vm, from[6]);
2068 p7 = vlib_get_buffer (vm, from[7]);
2070 /* Prefetch the buffer header and packet for the N+2 loop iteration */
2071 vlib_prefetch_buffer_header (p4, LOAD);
2072 vlib_prefetch_buffer_header (p5, LOAD);
2073 vlib_prefetch_buffer_header (p6, LOAD);
2074 vlib_prefetch_buffer_header (p7, LOAD);
2076 CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2077 CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2078 CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2079 CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2082 to_next[0] = bi0 = from[0];
2083 to_next[1] = bi1 = from[1];
2084 to_next[2] = bi2 = from[2];
2085 to_next[3] = bi3 = from[3];
2089 n_left_to_next -= 4;
2091 b0 = vlib_get_buffer (vm, bi0);
2092 b1 = vlib_get_buffer (vm, bi1);
2093 b2 = vlib_get_buffer (vm, bi2);
2094 b3 = vlib_get_buffer (vm, bi3);
2097 pool_elt_at_index (sm->sid_lists,
2098 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2100 pool_elt_at_index (sm->sid_lists,
2101 vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2103 pool_elt_at_index (sm->sid_lists,
2104 vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2106 pool_elt_at_index (sm->sid_lists,
2107 vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2108 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2109 vec_len (sl0->rewrite));
2110 ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2111 vec_len (sl1->rewrite));
2112 ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2113 vec_len (sl2->rewrite));
2114 ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2115 vec_len (sl3->rewrite));
2117 ip0 = vlib_buffer_get_current (b0);
2118 ip1 = vlib_buffer_get_current (b1);
2119 ip2 = vlib_buffer_get_current (b2);
2120 ip3 = vlib_buffer_get_current (b3);
2122 if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2124 (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2125 ip6_ext_header_len (ip0 + 1));
2127 sr0 = (ip6_sr_header_t *) (ip0 + 1);
2129 if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2131 (ip6_sr_header_t *) (((void *) (ip1 + 1)) +
2132 ip6_ext_header_len (ip1 + 1));
2134 sr1 = (ip6_sr_header_t *) (ip1 + 1);
2136 if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2138 (ip6_sr_header_t *) (((void *) (ip2 + 1)) +
2139 ip6_ext_header_len (ip2 + 1));
2141 sr2 = (ip6_sr_header_t *) (ip2 + 1);
2143 if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2145 (ip6_sr_header_t *) (((void *) (ip3 + 1)) +
2146 ip6_ext_header_len (ip3 + 1));
2148 sr3 = (ip6_sr_header_t *) (ip3 + 1);
2150 clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0,
2151 (void *) sr0 - (void *) ip0);
2152 clib_memcpy_fast ((u8 *) ip1 - vec_len (sl1->rewrite), (u8 *) ip1,
2153 (void *) sr1 - (void *) ip1);
2154 clib_memcpy_fast ((u8 *) ip2 - vec_len (sl2->rewrite), (u8 *) ip2,
2155 (void *) sr2 - (void *) ip2);
2156 clib_memcpy_fast ((u8 *) ip3 - vec_len (sl3->rewrite), (u8 *) ip3,
2157 (void *) sr3 - (void *) ip3);
2159 clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite)),
2160 sl0->rewrite, vec_len (sl0->rewrite));
2161 clib_memcpy_fast (((u8 *) sr1 - vec_len (sl1->rewrite)),
2162 sl1->rewrite, vec_len (sl1->rewrite));
2163 clib_memcpy_fast (((u8 *) sr2 - vec_len (sl2->rewrite)),
2164 sl2->rewrite, vec_len (sl2->rewrite));
2165 clib_memcpy_fast (((u8 *) sr3 - vec_len (sl3->rewrite)),
2166 sl3->rewrite, vec_len (sl3->rewrite));
2168 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
2169 vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
2170 vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
2171 vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
2173 ip0 = ((void *) ip0) - vec_len (sl0->rewrite);
2174 ip1 = ((void *) ip1) - vec_len (sl1->rewrite);
2175 ip2 = ((void *) ip2) - vec_len (sl2->rewrite);
2176 ip3 = ((void *) ip3) - vec_len (sl3->rewrite);
2178 ip0->hop_limit -= 1;
2179 ip1->hop_limit -= 1;
2180 ip2->hop_limit -= 1;
2181 ip3->hop_limit -= 1;
2184 clib_net_to_host_u16 (ip0->payload_length) +
2185 vec_len (sl0->rewrite);
2187 clib_net_to_host_u16 (ip1->payload_length) +
2188 vec_len (sl1->rewrite);
2190 clib_net_to_host_u16 (ip2->payload_length) +
2191 vec_len (sl2->rewrite);
2193 clib_net_to_host_u16 (ip3->payload_length) +
2194 vec_len (sl3->rewrite);
2196 ip0->payload_length = clib_host_to_net_u16 (new_l0);
2197 ip1->payload_length = clib_host_to_net_u16 (new_l1);
2198 ip2->payload_length = clib_host_to_net_u16 (new_l2);
2199 ip3->payload_length = clib_host_to_net_u16 (new_l3);
2201 sr0 = ((void *) sr0) - vec_len (sl0->rewrite);
2202 sr1 = ((void *) sr1) - vec_len (sl1->rewrite);
2203 sr2 = ((void *) sr2) - vec_len (sl2->rewrite);
2204 sr3 = ((void *) sr3) - vec_len (sl3->rewrite);
2206 sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0];
2207 sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1];
2208 sr1->segments->as_u64[0] = ip1->dst_address.as_u64[0];
2209 sr1->segments->as_u64[1] = ip1->dst_address.as_u64[1];
2210 sr2->segments->as_u64[0] = ip2->dst_address.as_u64[0];
2211 sr2->segments->as_u64[1] = ip2->dst_address.as_u64[1];
2212 sr3->segments->as_u64[0] = ip3->dst_address.as_u64[0];
2213 sr3->segments->as_u64[1] = ip3->dst_address.as_u64[1];
2215 ip0->dst_address.as_u64[0] =
2216 (sr0->segments + sr0->segments_left)->as_u64[0];
2217 ip0->dst_address.as_u64[1] =
2218 (sr0->segments + sr0->segments_left)->as_u64[1];
2219 ip1->dst_address.as_u64[0] =
2220 (sr1->segments + sr1->segments_left)->as_u64[0];
2221 ip1->dst_address.as_u64[1] =
2222 (sr1->segments + sr1->segments_left)->as_u64[1];
2223 ip2->dst_address.as_u64[0] =
2224 (sr2->segments + sr2->segments_left)->as_u64[0];
2225 ip2->dst_address.as_u64[1] =
2226 (sr2->segments + sr2->segments_left)->as_u64[1];
2227 ip3->dst_address.as_u64[0] =
2228 (sr3->segments + sr3->segments_left)->as_u64[0];
2229 ip3->dst_address.as_u64[1] =
2230 (sr3->segments + sr3->segments_left)->as_u64[1];
2232 ip6_ext_header_t *ip_ext;
2233 if (ip0 + 1 == (void *) sr0)
2235 sr0->protocol = ip0->protocol;
2236 ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2240 ip_ext = (void *) (ip0 + 1);
2241 sr0->protocol = ip_ext->next_hdr;
2242 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
/* Chain the SRH into packet 1's header chain. BUG FIX: the else branch
 * previously used ip2/sr2 (copy-paste from the packet-2 case), which left
 * packet 1's SRH unchained and corrupted packet 2's extension header. */
2245 if (ip1 + 1 == (void *) sr1)
2247 sr1->protocol = ip1->protocol;
2248 ip1->protocol = IP_PROTOCOL_IPV6_ROUTE;
2252 ip_ext = (void *) (ip1 + 1);
2253 sr1->protocol = ip_ext->next_hdr;
2254 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2257 if (ip2 + 1 == (void *) sr2)
2259 sr2->protocol = ip2->protocol;
2260 ip2->protocol = IP_PROTOCOL_IPV6_ROUTE;
2264 ip_ext = (void *) (ip2 + 1);
2265 sr2->protocol = ip_ext->next_hdr;
2266 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2269 if (ip3 + 1 == (void *) sr3)
2271 sr3->protocol = ip3->protocol;
2272 ip3->protocol = IP_PROTOCOL_IPV6_ROUTE;
2276 ip_ext = (void *) (ip3 + 1);
2277 sr3->protocol = ip_ext->next_hdr;
2278 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2283 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
2285 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2287 sr_policy_rewrite_trace_t *tr =
2288 vlib_add_trace (vm, node, b0, sizeof (*tr));
2289 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2290 sizeof (tr->src.as_u8));
2291 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2292 sizeof (tr->dst.as_u8));
2295 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
2297 sr_policy_rewrite_trace_t *tr =
2298 vlib_add_trace (vm, node, b1, sizeof (*tr));
2299 clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
2300 sizeof (tr->src.as_u8));
2301 clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
2302 sizeof (tr->dst.as_u8));
2305 if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
2307 sr_policy_rewrite_trace_t *tr =
2308 vlib_add_trace (vm, node, b2, sizeof (*tr));
2309 clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
2310 sizeof (tr->src.as_u8));
2311 clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
2312 sizeof (tr->dst.as_u8));
2315 if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
2317 sr_policy_rewrite_trace_t *tr =
2318 vlib_add_trace (vm, node, b3, sizeof (*tr));
2319 clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
2320 sizeof (tr->src.as_u8));
2321 clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
2322 sizeof (tr->dst.as_u8));
2326 vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
2327 n_left_to_next, bi0, bi1, bi2, bi3,
2328 next0, next1, next2, next3);
2331 /* Single loop for potentially the last three packets */
2332 while (n_left_from > 0 && n_left_to_next > 0)
2336 ip6_header_t *ip0 = 0;
2337 ip6_sr_header_t *sr0 = 0;
2339 u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2347 n_left_to_next -= 1;
2349 b0 = vlib_get_buffer (vm, bi0);
2351 pool_elt_at_index (sm->sid_lists,
2352 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2353 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2354 vec_len (sl0->rewrite));
2356 ip0 = vlib_buffer_get_current (b0);
2358 if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2360 (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2361 ip6_ext_header_len (ip0 + 1));
2363 sr0 = (ip6_sr_header_t *) (ip0 + 1);
2365 clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0,
2366 (void *) sr0 - (void *) ip0);
2367 clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite)),
2368 sl0->rewrite, vec_len (sl0->rewrite));
2370 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
2372 ip0 = ((void *) ip0) - vec_len (sl0->rewrite);
2373 ip0->hop_limit -= 1;
2375 clib_net_to_host_u16 (ip0->payload_length) +
2376 vec_len (sl0->rewrite);
2377 ip0->payload_length = clib_host_to_net_u16 (new_l0);
2379 sr0 = ((void *) sr0) - vec_len (sl0->rewrite);
2380 sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0];
2381 sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1];
2383 ip0->dst_address.as_u64[0] =
2384 (sr0->segments + sr0->segments_left)->as_u64[0];
2385 ip0->dst_address.as_u64[1] =
2386 (sr0->segments + sr0->segments_left)->as_u64[1];
2388 if (ip0 + 1 == (void *) sr0)
2390 sr0->protocol = ip0->protocol;
2391 ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2395 ip6_ext_header_t *ip_ext = (void *) (ip0 + 1);
2396 sr0->protocol = ip_ext->next_hdr;
2397 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2400 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
2401 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2403 sr_policy_rewrite_trace_t *tr =
2404 vlib_add_trace (vm, node, b0, sizeof (*tr));
2405 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2406 sizeof (tr->src.as_u8));
2407 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2408 sizeof (tr->dst.as_u8));
2413 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2414 n_left_to_next, bi0, next0);
2417 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2420 /* Update counters */
2421 vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2422 SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2424 vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2425 SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2427 return from_frame->n_vectors;
/* Graph node registration: SR policy application via SRH insertion
 * (node "sr-pl-rewrite-insert"); next nodes come from
 * foreach_sr_policy_rewrite_next (ip6-lookup / error-drop). */
2431 VLIB_REGISTER_NODE (sr_policy_rewrite_insert_node) = {
2432 .function = sr_policy_rewrite_insert,
2433 .name = "sr-pl-rewrite-insert",
2434 .vector_size = sizeof (u32),
2435 .format_trace = format_sr_policy_rewrite_trace,
2436 .type = VLIB_NODE_TYPE_INTERNAL,
2437 .n_errors = SR_POLICY_REWRITE_N_ERROR,
2438 .error_strings = sr_policy_rewrite_error_strings,
2439 .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2441 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2442 foreach_sr_policy_rewrite_next
2449 * @brief Graph node for applying a SR policy into a packet. BSID - SRH insertion.
2452 sr_policy_rewrite_b_insert (vlib_main_t * vm, vlib_node_runtime_t * node,
2453 vlib_frame_t * from_frame)
2455 ip6_sr_main_t *sm = &sr_main;
2456 u32 n_left_from, next_index, *from, *to_next;
2458 from = vlib_frame_vector_args (from_frame);
2459 n_left_from = from_frame->n_vectors;
2461 next_index = node->cached_next_index;
2463 int insert_pkts = 0, bsid_pkts = 0;
2465 while (n_left_from > 0)
2469 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2472 while (n_left_from >= 8 && n_left_to_next >= 4)
2474 u32 bi0, bi1, bi2, bi3;
2475 vlib_buffer_t *b0, *b1, *b2, *b3;
2476 u32 next0, next1, next2, next3;
2477 next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2478 ip6_header_t *ip0, *ip1, *ip2, *ip3;
2479 ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2480 ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2481 u16 new_l0, new_l1, new_l2, new_l3;
2483 /* Prefetch next iteration. */
2485 vlib_buffer_t *p4, *p5, *p6, *p7;
2487 p4 = vlib_get_buffer (vm, from[4]);
2488 p5 = vlib_get_buffer (vm, from[5]);
2489 p6 = vlib_get_buffer (vm, from[6]);
2490 p7 = vlib_get_buffer (vm, from[7]);
2492 /* Prefetch the buffer header and packet for the N+2 loop iteration */
2493 vlib_prefetch_buffer_header (p4, LOAD);
2494 vlib_prefetch_buffer_header (p5, LOAD);
2495 vlib_prefetch_buffer_header (p6, LOAD);
2496 vlib_prefetch_buffer_header (p7, LOAD);
2498 CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2499 CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2500 CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2501 CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2504 to_next[0] = bi0 = from[0];
2505 to_next[1] = bi1 = from[1];
2506 to_next[2] = bi2 = from[2];
2507 to_next[3] = bi3 = from[3];
2511 n_left_to_next -= 4;
2513 b0 = vlib_get_buffer (vm, bi0);
2514 b1 = vlib_get_buffer (vm, bi1);
2515 b2 = vlib_get_buffer (vm, bi2);
2516 b3 = vlib_get_buffer (vm, bi3);
2519 pool_elt_at_index (sm->sid_lists,
2520 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2522 pool_elt_at_index (sm->sid_lists,
2523 vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2525 pool_elt_at_index (sm->sid_lists,
2526 vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2528 pool_elt_at_index (sm->sid_lists,
2529 vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2530 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2531 vec_len (sl0->rewrite_bsid));
2532 ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2533 vec_len (sl1->rewrite_bsid));
2534 ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2535 vec_len (sl2->rewrite_bsid));
2536 ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2537 vec_len (sl3->rewrite_bsid));
2539 ip0 = vlib_buffer_get_current (b0);
2540 ip1 = vlib_buffer_get_current (b1);
2541 ip2 = vlib_buffer_get_current (b2);
2542 ip3 = vlib_buffer_get_current (b3);
2544 if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2546 (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2547 ip6_ext_header_len (ip0 + 1));
2549 sr0 = (ip6_sr_header_t *) (ip0 + 1);
2551 if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2553 (ip6_sr_header_t *) (((void *) (ip1 + 1)) +
2554 ip6_ext_header_len (ip1 + 1));
2556 sr1 = (ip6_sr_header_t *) (ip1 + 1);
2558 if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2560 (ip6_sr_header_t *) (((void *) (ip2 + 1)) +
2561 ip6_ext_header_len (ip2 + 1));
2563 sr2 = (ip6_sr_header_t *) (ip2 + 1);
2565 if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2567 (ip6_sr_header_t *) (((void *) (ip3 + 1)) +
2568 ip6_ext_header_len (ip3 + 1));
2570 sr3 = (ip6_sr_header_t *) (ip3 + 1);
2572 clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite_bsid),
2573 (u8 *) ip0, (void *) sr0 - (void *) ip0);
2574 clib_memcpy_fast ((u8 *) ip1 - vec_len (sl1->rewrite_bsid),
2575 (u8 *) ip1, (void *) sr1 - (void *) ip1);
2576 clib_memcpy_fast ((u8 *) ip2 - vec_len (sl2->rewrite_bsid),
2577 (u8 *) ip2, (void *) sr2 - (void *) ip2);
2578 clib_memcpy_fast ((u8 *) ip3 - vec_len (sl3->rewrite_bsid),
2579 (u8 *) ip3, (void *) sr3 - (void *) ip3);
2581 clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)),
2582 sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid));
2583 clib_memcpy_fast (((u8 *) sr1 - vec_len (sl1->rewrite_bsid)),
2584 sl1->rewrite_bsid, vec_len (sl1->rewrite_bsid));
2585 clib_memcpy_fast (((u8 *) sr2 - vec_len (sl2->rewrite_bsid)),
2586 sl2->rewrite_bsid, vec_len (sl2->rewrite_bsid));
2587 clib_memcpy_fast (((u8 *) sr3 - vec_len (sl3->rewrite_bsid)),
2588 sl3->rewrite_bsid, vec_len (sl3->rewrite_bsid));
2590 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite_bsid));
2591 vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite_bsid));
2592 vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite_bsid));
2593 vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite_bsid));
2595 ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid);
2596 ip1 = ((void *) ip1) - vec_len (sl1->rewrite_bsid);
2597 ip2 = ((void *) ip2) - vec_len (sl2->rewrite_bsid);
2598 ip3 = ((void *) ip3) - vec_len (sl3->rewrite_bsid);
2600 ip0->hop_limit -= 1;
2601 ip1->hop_limit -= 1;
2602 ip2->hop_limit -= 1;
2603 ip3->hop_limit -= 1;
2606 clib_net_to_host_u16 (ip0->payload_length) +
2607 vec_len (sl0->rewrite_bsid);
2609 clib_net_to_host_u16 (ip1->payload_length) +
2610 vec_len (sl1->rewrite_bsid);
2612 clib_net_to_host_u16 (ip2->payload_length) +
2613 vec_len (sl2->rewrite_bsid);
2615 clib_net_to_host_u16 (ip3->payload_length) +
2616 vec_len (sl3->rewrite_bsid);
2618 ip0->payload_length = clib_host_to_net_u16 (new_l0);
2619 ip1->payload_length = clib_host_to_net_u16 (new_l1);
2620 ip2->payload_length = clib_host_to_net_u16 (new_l2);
2621 ip3->payload_length = clib_host_to_net_u16 (new_l3);
2623 sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid);
2624 sr1 = ((void *) sr1) - vec_len (sl1->rewrite_bsid);
2625 sr2 = ((void *) sr2) - vec_len (sl2->rewrite_bsid);
2626 sr3 = ((void *) sr3) - vec_len (sl3->rewrite_bsid);
2628 ip0->dst_address.as_u64[0] =
2629 (sr0->segments + sr0->segments_left)->as_u64[0];
2630 ip0->dst_address.as_u64[1] =
2631 (sr0->segments + sr0->segments_left)->as_u64[1];
2632 ip1->dst_address.as_u64[0] =
2633 (sr1->segments + sr1->segments_left)->as_u64[0];
2634 ip1->dst_address.as_u64[1] =
2635 (sr1->segments + sr1->segments_left)->as_u64[1];
2636 ip2->dst_address.as_u64[0] =
2637 (sr2->segments + sr2->segments_left)->as_u64[0];
2638 ip2->dst_address.as_u64[1] =
2639 (sr2->segments + sr2->segments_left)->as_u64[1];
2640 ip3->dst_address.as_u64[0] =
2641 (sr3->segments + sr3->segments_left)->as_u64[0];
2642 ip3->dst_address.as_u64[1] =
2643 (sr3->segments + sr3->segments_left)->as_u64[1];
2645 ip6_ext_header_t *ip_ext;
2646 if (ip0 + 1 == (void *) sr0)
2648 sr0->protocol = ip0->protocol;
2649 ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2653 ip_ext = (void *) (ip0 + 1);
2654 sr0->protocol = ip_ext->next_hdr;
2655 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
/* Chain the SRH into packet 1's header chain. BUG FIX: the else branch
 * previously used ip2/sr2 (copy-paste from the packet-2 case), which left
 * packet 1's SRH unchained and corrupted packet 2's extension header. */
2658 if (ip1 + 1 == (void *) sr1)
2660 sr1->protocol = ip1->protocol;
2661 ip1->protocol = IP_PROTOCOL_IPV6_ROUTE;
2665 ip_ext = (void *) (ip1 + 1);
2666 sr1->protocol = ip_ext->next_hdr;
2667 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2670 if (ip2 + 1 == (void *) sr2)
2672 sr2->protocol = ip2->protocol;
2673 ip2->protocol = IP_PROTOCOL_IPV6_ROUTE;
2677 ip_ext = (void *) (ip2 + 1);
2678 sr2->protocol = ip_ext->next_hdr;
2679 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2682 if (ip3 + 1 == (void *) sr3)
2684 sr3->protocol = ip3->protocol;
2685 ip3->protocol = IP_PROTOCOL_IPV6_ROUTE;
2689 ip_ext = (void *) (ip3 + 1);
2690 sr3->protocol = ip_ext->next_hdr;
2691 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2696 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
2698 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2700 sr_policy_rewrite_trace_t *tr =
2701 vlib_add_trace (vm, node, b0, sizeof (*tr));
2702 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2703 sizeof (tr->src.as_u8));
2704 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2705 sizeof (tr->dst.as_u8));
2708 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
2710 sr_policy_rewrite_trace_t *tr =
2711 vlib_add_trace (vm, node, b1, sizeof (*tr));
2712 clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
2713 sizeof (tr->src.as_u8));
2714 clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
2715 sizeof (tr->dst.as_u8));
2718 if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
2720 sr_policy_rewrite_trace_t *tr =
2721 vlib_add_trace (vm, node, b2, sizeof (*tr));
2722 clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
2723 sizeof (tr->src.as_u8));
2724 clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
2725 sizeof (tr->dst.as_u8));
2728 if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
2730 sr_policy_rewrite_trace_t *tr =
2731 vlib_add_trace (vm, node, b3, sizeof (*tr));
2732 clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
2733 sizeof (tr->src.as_u8));
2734 clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
2735 sizeof (tr->dst.as_u8));
2739 vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
2740 n_left_to_next, bi0, bi1, bi2, bi3,
2741 next0, next1, next2, next3);
2744 /* Single loop for potentially the last three packets */
2745 while (n_left_from > 0 && n_left_to_next > 0)
2749 ip6_header_t *ip0 = 0;
2750 ip6_sr_header_t *sr0 = 0;
2752 u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2760 n_left_to_next -= 1;
2762 b0 = vlib_get_buffer (vm, bi0);
2764 pool_elt_at_index (sm->sid_lists,
2765 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2766 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2767 vec_len (sl0->rewrite_bsid));
2769 ip0 = vlib_buffer_get_current (b0);
2771 if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2773 (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2774 ip6_ext_header_len (ip0 + 1));
2776 sr0 = (ip6_sr_header_t *) (ip0 + 1);
2778 clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite_bsid),
2779 (u8 *) ip0, (void *) sr0 - (void *) ip0);
2780 clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)),
2781 sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid));
2783 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite_bsid));
2785 ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid);
2786 ip0->hop_limit -= 1;
2788 clib_net_to_host_u16 (ip0->payload_length) +
2789 vec_len (sl0->rewrite_bsid);
2790 ip0->payload_length = clib_host_to_net_u16 (new_l0);
2792 sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid);
2794 ip0->dst_address.as_u64[0] =
2795 (sr0->segments + sr0->segments_left)->as_u64[0];
2796 ip0->dst_address.as_u64[1] =
2797 (sr0->segments + sr0->segments_left)->as_u64[1];
2799 if (ip0 + 1 == (void *) sr0)
2801 sr0->protocol = ip0->protocol;
2802 ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2806 ip6_ext_header_t *ip_ext = (void *) (ip0 + 1);
2807 sr0->protocol = ip_ext->next_hdr;
2808 ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2811 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
2812 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2814 sr_policy_rewrite_trace_t *tr =
2815 vlib_add_trace (vm, node, b0, sizeof (*tr));
2816 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2817 sizeof (tr->src.as_u8));
2818 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2819 sizeof (tr->dst.as_u8));
2824 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2825 n_left_to_next, bi0, next0);
2828 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2831 /* Update counters */
2832 vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2833 SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2835 vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2836 SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2838 return from_frame->n_vectors;
/* Graph node registration: BSID steering via SRH insertion
 * (node "sr-pl-rewrite-b-insert"); shares error strings and next
 * nodes with the other rewrite nodes. */
2842 VLIB_REGISTER_NODE (sr_policy_rewrite_b_insert_node) = {
2843 .function = sr_policy_rewrite_b_insert,
2844 .name = "sr-pl-rewrite-b-insert",
2845 .vector_size = sizeof (u32),
2846 .format_trace = format_sr_policy_rewrite_trace,
2847 .type = VLIB_NODE_TYPE_INTERNAL,
2848 .n_errors = SR_POLICY_REWRITE_N_ERROR,
2849 .error_strings = sr_policy_rewrite_error_strings,
2850 .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2852 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2853 foreach_sr_policy_rewrite_next
2860 * @brief Function BSID encapsulation
/**
 * @brief SRH processing for BSID encapsulation.
 *
 * If the inner packet carries an SR routing header with segments left,
 * consume one segment (segments_left -= 1) and rewrite the inner IPv6
 * destination to the newly active segment. Otherwise the packet is sent
 * to the error-drop next with the BSID_ZERO error.
 *
 * NOTE(review): this listing elides part of the parameter list and the
 * error label; per the call sites, b0 is the vlib buffer and ip0 the
 * inner IPv6 header — confirm against the full source.
 */
2862 static_always_inline void
2863 end_bsid_encaps_srh_processing (vlib_node_runtime_t * node,
2866 ip6_sr_header_t * sr0, u32 * next0)
2868 ip6_address_t *new_dst0;
/* No SRH found in the inner packet -> error path */
2870 if (PREDICT_FALSE (!sr0))
2871 goto error_bsid_encaps;
2873 if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR))
2875 if (PREDICT_TRUE (sr0->segments_left != 0))
/* Consume one segment: next SID becomes the inner destination */
2877 sr0->segments_left -= 1;
2878 new_dst0 = (ip6_address_t *) (sr0->segments);
2879 new_dst0 += sr0->segments_left;
2880 ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
2881 ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];
/* Error path (label elided in this listing): drop with BSID_ZERO */
2887 *next0 = SR_POLICY_REWRITE_NEXT_ERROR;
2888 b0->error = node->errors[SR_POLICY_REWRITE_ERROR_BSID_ZERO];
2892 * @brief Graph node for applying a SR policy BSID - Encapsulation
2895 sr_policy_rewrite_b_encaps (vlib_main_t * vm, vlib_node_runtime_t * node,
2896 vlib_frame_t * from_frame)
2898 ip6_sr_main_t *sm = &sr_main;
2899 u32 n_left_from, next_index, *from, *to_next;
2901 from = vlib_frame_vector_args (from_frame);
2902 n_left_from = from_frame->n_vectors;
2904 next_index = node->cached_next_index;
2906 int encap_pkts = 0, bsid_pkts = 0;
2908 while (n_left_from > 0)
2912 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2915 while (n_left_from >= 8 && n_left_to_next >= 4)
2917 u32 bi0, bi1, bi2, bi3;
2918 vlib_buffer_t *b0, *b1, *b2, *b3;
2919 u32 next0, next1, next2, next3;
2920 next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2921 ip6_header_t *ip0, *ip1, *ip2, *ip3;
2922 ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
2923 ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2924 ip6_ext_header_t *prev0, *prev1, *prev2, *prev3;
2925 ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2927 /* Prefetch next iteration. */
2929 vlib_buffer_t *p4, *p5, *p6, *p7;
2931 p4 = vlib_get_buffer (vm, from[4]);
2932 p5 = vlib_get_buffer (vm, from[5]);
2933 p6 = vlib_get_buffer (vm, from[6]);
2934 p7 = vlib_get_buffer (vm, from[7]);
2936 /* Prefetch the buffer header and packet for the N+2 loop iteration */
2937 vlib_prefetch_buffer_header (p4, LOAD);
2938 vlib_prefetch_buffer_header (p5, LOAD);
2939 vlib_prefetch_buffer_header (p6, LOAD);
2940 vlib_prefetch_buffer_header (p7, LOAD);
2942 CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2943 CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2944 CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2945 CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2948 to_next[0] = bi0 = from[0];
2949 to_next[1] = bi1 = from[1];
2950 to_next[2] = bi2 = from[2];
2951 to_next[3] = bi3 = from[3];
2955 n_left_to_next -= 4;
2957 b0 = vlib_get_buffer (vm, bi0);
2958 b1 = vlib_get_buffer (vm, bi1);
2959 b2 = vlib_get_buffer (vm, bi2);
2960 b3 = vlib_get_buffer (vm, bi3);
2963 pool_elt_at_index (sm->sid_lists,
2964 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2966 pool_elt_at_index (sm->sid_lists,
2967 vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2969 pool_elt_at_index (sm->sid_lists,
2970 vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2972 pool_elt_at_index (sm->sid_lists,
2973 vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2974 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2975 vec_len (sl0->rewrite));
2976 ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2977 vec_len (sl1->rewrite));
2978 ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2979 vec_len (sl2->rewrite));
2980 ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2981 vec_len (sl3->rewrite));
2983 ip0_encap = vlib_buffer_get_current (b0);
2984 ip1_encap = vlib_buffer_get_current (b1);
2985 ip2_encap = vlib_buffer_get_current (b2);
2986 ip3_encap = vlib_buffer_get_current (b3);
2988 ip6_ext_header_find_t (ip0_encap, prev0, sr0,
2989 IP_PROTOCOL_IPV6_ROUTE);
2990 ip6_ext_header_find_t (ip1_encap, prev1, sr1,
2991 IP_PROTOCOL_IPV6_ROUTE);
2992 ip6_ext_header_find_t (ip2_encap, prev2, sr2,
2993 IP_PROTOCOL_IPV6_ROUTE);
2994 ip6_ext_header_find_t (ip3_encap, prev3, sr3,
2995 IP_PROTOCOL_IPV6_ROUTE);
2997 end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0);
2998 end_bsid_encaps_srh_processing (node, b1, ip1_encap, sr1, &next1);
2999 end_bsid_encaps_srh_processing (node, b2, ip2_encap, sr2, &next2);
3000 end_bsid_encaps_srh_processing (node, b3, ip3_encap, sr3, &next3);
3002 clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
3003 sl0->rewrite, vec_len (sl0->rewrite));
3004 clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
3005 sl1->rewrite, vec_len (sl1->rewrite));
3006 clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
3007 sl2->rewrite, vec_len (sl2->rewrite));
3008 clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
3009 sl3->rewrite, vec_len (sl3->rewrite));
3011 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
3012 vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
3013 vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
3014 vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
3016 ip0 = vlib_buffer_get_current (b0);
3017 ip1 = vlib_buffer_get_current (b1);
3018 ip2 = vlib_buffer_get_current (b2);
3019 ip3 = vlib_buffer_get_current (b3);
3021 encaps_processing_v6 (node, b0, ip0, ip0_encap);
3022 encaps_processing_v6 (node, b1, ip1, ip1_encap);
3023 encaps_processing_v6 (node, b2, ip2, ip2_encap);
3024 encaps_processing_v6 (node, b3, ip3, ip3_encap);
3026 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
3028 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3030 sr_policy_rewrite_trace_t *tr =
3031 vlib_add_trace (vm, node, b0, sizeof (*tr));
3032 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
3033 sizeof (tr->src.as_u8));
3034 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
3035 sizeof (tr->dst.as_u8));
3038 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
3040 sr_policy_rewrite_trace_t *tr =
3041 vlib_add_trace (vm, node, b1, sizeof (*tr));
3042 clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
3043 sizeof (tr->src.as_u8));
3044 clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
3045 sizeof (tr->dst.as_u8));
3048 if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
3050 sr_policy_rewrite_trace_t *tr =
3051 vlib_add_trace (vm, node, b2, sizeof (*tr));
3052 clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
3053 sizeof (tr->src.as_u8));
3054 clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
3055 sizeof (tr->dst.as_u8));
3058 if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
3060 sr_policy_rewrite_trace_t *tr =
3061 vlib_add_trace (vm, node, b3, sizeof (*tr));
3062 clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
3063 sizeof (tr->src.as_u8));
3064 clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
3065 sizeof (tr->dst.as_u8));
3070 vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
3071 n_left_to_next, bi0, bi1, bi2, bi3,
3072 next0, next1, next2, next3);
3075 /* Single loop for potentially the last three packets */
3076 while (n_left_from > 0 && n_left_to_next > 0)
3080 ip6_header_t *ip0 = 0, *ip0_encap = 0;
3081 ip6_ext_header_t *prev0;
3082 ip6_sr_header_t *sr0;
3084 u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
3091 n_left_to_next -= 1;
3092 b0 = vlib_get_buffer (vm, bi0);
3095 pool_elt_at_index (sm->sid_lists,
3096 vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
3097 ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
3098 vec_len (sl0->rewrite));
3100 ip0_encap = vlib_buffer_get_current (b0);
3101 ip6_ext_header_find_t (ip0_encap, prev0, sr0,
3102 IP_PROTOCOL_IPV6_ROUTE);
3103 end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0);
3105 clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
3106 sl0->rewrite, vec_len (sl0->rewrite));
3107 vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
3109 ip0 = vlib_buffer_get_current (b0);
3111 encaps_processing_v6 (node, b0, ip0, ip0_encap);
3113 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
3114 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3116 sr_policy_rewrite_trace_t *tr =
3117 vlib_add_trace (vm, node, b0, sizeof (*tr));
3118 clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
3119 sizeof (tr->src.as_u8));
3120 clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
3121 sizeof (tr->dst.as_u8));
3125 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
3126 n_left_to_next, bi0, next0);
3129 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
3132 /* Update counters */
3133 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
3134 SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
3136 vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
3137 SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
3140 return from_frame->n_vectors;
/* Graph node registration: BSID steering via IPv6 encapsulation
 * (node "sr-pl-rewrite-b-encaps"). */
3144 VLIB_REGISTER_NODE (sr_policy_rewrite_b_encaps_node) = {
3145 .function = sr_policy_rewrite_b_encaps,
3146 .name = "sr-pl-rewrite-b-encaps",
3147 .vector_size = sizeof (u32),
3148 .format_trace = format_sr_policy_rewrite_trace,
3149 .type = VLIB_NODE_TYPE_INTERNAL,
3150 .n_errors = SR_POLICY_REWRITE_N_ERROR,
3151 .error_strings = sr_policy_rewrite_error_strings,
3152 .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
3154 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
3155 foreach_sr_policy_rewrite_next
3161 /*************************** SR Segment Lists DPOs ****************************/
/**
 * @brief Format a SID-list DPO for show output: pool index, the list of
 * segment addresses, and the SID list weight.
 *
 * va_args: index_t (SID list pool index), u32 indent (unused).
 */
3163 format_sr_segment_list_dpo (u8 * s, va_list * args)
3165 ip6_sr_main_t *sm = &sr_main;
3166 ip6_address_t *addr;
3169 index_t index = va_arg (*args, index_t);
3170 CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
3171 s = format (s, "SR: Segment List index:[%d]", index);
3172 s = format (s, "\n\tSegments:");
/* Look up the SID list by pool index and print each segment address */
3174 sl = pool_elt_at_index (sm->sid_lists, index);
3176 s = format (s, "< ");
3177 vec_foreach (addr, sl->segments)
3179 s = format (s, "%U, ", format_ip6_address, addr);
/* The two \b characters erase the trailing ", " before closing the list */
3181 s = format (s, "\b\b > - ");
3182 s = format (s, "Weight: %u", sl->weight);
3187 const static dpo_vft_t sr_policy_rewrite_vft = {
3188 .dv_lock = sr_dpo_lock,
3189 .dv_unlock = sr_dpo_unlock,
3190 .dv_format = format_sr_segment_list_dpo,
3193 const static char *const sr_pr_encaps_ip6_nodes[] = {
3194 "sr-pl-rewrite-encaps",
3198 const static char *const sr_pr_encaps_ip4_nodes[] = {
3199 "sr-pl-rewrite-encaps-v4",
3203 const static char *const *const sr_pr_encaps_nodes[DPO_PROTO_NUM] = {
3204 [DPO_PROTO_IP6] = sr_pr_encaps_ip6_nodes,
3205 [DPO_PROTO_IP4] = sr_pr_encaps_ip4_nodes,
3208 const static char *const sr_pr_insert_ip6_nodes[] = {
3209 "sr-pl-rewrite-insert",
3213 const static char *const *const sr_pr_insert_nodes[DPO_PROTO_NUM] = {
3214 [DPO_PROTO_IP6] = sr_pr_insert_ip6_nodes,
3217 const static char *const sr_pr_bsid_insert_ip6_nodes[] = {
3218 "sr-pl-rewrite-b-insert",
3222 const static char *const *const sr_pr_bsid_insert_nodes[DPO_PROTO_NUM] = {
3223 [DPO_PROTO_IP6] = sr_pr_bsid_insert_ip6_nodes,
3226 const static char *const sr_pr_bsid_encaps_ip6_nodes[] = {
3227 "sr-pl-rewrite-b-encaps",
3231 const static char *const *const sr_pr_bsid_encaps_nodes[DPO_PROTO_NUM] = {
3232 [DPO_PROTO_IP6] = sr_pr_bsid_encaps_ip6_nodes,
3235 /********************* SR Policy Rewrite initialization ***********************/
3237 * @brief SR Policy Rewrite initialization
/**
 * @brief SR policy rewrite initialization: sets up the BSID hash, registers
 * the four SR policy DPO types, records the L2 encaps node index used for
 * HW redirect, and marks the FIB tables as unset.
 */
3240 sr_policy_rewrite_init (vlib_main_t * vm)
3242 ip6_sr_main_t *sm = &sr_main;
3244 /* Init memory for sr policy keys (bsid <-> ip6_address_t) */
3245 mhash_init (&sm->sr_policies_index_hash, sizeof (uword),
3246 sizeof (ip6_address_t));
3248 /* Register the SR policy rewrite DPO types (encaps/insert, plus BSID variants) */
3249 sr_pr_encaps_dpo_type =
3250 dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_encaps_nodes);
3252 sr_pr_insert_dpo_type =
3253 dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_insert_nodes);
3255 sr_pr_bsid_encaps_dpo_type =
3256 dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_encaps_nodes);
3258 sr_pr_bsid_insert_dpo_type =
3259 dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_insert_nodes);
3261 /* Register the L2 encaps node used in HW redirect */
3262 sm->l2_sr_policy_rewrite_index = sr_policy_rewrite_encaps_node.index;
/* ~0 = FIB tables not yet configured */
3264 sm->fib_table_ip6 = (u32) ~ 0;
3265 sm->fib_table_ip4 = (u32) ~ 0;
3270 VLIB_INIT_FUNCTION (sr_policy_rewrite_init);
3274 * fd.io coding-style-patch-verification: ON
3277 * eval: (c-set-style "gnu")