sr: add "set sr encaps hop-limit" command
src/vnet/srv6/sr_policy_rewrite.c
1 /*
2  * sr_policy_rewrite.c: ipv6 sr policy creation
3  *
4  * Copyright (c) 2016 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 /**
19  * @file
20  * @brief SR policy creation and application
21  *
22  * Create an SR policy.
23  * An SR policy can be either of 'default' type or 'spray' type.
24  * An SR policy has a list of SID lists attached.
25  * If the SR policy is of the default type, it load-balances among them.
26  * An SR policy has an associated BindingSID.
27  * When a packet arrives with IPv6 DA == BindingSID, the SR policy
28  * associated with that BindingSID is applied to the packet.
29  *
30  * SR policies can be applied either by using IPv6 encapsulation or
31  * SRH insertion. Both methods can be found in this file.
32  *
33  * Input traffic is usually IPv6 packets. However, IPv4 packets and L2
34  * frames are also possible (they are encapsulated into IPv6 with an SRH).
35  *
36  * This file provides the appropriate VPP graph nodes for each of these
37  * methods.
38  *
39  */
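/*
 * Illustrative example (addresses are placeholders, not from this commit):
 * an SR policy created with
 *   sr policy add bsid 2001::1 next 2001::2 next 2001::3 encap
 * installs a FIB entry for 2001::1/128. IPv6 traffic whose DA equals the
 * BSID is then encapsulated in an outer IPv6 header (SA taken from the
 * configured encaps source, hop limit from the configured encaps hop-limit)
 * carrying an SRH with the segment list <2001::2, 2001::3>.
 */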
40
41 #include <vlib/vlib.h>
42 #include <vnet/vnet.h>
43 #include <vnet/srv6/sr.h>
44 #include <vnet/ip/ip.h>
45 #include <vnet/srv6/sr_packet.h>
46 #include <vnet/ip/ip6_packet.h>
47 #include <vnet/fib/ip6_fib.h>
48 #include <vnet/dpo/dpo.h>
49 #include <vnet/dpo/replicate_dpo.h>
50
51 #include <vppinfra/error.h>
52 #include <vppinfra/elog.h>
53
54 /**
55  * @brief SR policy rewrite trace
56  */
57 typedef struct
58 {
59   ip6_address_t src, dst;
60 } sr_policy_rewrite_trace_t;
61
62 /* Graph arcs */
63 #define foreach_sr_policy_rewrite_next     \
64 _(IP6_LOOKUP, "ip6-lookup")         \
65 _(ERROR, "error-drop")
66
67 typedef enum
68 {
69 #define _(s,n) SR_POLICY_REWRITE_NEXT_##s,
70   foreach_sr_policy_rewrite_next
71 #undef _
72     SR_POLICY_REWRITE_N_NEXT,
73 } sr_policy_rewrite_next_t;
74
75 /* SR rewrite errors */
76 #define foreach_sr_policy_rewrite_error                     \
77 _(INTERNAL_ERROR, "Segment Routing undefined error")        \
78 _(BSID_ZERO, "BSID with SL = 0")                            \
79 _(COUNTER_TOTAL, "SR steered IPv6 packets")                 \
80 _(COUNTER_ENCAP, "SR: Encaps packets")                      \
81 _(COUNTER_INSERT, "SR: SRH inserted packets")               \
82 _(COUNTER_BSID, "SR: BindingSID steered packets")
83
84 typedef enum
85 {
86 #define _(sym,str) SR_POLICY_REWRITE_ERROR_##sym,
87   foreach_sr_policy_rewrite_error
88 #undef _
89     SR_POLICY_REWRITE_N_ERROR,
90 } sr_policy_rewrite_error_t;
91
92 static char *sr_policy_rewrite_error_strings[] = {
93 #define _(sym,string) string,
94   foreach_sr_policy_rewrite_error
95 #undef _
96 };
97
98 /**
99  * @brief Dynamically added SR SL DPO type
100  */
101 static dpo_type_t sr_pr_encaps_dpo_type;
102 static dpo_type_t sr_pr_insert_dpo_type;
103 static dpo_type_t sr_pr_bsid_encaps_dpo_type;
104 static dpo_type_t sr_pr_bsid_insert_dpo_type;
105
106 /**
107  * @brief IPv6 source address and hop limit for encapsulated packets
108  */
109 static ip6_address_t sr_pr_encaps_src;
110 static u8 sr_pr_encaps_hop_limit = IPv6_DEFAULT_HOP_LIMIT;
111
112 /******************* SR rewrite set encaps IPv6 source addr *******************/
113 /* Note:  This is temporary. We don't know whether to follow this path or
114           take the IP address of a loopback interface or even the OIF     */
115
116 void
117 sr_set_source (ip6_address_t * address)
118 {
119   clib_memcpy_fast (&sr_pr_encaps_src, address, sizeof (sr_pr_encaps_src));
120 }
121
122 static clib_error_t *
123 set_sr_src_command_fn (vlib_main_t * vm, unformat_input_t * input,
124                        vlib_cli_command_t * cmd)
125 {
126   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
127     {
128       if (unformat
129           (input, "addr %U", unformat_ip6_address, &sr_pr_encaps_src))
130         return 0;
131       else
132         return clib_error_return (0, "No address specified");
133     }
134   return clib_error_return (0, "No address specified");
135 }
136
137 /* *INDENT-OFF* */
138 VLIB_CLI_COMMAND (set_sr_src_command, static) = {
139   .path = "set sr encaps source",
140   .short_help = "set sr encaps source addr <ip6_addr>",
141   .function = set_sr_src_command_fn,
142 };
143 /* *INDENT-ON* */
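/*
 * Example usage (illustrative address):
 *   vpp# set sr encaps source addr 2001:db8::1
 * Subsequently computed encapsulation rewrites use this address as the
 * outer IPv6 source address.
 */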
144
145 /******************** SR rewrite set encaps IPv6 hop-limit ********************/
146
147 void
148 sr_set_hop_limit (u8 hop_limit)
149 {
150   sr_pr_encaps_hop_limit = hop_limit;
151 }
152
153 u8
154 sr_get_hop_limit (void)
155 {
156   return sr_pr_encaps_hop_limit;
157 }
158
159 static clib_error_t *
160 set_sr_hop_limit_command_fn (vlib_main_t * vm, unformat_input_t * input,
161                              vlib_cli_command_t * cmd)
162 {
163   int hop_limit = sr_get_hop_limit ();
164
165   if (unformat_check_input (input) == UNFORMAT_END_OF_INPUT)
166     return clib_error_return (0, "No value specified");
167   if (!unformat (input, "%d", &hop_limit))
168     return clib_error_return (0, "Invalid value");
169   if (hop_limit <= 0 || hop_limit > 255)
170     return clib_error_return (0, "Value out of range [1-255]");
171   sr_pr_encaps_hop_limit = (u8) hop_limit;
172   return 0;
173 }
174
175 /* *INDENT-OFF* */
176 VLIB_CLI_COMMAND (set_sr_hop_limit_command, static) = {
177   .path = "set sr encaps hop-limit",
178   .short_help = "set sr encaps hop-limit <value>",
179   .function = set_sr_hop_limit_command_fn,
180 };
181 /* *INDENT-ON* */
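/*
 * Example usage (illustrative value):
 *   vpp# set sr encaps hop-limit 32
 * The accepted range is 1-255. The value is applied to the outer IPv6
 * header of rewrite strings computed afterwards (see compute_rewrite_encaps
 * below).
 */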
182
183 /*********************** SR rewrite string computation ************************/
184 /**
185  * @brief SR rewrite string computation for IPv6 encapsulation (inline)
186  *
187  * @param sl is a vector of IPv6 addresses composing the Segment List
188  *
189  * @return precomputed rewrite string for encapsulation
190  */
191 static inline u8 *
192 compute_rewrite_encaps (ip6_address_t * sl)
193 {
194   ip6_header_t *iph;
195   ip6_sr_header_t *srh;
196   ip6_address_t *addrp, *this_address;
197   u32 header_length = 0;
198   u8 *rs = NULL;
199
200   header_length = 0;
201   header_length += IPv6_DEFAULT_HEADER_LENGTH;
202   if (vec_len (sl) > 1)
203     {
204       header_length += sizeof (ip6_sr_header_t);
205       header_length += vec_len (sl) * sizeof (ip6_address_t);
206     }
207
208   vec_validate (rs, header_length - 1);
209
210   iph = (ip6_header_t *) rs;
211   iph->ip_version_traffic_class_and_flow_label =
212     clib_host_to_net_u32 (0 | ((6 & 0xF) << 28));
213   iph->src_address.as_u64[0] = sr_pr_encaps_src.as_u64[0];
214   iph->src_address.as_u64[1] = sr_pr_encaps_src.as_u64[1];
215   iph->payload_length = header_length - IPv6_DEFAULT_HEADER_LENGTH;
216   iph->protocol = IP_PROTOCOL_IPV6;
217   iph->hop_limit = sr_pr_encaps_hop_limit;
218
219   if (vec_len (sl) > 1)
220     {
221       srh = (ip6_sr_header_t *) (iph + 1);
222       iph->protocol = IP_PROTOCOL_IPV6_ROUTE;
223       srh->protocol = IP_PROTOCOL_IPV6;
224       srh->type = ROUTING_HEADER_TYPE_SR;
225       srh->segments_left = vec_len (sl) - 1;
226       srh->last_entry = vec_len (sl) - 1;
227       srh->length = ((sizeof (ip6_sr_header_t) +
228                       (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1;
229       srh->flags = 0x00;
230       srh->tag = 0x0000;
231       addrp = srh->segments + vec_len (sl) - 1;
232       vec_foreach (this_address, sl)
233       {
234         clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
235                           sizeof (ip6_address_t));
236         addrp--;
237       }
238     }
239   iph->dst_address.as_u64[0] = sl->as_u64[0];
240   iph->dst_address.as_u64[1] = sl->as_u64[1];
241   return rs;
242 }
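/*
 * Worked size example (assuming sizeof (ip6_sr_header_t) == 8, consistent
 * with the arithmetic above): for a 3-segment list the precomputed rewrite is
 *   40 (IPv6 header) + 8 (SRH fixed part) + 3 * 16 (segments) = 96 bytes,
 * and srh->length = ((8 + 48) / 8) - 1 = 6, i.e. the SRH length in 8-octet
 * units not counting the first 8 octets.
 */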
243
244 /**
245  * @brief SR rewrite string computation for SRH insertion (inline)
246  *
247  * @param sl is a vector of IPv6 addresses composing the Segment List
248  *
249  * @return precomputed rewrite string for SRH insertion
250  */
251 static inline u8 *
252 compute_rewrite_insert (ip6_address_t * sl)
253 {
254   ip6_sr_header_t *srh;
255   ip6_address_t *addrp, *this_address;
256   u32 header_length = 0;
257   u8 *rs = NULL;
258
259   header_length = 0;
260   header_length += sizeof (ip6_sr_header_t);
261   header_length += (vec_len (sl) + 1) * sizeof (ip6_address_t);
262
263   vec_validate (rs, header_length - 1);
264
265   srh = (ip6_sr_header_t *) rs;
266   srh->type = ROUTING_HEADER_TYPE_SR;
267   srh->segments_left = vec_len (sl);
268   srh->last_entry = vec_len (sl);
269   srh->length = ((sizeof (ip6_sr_header_t) +
270                   ((vec_len (sl) + 1) * sizeof (ip6_address_t))) / 8) - 1;
271   srh->flags = 0x00;
272   srh->tag = 0x0000;
273   addrp = srh->segments + vec_len (sl);
274   vec_foreach (this_address, sl)
275   {
276     clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
277                       sizeof (ip6_address_t));
278     addrp--;
279   }
280   return rs;
281 }
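/*
 * Worked size example (same sizeof assumption as above): for a 3-segment
 * list the insertion rewrite reserves vec_len (sl) + 1 = 4 segment slots,
 * i.e. 8 + 4 * 16 = 72 bytes, and srh->length = ((8 + 64) / 8) - 1 = 8.
 * The extra slot is expected to be filled by the insertion node with the
 * packet's original destination address (that node is outside this excerpt).
 */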
282
283 /**
284  * @brief SR rewrite string computation for SRH insertion with BSID (inline)
285  *
286  * @param sl is a vector of IPv6 addresses composing the Segment List
287  *
288  * @return precomputed rewrite string for SRH insertion with BSID
289  */
290 static inline u8 *
291 compute_rewrite_bsid (ip6_address_t * sl)
292 {
293   ip6_sr_header_t *srh;
294   ip6_address_t *addrp, *this_address;
295   u32 header_length = 0;
296   u8 *rs = NULL;
297
298   header_length = 0;
299   header_length += sizeof (ip6_sr_header_t);
300   header_length += vec_len (sl) * sizeof (ip6_address_t);
301
302   vec_validate (rs, header_length - 1);
303
304   srh = (ip6_sr_header_t *) rs;
305   srh->type = ROUTING_HEADER_TYPE_SR;
306   srh->segments_left = vec_len (sl) - 1;
307   srh->last_entry = vec_len (sl) - 1;
308   srh->length = ((sizeof (ip6_sr_header_t) +
309                   (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1;
310   srh->flags = 0x00;
311   srh->tag = 0x0000;
312   addrp = srh->segments + vec_len (sl) - 1;
313   vec_foreach (this_address, sl)
314   {
315     clib_memcpy_fast (addrp->as_u8, this_address->as_u8,
316                       sizeof (ip6_address_t));
317     addrp--;
318   }
319   return rs;
320 }
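/*
 * Note: unlike compute_rewrite_insert, no extra segment slot is reserved
 * here and segments_left starts at vec_len (sl) - 1. As create_sl below
 * shows, this rewrite is used for BindingSID-steered traffic of
 * SRH-insertion policies, where (presumably) the BSID does not need to be
 * preserved as an additional segment.
 */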
321
322 /***************************  SR LB helper functions **************************/
323 /**
324  * @brief Creates a Segment List and adds it to an SR policy
325  *
326  * Creates a Segment List and adds it to the SR policy. Note that SLs are
327  * not necessarily unique. Hence there might be two Segment Lists within the
328  * same SR Policy with exactly the same segments and the same weight.
329  *
330  * @param sr_policy is the SR policy where the SL will be added
331  * @param sl is a vector of IPv6 addresses composing the Segment List
332  * @param weight is the weight of the SegmentList (for load-balancing purposes)
333  * @param is_encap represents the mode (SRH insertion vs Encapsulation)
334  *
335  * @return pointer to the just created segment list
336  */
337 static inline ip6_sr_sl_t *
338 create_sl (ip6_sr_policy_t * sr_policy, ip6_address_t * sl, u32 weight,
339            u8 is_encap)
340 {
341   ip6_sr_main_t *sm = &sr_main;
342   ip6_sr_sl_t *segment_list;
343
344   pool_get (sm->sid_lists, segment_list);
345   clib_memset (segment_list, 0, sizeof (*segment_list));
346
347   vec_add1 (sr_policy->segments_lists, segment_list - sm->sid_lists);
348
349   /* Fill in segment list */
350   segment_list->weight =
351     (weight != (u32) ~ 0 ? weight : SR_SEGMENT_LIST_WEIGHT_DEFAULT);
352   segment_list->segments = vec_dup (sl);
353
354   if (is_encap)
355     {
356       segment_list->rewrite = compute_rewrite_encaps (sl);
357       segment_list->rewrite_bsid = segment_list->rewrite;
358     }
359   else
360     {
361       segment_list->rewrite = compute_rewrite_insert (sl);
362       segment_list->rewrite_bsid = compute_rewrite_bsid (sl);
363     }
364
365   /* Create DPO */
366   dpo_reset (&segment_list->bsid_dpo);
367   dpo_reset (&segment_list->ip6_dpo);
368   dpo_reset (&segment_list->ip4_dpo);
369
370   if (is_encap)
371     {
372       dpo_set (&segment_list->ip6_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP6,
373                segment_list - sm->sid_lists);
374       dpo_set (&segment_list->ip4_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP4,
375                segment_list - sm->sid_lists);
376       dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_encaps_dpo_type,
377                DPO_PROTO_IP6, segment_list - sm->sid_lists);
378     }
379   else
380     {
381       dpo_set (&segment_list->ip6_dpo, sr_pr_insert_dpo_type, DPO_PROTO_IP6,
382                segment_list - sm->sid_lists);
383       dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_insert_dpo_type,
384                DPO_PROTO_IP6, segment_list - sm->sid_lists);
385     }
386
387   return segment_list;
388 }
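/*
 * Note: the index stored in each DPO (segment_list - sm->sid_lists) is what
 * the rewrite graph nodes later read back from
 * vnet_buffer (b)->ip.adj_index[VLIB_TX] in order to locate the precomputed
 * rewrite string (see sr_policy_rewrite_encaps below).
 */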
389
390 /**
391  * @brief Updates the Load Balancer after an SR Policy change
392  *
393  * @param sr_policy is the modified SR Policy
394  */
395 static inline void
396 update_lb (ip6_sr_policy_t * sr_policy)
397 {
398   flow_hash_config_t fhc;
399   u32 *sl_index;
400   ip6_sr_sl_t *segment_list;
401   ip6_sr_main_t *sm = &sr_main;
402   load_balance_path_t path;
403   path.path_index = FIB_NODE_INDEX_INVALID;
404   load_balance_path_t *ip4_path_vector = 0;
405   load_balance_path_t *ip6_path_vector = 0;
406   load_balance_path_t *b_path_vector = 0;
407
408   /* In case LB does not exist, create it */
409   if (!dpo_id_is_valid (&sr_policy->bsid_dpo))
410     {
411       fib_prefix_t pfx = {
412         .fp_proto = FIB_PROTOCOL_IP6,
413         .fp_len = 128,
414         .fp_addr = {
415                     .ip6 = sr_policy->bsid,
416                     }
417       };
418
419       /* Add FIB entry for BSID */
420       fhc = fib_table_get_flow_hash_config (sr_policy->fib_table,
421                                             FIB_PROTOCOL_IP6);
422
423       dpo_set (&sr_policy->bsid_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6,
424                load_balance_create (0, DPO_PROTO_IP6, fhc));
425
426       dpo_set (&sr_policy->ip6_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6,
427                load_balance_create (0, DPO_PROTO_IP6, fhc));
428
429       /* Update the FIB entries to point to the LB DPO in the main FIB and the hidden one */
430       fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6,
431                                                           sr_policy->fib_table),
432                                           &pfx, FIB_SOURCE_SR,
433                                           FIB_ENTRY_FLAG_EXCLUSIVE,
434                                           &sr_policy->bsid_dpo);
435
436       fib_table_entry_special_dpo_update (sm->fib_table_ip6,
437                                           &pfx,
438                                           FIB_SOURCE_SR,
439                                           FIB_ENTRY_FLAG_EXCLUSIVE,
440                                           &sr_policy->ip6_dpo);
441
442       if (sr_policy->is_encap)
443         {
444           dpo_set (&sr_policy->ip4_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP4,
445                    load_balance_create (0, DPO_PROTO_IP4, fhc));
446
447           fib_table_entry_special_dpo_update (sm->fib_table_ip4,
448                                               &pfx,
449                                               FIB_SOURCE_SR,
450                                               FIB_ENTRY_FLAG_EXCLUSIVE,
451                                               &sr_policy->ip4_dpo);
452         }
453
454     }
455
456   /* Create the LB path vector */
457   //path_vector = vec_new(load_balance_path_t, vec_len(sr_policy->segments_lists));
458   vec_foreach (sl_index, sr_policy->segments_lists)
459   {
460     segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
461     path.path_dpo = segment_list->bsid_dpo;
462     path.path_weight = segment_list->weight;
463     vec_add1 (b_path_vector, path);
464     path.path_dpo = segment_list->ip6_dpo;
465     vec_add1 (ip6_path_vector, path);
466     if (sr_policy->is_encap)
467       {
468         path.path_dpo = segment_list->ip4_dpo;
469         vec_add1 (ip4_path_vector, path);
470       }
471   }
472
473   /* Update LB multipath */
474   load_balance_multipath_update (&sr_policy->bsid_dpo, b_path_vector,
475                                  LOAD_BALANCE_FLAG_NONE);
476   load_balance_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector,
477                                  LOAD_BALANCE_FLAG_NONE);
478   if (sr_policy->is_encap)
479     load_balance_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector,
480                                    LOAD_BALANCE_FLAG_NONE);
481
482   /* Cleanup */
483   vec_free (b_path_vector);
484   vec_free (ip6_path_vector);
485   vec_free (ip4_path_vector);
486
487 }
488
489 /**
490  * @brief Updates the Replicate DPO after an SR Policy change
491  *
492  * @param sr_policy is the modified SR Policy (type spray)
493  */
494 static inline void
495 update_replicate (ip6_sr_policy_t * sr_policy)
496 {
497   u32 *sl_index;
498   ip6_sr_sl_t *segment_list;
499   ip6_sr_main_t *sm = &sr_main;
500   load_balance_path_t path;
501   path.path_index = FIB_NODE_INDEX_INVALID;
502   load_balance_path_t *b_path_vector = 0;
503   load_balance_path_t *ip6_path_vector = 0;
504   load_balance_path_t *ip4_path_vector = 0;
505
506   /* In case LB does not exist, create it */
507   if (!dpo_id_is_valid (&sr_policy->bsid_dpo))
508     {
509       dpo_set (&sr_policy->bsid_dpo, DPO_REPLICATE,
510                DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6));
511
512       dpo_set (&sr_policy->ip6_dpo, DPO_REPLICATE,
513                DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6));
514
515       /* Update FIB entry's DPO to point to SR without LB */
516       fib_prefix_t pfx = {
517         .fp_proto = FIB_PROTOCOL_IP6,
518         .fp_len = 128,
519         .fp_addr = {
520                     .ip6 = sr_policy->bsid,
521                     }
522       };
523       fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6,
524                                                           sr_policy->fib_table),
525                                           &pfx, FIB_SOURCE_SR,
526                                           FIB_ENTRY_FLAG_EXCLUSIVE,
527                                           &sr_policy->bsid_dpo);
528
529       fib_table_entry_special_dpo_update (sm->fib_table_ip6,
530                                           &pfx,
531                                           FIB_SOURCE_SR,
532                                           FIB_ENTRY_FLAG_EXCLUSIVE,
533                                           &sr_policy->ip6_dpo);
534
535       if (sr_policy->is_encap)
536         {
537           dpo_set (&sr_policy->ip4_dpo, DPO_REPLICATE, DPO_PROTO_IP4,
538                    replicate_create (0, DPO_PROTO_IP4));
539
540           fib_table_entry_special_dpo_update (sm->fib_table_ip4,
541                                               &pfx,
542                                               FIB_SOURCE_SR,
543                                               FIB_ENTRY_FLAG_EXCLUSIVE,
544                                               &sr_policy->ip4_dpo);
545         }
546
547     }
548
549   /* Create the replicate path vector */
550   path.path_weight = 1;
551   vec_foreach (sl_index, sr_policy->segments_lists)
552   {
553     segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
554     path.path_dpo = segment_list->bsid_dpo;
555     vec_add1 (b_path_vector, path);
556     path.path_dpo = segment_list->ip6_dpo;
557     vec_add1 (ip6_path_vector, path);
558     if (sr_policy->is_encap)
559       {
560         path.path_dpo = segment_list->ip4_dpo;
561         vec_add1 (ip4_path_vector, path);
562       }
563   }
564
565   /* Update replicate multipath */
566   replicate_multipath_update (&sr_policy->bsid_dpo, b_path_vector);
567   replicate_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector);
568   if (sr_policy->is_encap)
569     replicate_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector);
570 }
571
572 /******************************* SR rewrite API *******************************/
573 /* Three functions for handling sr policies:
574  *   -> sr_policy_add
575  *   -> sr_policy_del
576  *   -> sr_policy_mod
577  * All of them are exposed via the API. The CLI handler is sr_policy_command_fn   */
578
579 /**
580  * @brief Create a new SR policy
581  *
582  * @param bsid is the bindingSID of the SR Policy
583  * @param segments is a vector of IPv6 addresses composing the segment list
584  * @param weight is the weight of the SID list (optional)
585  * @param behavior is the behavior of the SR policy (default/spray)
586  * @param fib_table is the VRF where to install the FIB entry for the BSID
587  * @param is_encap (bool) whether SR policy should behave as Encap/SRH Insertion
588  *
589  * @return 0 if correct, else error
590  */
591 int
592 sr_policy_add (ip6_address_t * bsid, ip6_address_t * segments,
593                u32 weight, u8 behavior, u32 fib_table, u8 is_encap)
594 {
595   ip6_sr_main_t *sm = &sr_main;
596   ip6_sr_policy_t *sr_policy = 0;
597   uword *p;
598
599   /* Search for existing keys (BSID) */
600   p = mhash_get (&sm->sr_policies_index_hash, bsid);
601   if (p)
602     {
603       /* Trying to add an SR policy that already exists; complain */
604       return -12;
605     }
606
607   /* Search for collisions in the FIB entries */
608   /* Explanation: It might be possible that some other entity has already
609    * created a route for the BSID. This is impossible in theory, but in
610    * practice we could see it. Assert it and scream if needed */
611   fib_prefix_t pfx = {
612     .fp_proto = FIB_PROTOCOL_IP6,
613     .fp_len = 128,
614     .fp_addr = {
615                 .ip6 = *bsid,
616                 }
617   };
618
619   /* Lookup the FIB index associated to the table selected */
620   u32 fib_index = fib_table_find (FIB_PROTOCOL_IP6,
621                                   (fib_table != (u32) ~ 0 ? fib_table : 0));
622   if (fib_index == ~0)
623     return -13;
624
625   /* Lookup whether there exists an entry for the BSID */
626   fib_node_index_t fei = fib_table_lookup_exact_match (fib_index, &pfx);
627   if (FIB_NODE_INDEX_INVALID != fei)
628     return -12;                 //There is an entry for such lookup
629
630   /* Add an SR policy object */
631   pool_get (sm->sr_policies, sr_policy);
632   clib_memset (sr_policy, 0, sizeof (*sr_policy));
633   clib_memcpy_fast (&sr_policy->bsid, bsid, sizeof (ip6_address_t));
634   sr_policy->type = behavior;
635   sr_policy->fib_table = (fib_table != (u32) ~ 0 ? fib_table : 0);      //Is default FIB 0 ?
636   sr_policy->is_encap = is_encap;
637
638   /* Copy the key */
639   mhash_set (&sm->sr_policies_index_hash, bsid, sr_policy - sm->sr_policies,
640              NULL);
641
642   /* Create a segment list and add the index to the SR policy */
643   create_sl (sr_policy, segments, weight, is_encap);
644
645   /* If the FIB tables don't exist, create them */
646   if (sm->fib_table_ip6 == (u32) ~ 0)
647     {
648       sm->fib_table_ip6 = fib_table_create_and_lock (FIB_PROTOCOL_IP6,
649                                                      FIB_SOURCE_SR,
650                                                      "SRv6 steering of IP6 prefixes through BSIDs");
651       sm->fib_table_ip4 = fib_table_create_and_lock (FIB_PROTOCOL_IP6,
652                                                      FIB_SOURCE_SR,
653                                                      "SRv6 steering of IP4 prefixes through BSIDs");
654     }
655
656   /* Create IPv6 FIB for the BindingSID attached to the DPO of the only SL */
657   if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
658     update_lb (sr_policy);
659   else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
660     update_replicate (sr_policy);
661   return 0;
662 }
663
664 /**
665  * @brief Delete an SR policy
666  *
667  * @param bsid is the bindingSID of the SR Policy
668  * @param index is the index of the SR policy
669  *
670  * @return 0 if correct, else error
671  */
672 int
673 sr_policy_del (ip6_address_t * bsid, u32 index)
674 {
675   ip6_sr_main_t *sm = &sr_main;
676   ip6_sr_policy_t *sr_policy = 0;
677   ip6_sr_sl_t *segment_list;
678   u32 *sl_index;
679   uword *p;
680
681   if (bsid)
682     {
683       p = mhash_get (&sm->sr_policies_index_hash, bsid);
684       if (p)
685         sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
686       else
687         return -1;
688     }
689   else
690     {
691       sr_policy = pool_elt_at_index (sm->sr_policies, index);
692       if (!sr_policy)
693         return -1;
694     }
695
696   /* Remove BindingSID FIB entry */
697   fib_prefix_t pfx = {
698     .fp_proto = FIB_PROTOCOL_IP6,
699     .fp_len = 128,
700     .fp_addr = {
701                 .ip6 = sr_policy->bsid,
702                 }
703     ,
704   };
705
706   fib_table_entry_special_remove (fib_table_find (FIB_PROTOCOL_IP6,
707                                                   sr_policy->fib_table),
708                                   &pfx, FIB_SOURCE_SR);
709
710   fib_table_entry_special_remove (sm->fib_table_ip6, &pfx, FIB_SOURCE_SR);
711
712   if (sr_policy->is_encap)
713     fib_table_entry_special_remove (sm->fib_table_ip4, &pfx, FIB_SOURCE_SR);
714
715   if (dpo_id_is_valid (&sr_policy->bsid_dpo))
716     {
717       dpo_reset (&sr_policy->bsid_dpo);
718       dpo_reset (&sr_policy->ip4_dpo);
719       dpo_reset (&sr_policy->ip6_dpo);
720     }
721
722   /* Clean SID Lists */
723   vec_foreach (sl_index, sr_policy->segments_lists)
724   {
725     segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
726     vec_free (segment_list->segments);
727     vec_free (segment_list->rewrite);
728     if (!sr_policy->is_encap)
729       vec_free (segment_list->rewrite_bsid);
730     pool_put_index (sm->sid_lists, *sl_index);
731   }
732
733   /* Remove SR policy entry */
734   mhash_unset (&sm->sr_policies_index_hash, &sr_policy->bsid, NULL);
735   pool_put (sm->sr_policies, sr_policy);
736
737   /* If the FIBs are empty, unlock them */
738   if (!pool_elts (sm->sr_policies) && !pool_elts (sm->steer_policies))
739     {
740       fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6, FIB_SOURCE_SR);
741       fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP6, FIB_SOURCE_SR);
742       sm->fib_table_ip6 = (u32) ~ 0;
743       sm->fib_table_ip4 = (u32) ~ 0;
744     }
745
746   return 0;
747 }
748
749 /**
750  * @brief Modify an existing SR policy
751  *
752  * The possible modifications are adding a new Segment List, modifying an
753  * existing Segment List (weight only) and deleting a given
754  * Segment List from the SR Policy.
755  *
756  * @param bsid is the bindingSID of the SR Policy
757  * @param index is the index of the SR policy
758  * @param fib_table is the VRF where to install the FIB entry for the BSID
759  * @param operation is the operation to perform (one of the ones above)
760  * @param segments is a vector of IPv6 addresses composing the segment list
761  * @param sl_index is the index of the Segment List to modify/delete
762  * @param weight is the weight of the SID list (optional)
764  *
765  * @return 0 if correct, else error
766  */
767 int
768 sr_policy_mod (ip6_address_t * bsid, u32 index, u32 fib_table,
769                u8 operation, ip6_address_t * segments, u32 sl_index,
770                u32 weight)
771 {
772   ip6_sr_main_t *sm = &sr_main;
773   ip6_sr_policy_t *sr_policy = 0;
774   ip6_sr_sl_t *segment_list;
775   u32 *sl_index_iterate;
776   uword *p;
777
778   if (bsid)
779     {
780       p = mhash_get (&sm->sr_policies_index_hash, bsid);
781       if (p)
782         sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
783       else
784         return -1;
785     }
786   else
787     {
788       sr_policy = pool_elt_at_index (sm->sr_policies, index);
789       if (!sr_policy)
790         return -1;
791     }
792
793   if (operation == 1)           /* Add SR List to an existing SR policy */
794     {
795       /* Create the new SL */
796       segment_list =
797         create_sl (sr_policy, segments, weight, sr_policy->is_encap);
798
799       /* Create a new LB DPO */
800       if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
801         update_lb (sr_policy);
802       else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
803         update_replicate (sr_policy);
804     }
805   else if (operation == 2)      /* Delete SR List from an existing SR policy */
806     {
807       /* Check that there is currently more than one SID list */
808       if (vec_len (sr_policy->segments_lists) == 1)
809         return -21;
810
811       /* Check that the SR list does exist and is assigned to the sr policy */
812       vec_foreach (sl_index_iterate, sr_policy->segments_lists)
813         if (*sl_index_iterate == sl_index)
814         break;
815
816       if (*sl_index_iterate != sl_index)
817         return -22;
818
819       /* Remove the lucky SR list that is being kicked out */
820       segment_list = pool_elt_at_index (sm->sid_lists, sl_index);
821       vec_free (segment_list->segments);
822       vec_free (segment_list->rewrite);
823       if (!sr_policy->is_encap)
824         vec_free (segment_list->rewrite_bsid);
825       pool_put_index (sm->sid_lists, sl_index);
826       vec_del1 (sr_policy->segments_lists,
827                 sl_index_iterate - sr_policy->segments_lists);
828
829       /* Create a new LB DPO */
830       if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
831         update_lb (sr_policy);
832       else if (sr_policy->type == SR_POLICY_TYPE_SPRAY)
833         update_replicate (sr_policy);
834     }
835   else if (operation == 3)      /* Modify the weight of an existing SR List */
836     {
837       /* Find the corresponding SL */
838       vec_foreach (sl_index_iterate, sr_policy->segments_lists)
839         if (*sl_index_iterate == sl_index)
840         break;
841
842       if (*sl_index_iterate != sl_index)
843         return -32;
844
845       /* Change the weight */
846       segment_list = pool_elt_at_index (sm->sid_lists, sl_index);
847       segment_list->weight = weight;
848
849       /* Update LB */
850       if (sr_policy->type == SR_POLICY_TYPE_DEFAULT)
851         update_lb (sr_policy);
852     }
853   else                          /* Incorrect op. */
854     return -1;
855
856   return 0;
857 }
858
859 /**
860  * @brief CLI for 'sr policies' command family
861  */
862 static clib_error_t *
863 sr_policy_command_fn (vlib_main_t * vm, unformat_input_t * input,
864                       vlib_cli_command_t * cmd)
865 {
866   int rv = -1;
867   char is_del = 0, is_add = 0, is_mod = 0;
868   char policy_set = 0;
869   ip6_address_t bsid, next_address;
870   u32 sr_policy_index = (u32) ~ 0, sl_index = (u32) ~ 0;
871   u32 weight = (u32) ~ 0, fib_table = (u32) ~ 0;
872   ip6_address_t *segments = 0, *this_seg;
873   u8 operation = 0;
874   char is_encap = 1;
875   char is_spray = 0;
876
877   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
878     {
879       if (!is_add && !is_mod && !is_del && unformat (input, "add"))
880         is_add = 1;
881       else if (!is_add && !is_mod && !is_del && unformat (input, "del"))
882         is_del = 1;
883       else if (!is_add && !is_mod && !is_del && unformat (input, "mod"))
884         is_mod = 1;
885       else if (!policy_set
886                && unformat (input, "bsid %U", unformat_ip6_address, &bsid))
887         policy_set = 1;
888       else if (!is_add && !policy_set
889                && unformat (input, "index %d", &sr_policy_index))
890         policy_set = 1;
891       else if (unformat (input, "weight %d", &weight));
892       else
893         if (unformat (input, "next %U", unformat_ip6_address, &next_address))
894         {
895           vec_add2 (segments, this_seg, 1);
896           clib_memcpy_fast (this_seg->as_u8, next_address.as_u8,
897                             sizeof (*this_seg));
898         }
899       else if (unformat (input, "add sl"))
900         operation = 1;
901       else if (unformat (input, "del sl index %d", &sl_index))
902         operation = 2;
903       else if (unformat (input, "mod sl index %d", &sl_index))
904         operation = 3;
905       else if (fib_table == (u32) ~ 0
906                && unformat (input, "fib-table %d", &fib_table));
907       else if (unformat (input, "encap"))
908         is_encap = 1;
909       else if (unformat (input, "insert"))
910         is_encap = 0;
911       else if (unformat (input, "spray"))
912         is_spray = 1;
913       else
914         break;
915     }
916
917   if (!is_add && !is_mod && !is_del)
918     return clib_error_return (0, "Incorrect CLI");
919
920   if (!policy_set)
921     return clib_error_return (0, "No SR policy BSID or index specified");
922
923   if (is_add)
924     {
925       if (vec_len (segments) == 0)
926         return clib_error_return (0, "No Segment List specified");
927       rv = sr_policy_add (&bsid, segments, weight,
928                           (is_spray ? SR_POLICY_TYPE_SPRAY :
929                            SR_POLICY_TYPE_DEFAULT), fib_table, is_encap);
930       vec_free (segments);
931     }
932   else if (is_del)
933     rv = sr_policy_del ((sr_policy_index != (u32) ~ 0 ? NULL : &bsid),
934                         sr_policy_index);
935   else if (is_mod)
936     {
937       if (!operation)
938         return clib_error_return (0, "No SL modification specified");
939       if (operation != 1 && sl_index == (u32) ~ 0)
940         return clib_error_return (0, "No Segment List index specified");
941       if (operation == 1 && vec_len (segments) == 0)
942         return clib_error_return (0, "No Segment List specified");
943       if (operation == 3 && weight == (u32) ~ 0)
944         return clib_error_return (0, "No new weight for the SL specified");
945       rv = sr_policy_mod ((sr_policy_index != (u32) ~ 0 ? NULL : &bsid),
946                           sr_policy_index, fib_table, operation, segments,
947                           sl_index, weight);
948       vec_free (segments);
949     }
950
951   switch (rv)
952     {
953     case 0:
954       break;
955     case 1:
956       return 0;
957     case -12:
958       return clib_error_return (0,
959                                 "There is already a FIB entry for the BindingSID address.\n"
960                                 "The SR policy could not be created.");
961     case -13:
962       return clib_error_return (0, "The specified FIB table does not exist.");
963     case -21:
964       return clib_error_return (0,
965                                 "The selected SR policy only contains ONE segment list. "
966                                 "Please remove the SR policy instead");
967     case -22:
968       return clib_error_return (0,
969                                 "Could not delete the segment list. "
970                                 "It is not associated with that SR policy.");
971     case -32:
972       return clib_error_return (0,
973                                 "Could not modify the segment list. "
974                                 "The given SL is not associated with such SR policy.");
975     default:
976       return clib_error_return (0, "BUG: sr policy returns %d", rv);
977     }
978   return 0;
979 }
980
981 /* *INDENT-OFF* */
982 VLIB_CLI_COMMAND (sr_policy_command, static) = {
983   .path = "sr policy",
984   .short_help = "sr policy [add|del|mod] [bsid 2001::1|index 5] "
985     "next A:: next B:: next C:: (weight 1) (fib-table 2) (encap|insert)",
986   .long_help =
987     "Manipulation of SR policies.\n"
988     "A Segment Routing policy may contain several SID lists. Each SID list has\n"
989     "an associated weight (default 1), which will result in wECMP (uECMP).\n"
990     "Segment Routing policies might be of type encapsulation or SRH insertion.\n"
991     "Each SR policy will be associated with a unique BindingSID.\n"
992     "A BindingSID is a locally allocated SegmentID. Every packet that arrives\n"
993     "with IPv6 DA == BSID will be steered into the SR policy.\n"
994     "The add command will create an SR policy with its first segment list (SL).\n"
995     "The mod command allows you to add, remove, or modify the existing segment lists\n"
996     "within an SR policy.\n"
997     "The del command allows you to delete an SR policy along with all its associated\n"
998     "SID lists.\n",
999   .function = sr_policy_command_fn,
1000 };
1001 /* *INDENT-ON* */
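/*
 * Illustrative CLI examples (addresses and indices are placeholders):
 *   vpp# sr policy add bsid 2001::1 next 2001::2 next 2001::3 encap
 *   vpp# sr policy mod bsid 2001::1 add sl next 2001::4 weight 2
 *   vpp# sr policy mod index 0 del sl index 1
 *   vpp# sr policy del bsid 2001::1
 */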
1002
1003 /**
1004  * @brief CLI to display all SR policies on screen
1005  */
1006 static clib_error_t *
1007 show_sr_policies_command_fn (vlib_main_t * vm, unformat_input_t * input,
1008                              vlib_cli_command_t * cmd)
1009 {
1010   ip6_sr_main_t *sm = &sr_main;
1011   u32 *sl_index;
1012   ip6_sr_sl_t *segment_list = 0;
1013   ip6_sr_policy_t *sr_policy = 0;
1014   ip6_sr_policy_t **vec_policies = 0;
1015   ip6_address_t *addr;
1016   u8 *s;
1017   int i = 0;
1018
1019   vlib_cli_output (vm, "SR policies:");
1020
1021   /* *INDENT-OFF* */
1022   pool_foreach  (sr_policy, sm->sr_policies,
1023                 {vec_add1 (vec_policies, sr_policy); } );
1024   /* *INDENT-ON* */
1025
1026   vec_foreach_index (i, vec_policies)
1027   {
1028     sr_policy = vec_policies[i];
1029     vlib_cli_output (vm, "[%u].-\tBSID: %U",
1030                      (u32) (sr_policy - sm->sr_policies),
1031                      format_ip6_address, &sr_policy->bsid);
1032     vlib_cli_output (vm, "\tBehavior: %s",
1033                      (sr_policy->is_encap ? "Encapsulation" :
1034                       "SRH insertion"));
1035     vlib_cli_output (vm, "\tType: %s",
1036                      (sr_policy->type ==
1037                       SR_POLICY_TYPE_DEFAULT ? "Default" : "Spray"));
1038     vlib_cli_output (vm, "\tFIB table: %u",
1039                      (sr_policy->fib_table !=
1040                       (u32) ~ 0 ? sr_policy->fib_table : 0));
1041     vlib_cli_output (vm, "\tSegment Lists:");
1042     vec_foreach (sl_index, sr_policy->segments_lists)
1043     {
1044       s = NULL;
1045       s = format (s, "\t[%u].- ", *sl_index);
1046       segment_list = pool_elt_at_index (sm->sid_lists, *sl_index);
1047       s = format (s, "< ");
1048       vec_foreach (addr, segment_list->segments)
1049       {
1050         s = format (s, "%U, ", format_ip6_address, addr);
1051       }
1052       s = format (s, "\b\b > ");
1053       s = format (s, "weight: %u", segment_list->weight);
1054       vlib_cli_output (vm, "  %s", s);
1055     }
1056     vlib_cli_output (vm, "-----------");
1057   }
1058   return 0;
1059 }
1060
1061 /* *INDENT-OFF* */
1062 VLIB_CLI_COMMAND (show_sr_policies_command, static) = {
1063   .path = "show sr policies",
1064   .short_help = "show sr policies",
1065   .function = show_sr_policies_command_fn,
1066 };
1067 /* *INDENT-ON* */
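/*
 * Illustrative output, reconstructed from the format strings above
 * (values are placeholders):
 *   SR policies:
 *   [0].-   BSID: 2001::1
 *           Behavior: Encapsulation
 *           Type: Default
 *           FIB table: 0
 *           Segment Lists:
 *           [0].- < 2001::2, 2001::3 > weight: 1
 *   -----------
 */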
1068
1069 /*************************** SR rewrite graph node ****************************/
1070 /**
1071  * @brief Trace for the SR Policy Rewrite graph node
1072  */
1073 static u8 *
1074 format_sr_policy_rewrite_trace (u8 * s, va_list * args)
1075 {
1076   //TODO
1077   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1078   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1079   sr_policy_rewrite_trace_t *t = va_arg (*args, sr_policy_rewrite_trace_t *);
1080
1081   s = format
1082     (s, "SR-policy-rewrite: src %U dst %U",
1083      format_ip6_address, &t->src, format_ip6_address, &t->dst);
1084
1085   return s;
1086 }
1087
1088 /**
1089  * @brief IPv6 encapsulation processing as per RFC2473
1090  */
1091 static_always_inline void
1092 encaps_processing_v6 (vlib_node_runtime_t * node,
1093                       vlib_buffer_t * b0,
1094                       ip6_header_t * ip0, ip6_header_t * ip0_encap)
1095 {
1096   u32 new_l0;
1097
1098   ip0_encap->hop_limit -= 1;
1099   new_l0 =
1100     ip0->payload_length + sizeof (ip6_header_t) +
1101     clib_net_to_host_u16 (ip0_encap->payload_length);
1102   ip0->payload_length = clib_host_to_net_u16 (new_l0);
1103   ip0->ip_version_traffic_class_and_flow_label =
1104     ip0_encap->ip_version_traffic_class_and_flow_label;
1105 }
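/*
 * Note: following RFC 2473, the inner (tunneled) IPv6 hop limit is
 * decremented by one here, while the outer hop limit was already set from
 * sr_pr_encaps_hop_limit when the rewrite string was precomputed. The outer
 * traffic class and flow label are copied from the inner header.
 */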
1106
1107 /**
1108  * @brief Graph node for applying an SR policy to an IPv6 packet. Encapsulation
1109  */
1110 static uword
1111 sr_policy_rewrite_encaps (vlib_main_t * vm, vlib_node_runtime_t * node,
1112                           vlib_frame_t * from_frame)
1113 {
1114   ip6_sr_main_t *sm = &sr_main;
1115   u32 n_left_from, next_index, *from, *to_next;
1116
1117   from = vlib_frame_vector_args (from_frame);
1118   n_left_from = from_frame->n_vectors;
1119
1120   next_index = node->cached_next_index;
1121
1122   int encap_pkts = 0, bsid_pkts = 0;
1123
1124   while (n_left_from > 0)
1125     {
1126       u32 n_left_to_next;
1127
1128       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1129
1130       /* Quad - Loop */
1131       while (n_left_from >= 8 && n_left_to_next >= 4)
1132         {
1133           u32 bi0, bi1, bi2, bi3;
1134           vlib_buffer_t *b0, *b1, *b2, *b3;
1135           u32 next0, next1, next2, next3;
1136           next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1137           ip6_header_t *ip0, *ip1, *ip2, *ip3;
1138           ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
1139           ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1140
1141           /* Prefetch next iteration. */
1142           {
1143             vlib_buffer_t *p4, *p5, *p6, *p7;
1144
1145             p4 = vlib_get_buffer (vm, from[4]);
1146             p5 = vlib_get_buffer (vm, from[5]);
1147             p6 = vlib_get_buffer (vm, from[6]);
1148             p7 = vlib_get_buffer (vm, from[7]);
1149
1150             /* Prefetch the buffer header and packet for the N+2 loop iteration */
1151             vlib_prefetch_buffer_header (p4, LOAD);
1152             vlib_prefetch_buffer_header (p5, LOAD);
1153             vlib_prefetch_buffer_header (p6, LOAD);
1154             vlib_prefetch_buffer_header (p7, LOAD);
1155
1156             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1157             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1158             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1159             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1160           }
1161
1162           to_next[0] = bi0 = from[0];
1163           to_next[1] = bi1 = from[1];
1164           to_next[2] = bi2 = from[2];
1165           to_next[3] = bi3 = from[3];
1166           from += 4;
1167           to_next += 4;
1168           n_left_from -= 4;
1169           n_left_to_next -= 4;
1170
1171           b0 = vlib_get_buffer (vm, bi0);
1172           b1 = vlib_get_buffer (vm, bi1);
1173           b2 = vlib_get_buffer (vm, bi2);
1174           b3 = vlib_get_buffer (vm, bi3);
1175
1176           sl0 =
1177             pool_elt_at_index (sm->sid_lists,
1178                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1179           sl1 =
1180             pool_elt_at_index (sm->sid_lists,
1181                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1182           sl2 =
1183             pool_elt_at_index (sm->sid_lists,
1184                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1185           sl3 =
1186             pool_elt_at_index (sm->sid_lists,
1187                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1188
1189           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1190                   vec_len (sl0->rewrite));
1191           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1192                   vec_len (sl1->rewrite));
1193           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1194                   vec_len (sl2->rewrite));
1195           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1196                   vec_len (sl3->rewrite));
1197
1198           ip0_encap = vlib_buffer_get_current (b0);
1199           ip1_encap = vlib_buffer_get_current (b1);
1200           ip2_encap = vlib_buffer_get_current (b2);
1201           ip3_encap = vlib_buffer_get_current (b3);
1202
1203           clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1204                             sl0->rewrite, vec_len (sl0->rewrite));
1205           clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
1206                             sl1->rewrite, vec_len (sl1->rewrite));
1207           clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
1208                             sl2->rewrite, vec_len (sl2->rewrite));
1209           clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
1210                             sl3->rewrite, vec_len (sl3->rewrite));
1211
1212           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1213           vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1214           vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1215           vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1216
1217           ip0 = vlib_buffer_get_current (b0);
1218           ip1 = vlib_buffer_get_current (b1);
1219           ip2 = vlib_buffer_get_current (b2);
1220           ip3 = vlib_buffer_get_current (b3);
1221
1222           encaps_processing_v6 (node, b0, ip0, ip0_encap);
1223           encaps_processing_v6 (node, b1, ip1, ip1_encap);
1224           encaps_processing_v6 (node, b2, ip2, ip2_encap);
1225           encaps_processing_v6 (node, b3, ip3, ip3_encap);
1226
1227           if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1228             {
1229               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1230                 {
1231                   sr_policy_rewrite_trace_t *tr =
1232                     vlib_add_trace (vm, node, b0, sizeof (*tr));
1233                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1234                                     sizeof (tr->src.as_u8));
1235                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1236                                     sizeof (tr->dst.as_u8));
1237                 }
1238
1239               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1240                 {
1241                   sr_policy_rewrite_trace_t *tr =
1242                     vlib_add_trace (vm, node, b1, sizeof (*tr));
1243                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1244                                     sizeof (tr->src.as_u8));
1245                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1246                                     sizeof (tr->dst.as_u8));
1247                 }
1248
1249               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1250                 {
1251                   sr_policy_rewrite_trace_t *tr =
1252                     vlib_add_trace (vm, node, b2, sizeof (*tr));
1253                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1254                                     sizeof (tr->src.as_u8));
1255                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1256                                     sizeof (tr->dst.as_u8));
1257                 }
1258
1259               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1260                 {
1261                   sr_policy_rewrite_trace_t *tr =
1262                     vlib_add_trace (vm, node, b3, sizeof (*tr));
1263                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1264                                     sizeof (tr->src.as_u8));
1265                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1266                                     sizeof (tr->dst.as_u8));
1267                 }
1268             }
1269
1270           encap_pkts += 4;
1271           vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1272                                            n_left_to_next, bi0, bi1, bi2, bi3,
1273                                            next0, next1, next2, next3);
1274         }
1275
1276       /* Single loop for potentially the last three packets */
1277       while (n_left_from > 0 && n_left_to_next > 0)
1278         {
1279           u32 bi0;
1280           vlib_buffer_t *b0;
1281           ip6_header_t *ip0 = 0, *ip0_encap = 0;
1282           ip6_sr_sl_t *sl0;
1283           u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1284
1285           bi0 = from[0];
1286           to_next[0] = bi0;
1287           from += 1;
1288           to_next += 1;
1289           n_left_from -= 1;
1290           n_left_to_next -= 1;
1291           b0 = vlib_get_buffer (vm, bi0);
1292
1293           sl0 =
1294             pool_elt_at_index (sm->sid_lists,
1295                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1296           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1297                   vec_len (sl0->rewrite));
1298
1299           ip0_encap = vlib_buffer_get_current (b0);
1300
1301           clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1302                             sl0->rewrite, vec_len (sl0->rewrite));
1303           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1304
1305           ip0 = vlib_buffer_get_current (b0);
1306
1307           encaps_processing_v6 (node, b0, ip0, ip0_encap);
1308
1309           if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1310               PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1311             {
1312               sr_policy_rewrite_trace_t *tr =
1313                 vlib_add_trace (vm, node, b0, sizeof (*tr));
1314               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1315                                 sizeof (tr->src.as_u8));
1316               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1317                                 sizeof (tr->dst.as_u8));
1318             }
1319
1320           encap_pkts++;
1321           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1322                                            n_left_to_next, bi0, next0);
1323         }
1324
1325       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1326     }
1327
1328   /* Update counters */
1329   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1330                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
1331                                encap_pkts);
1332   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1333                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
1334                                bsid_pkts);
1335
1336   return from_frame->n_vectors;
1337 }
1338
1339 /* *INDENT-OFF* */
1340 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_node) = {
1341   .function = sr_policy_rewrite_encaps,
1342   .name = "sr-pl-rewrite-encaps",
1343   .vector_size = sizeof (u32),
1344   .format_trace = format_sr_policy_rewrite_trace,
1345   .type = VLIB_NODE_TYPE_INTERNAL,
1346   .n_errors = SR_POLICY_REWRITE_N_ERROR,
1347   .error_strings = sr_policy_rewrite_error_strings,
1348   .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
1349   .next_nodes = {
1350 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
1351     foreach_sr_policy_rewrite_next
1352 #undef _
1353   },
1354 };
1355 /* *INDENT-ON* */
1356
1357 /**
1358  * @brief IPv4 encapsulation processing as per RFC2473
1359  */
1360 static_always_inline void
1361 encaps_processing_v4 (vlib_node_runtime_t * node,
1362                       vlib_buffer_t * b0,
1363                       ip6_header_t * ip0, ip4_header_t * ip0_encap)
1364 {
1365   u32 new_l0;
1366   ip6_sr_header_t *sr0;
1367
1368   u32 checksum0;
1369
1370   /* Inner IPv4: Decrement TTL & update checksum */
1371   ip0_encap->ttl -= 1;
1372   checksum0 = ip0_encap->checksum + clib_host_to_net_u16 (0x0100);
1373   checksum0 += checksum0 >= 0xffff;
1374   ip0_encap->checksum = checksum0;
1375
1376   /* Outer IPv6: Update length, FL, proto */
1377   new_l0 = ip0->payload_length + clib_net_to_host_u16 (ip0_encap->length);
1378   ip0->payload_length = clib_host_to_net_u16 (new_l0);
1379   ip0->ip_version_traffic_class_and_flow_label =
1380     clib_host_to_net_u32 (0 | ((6 & 0xF) << 28) |
1381                           ((ip0_encap->tos & 0xFF) << 20));
1382   if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1383     {
1384       sr0 = (void *) (ip0 + 1);
1385       sr0->protocol = IP_PROTOCOL_IP_IN_IP;
1386     }
1387   else
1388     ip0->protocol = IP_PROTOCOL_IP_IN_IP;
1389 }
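/*
 * Note: the IPv4 checksum is updated incrementally (in the spirit of
 * RFC 1624): decrementing the TTL by one corresponds to adding 0x0100 (in
 * network byte order) to the header checksum, with an end-around carry.
 */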
1390
1391 /**
1392  * @brief Graph node for applying an SR policy to an IPv4 packet. Encapsulation
1393  */
1394 static uword
1395 sr_policy_rewrite_encaps_v4 (vlib_main_t * vm, vlib_node_runtime_t * node,
1396                              vlib_frame_t * from_frame)
1397 {
1398   ip6_sr_main_t *sm = &sr_main;
1399   u32 n_left_from, next_index, *from, *to_next;
1400
1401   from = vlib_frame_vector_args (from_frame);
1402   n_left_from = from_frame->n_vectors;
1403
1404   next_index = node->cached_next_index;
1405
1406   int encap_pkts = 0, bsid_pkts = 0;
1407
1408   while (n_left_from > 0)
1409     {
1410       u32 n_left_to_next;
1411
1412       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1413
1414       /* Quad - Loop */
1415       while (n_left_from >= 8 && n_left_to_next >= 4)
1416         {
1417           u32 bi0, bi1, bi2, bi3;
1418           vlib_buffer_t *b0, *b1, *b2, *b3;
1419           u32 next0, next1, next2, next3;
1420           next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1421           ip6_header_t *ip0, *ip1, *ip2, *ip3;
1422           ip4_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
1423           ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1424
1425           /* Prefetch next iteration. */
1426           {
1427             vlib_buffer_t *p4, *p5, *p6, *p7;
1428
1429             p4 = vlib_get_buffer (vm, from[4]);
1430             p5 = vlib_get_buffer (vm, from[5]);
1431             p6 = vlib_get_buffer (vm, from[6]);
1432             p7 = vlib_get_buffer (vm, from[7]);
1433
1434             /* Prefetch the buffer header and packet for the N+2 loop iteration */
1435             vlib_prefetch_buffer_header (p4, LOAD);
1436             vlib_prefetch_buffer_header (p5, LOAD);
1437             vlib_prefetch_buffer_header (p6, LOAD);
1438             vlib_prefetch_buffer_header (p7, LOAD);
1439
1440             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1441             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1442             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1443             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1444           }
1445
1446           to_next[0] = bi0 = from[0];
1447           to_next[1] = bi1 = from[1];
1448           to_next[2] = bi2 = from[2];
1449           to_next[3] = bi3 = from[3];
1450           from += 4;
1451           to_next += 4;
1452           n_left_from -= 4;
1453           n_left_to_next -= 4;
1454
1455           b0 = vlib_get_buffer (vm, bi0);
1456           b1 = vlib_get_buffer (vm, bi1);
1457           b2 = vlib_get_buffer (vm, bi2);
1458           b3 = vlib_get_buffer (vm, bi3);
1459
1460           sl0 =
1461             pool_elt_at_index (sm->sid_lists,
1462                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1463           sl1 =
1464             pool_elt_at_index (sm->sid_lists,
1465                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1466           sl2 =
1467             pool_elt_at_index (sm->sid_lists,
1468                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1469           sl3 =
1470             pool_elt_at_index (sm->sid_lists,
1471                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1472           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1473                   vec_len (sl0->rewrite));
1474           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1475                   vec_len (sl1->rewrite));
1476           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1477                   vec_len (sl2->rewrite));
1478           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1479                   vec_len (sl3->rewrite));
1480
1481           ip0_encap = vlib_buffer_get_current (b0);
1482           ip1_encap = vlib_buffer_get_current (b1);
1483           ip2_encap = vlib_buffer_get_current (b2);
1484           ip3_encap = vlib_buffer_get_current (b3);
1485
1486           clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1487                             sl0->rewrite, vec_len (sl0->rewrite));
1488           clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
1489                             sl1->rewrite, vec_len (sl1->rewrite));
1490           clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
1491                             sl2->rewrite, vec_len (sl2->rewrite));
1492           clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
1493                             sl3->rewrite, vec_len (sl3->rewrite));
1494
1495           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1496           vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1497           vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1498           vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1499
1500           ip0 = vlib_buffer_get_current (b0);
1501           ip1 = vlib_buffer_get_current (b1);
1502           ip2 = vlib_buffer_get_current (b2);
1503           ip3 = vlib_buffer_get_current (b3);
1504
1505           encaps_processing_v4 (node, b0, ip0, ip0_encap);
1506           encaps_processing_v4 (node, b1, ip1, ip1_encap);
1507           encaps_processing_v4 (node, b2, ip2, ip2_encap);
1508           encaps_processing_v4 (node, b3, ip3, ip3_encap);
1509
1510           if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1511             {
1512               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1513                 {
1514                   sr_policy_rewrite_trace_t *tr =
1515                     vlib_add_trace (vm, node, b0, sizeof (*tr));
1516                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1517                                     sizeof (tr->src.as_u8));
1518                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1519                                     sizeof (tr->dst.as_u8));
1520                 }
1521
1522               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1523                 {
1524                   sr_policy_rewrite_trace_t *tr =
1525                     vlib_add_trace (vm, node, b1, sizeof (*tr));
1526                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1527                                     sizeof (tr->src.as_u8));
1528                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1529                                     sizeof (tr->dst.as_u8));
1530                 }
1531
1532               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1533                 {
1534                   sr_policy_rewrite_trace_t *tr =
1535                     vlib_add_trace (vm, node, b2, sizeof (*tr));
1536                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1537                                     sizeof (tr->src.as_u8));
1538                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1539                                     sizeof (tr->dst.as_u8));
1540                 }
1541
1542               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1543                 {
1544                   sr_policy_rewrite_trace_t *tr =
1545                     vlib_add_trace (vm, node, b3, sizeof (*tr));
1546                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1547                                     sizeof (tr->src.as_u8));
1548                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1549                                     sizeof (tr->dst.as_u8));
1550                 }
1551             }
1552
1553           encap_pkts += 4;
1554           vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1555                                            n_left_to_next, bi0, bi1, bi2, bi3,
1556                                            next0, next1, next2, next3);
1557         }
1558
1559       /* Single loop for the remaining packets */
1560       while (n_left_from > 0 && n_left_to_next > 0)
1561         {
1562           u32 bi0;
1563           vlib_buffer_t *b0;
1564           ip6_header_t *ip0 = 0;
1565           ip4_header_t *ip0_encap = 0;
1566           ip6_sr_sl_t *sl0;
1567           u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1568
1569           bi0 = from[0];
1570           to_next[0] = bi0;
1571           from += 1;
1572           to_next += 1;
1573           n_left_from -= 1;
1574           n_left_to_next -= 1;
1575           b0 = vlib_get_buffer (vm, bi0);
1576
1577           sl0 =
1578             pool_elt_at_index (sm->sid_lists,
1579                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1580           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1581                   vec_len (sl0->rewrite));
1582
1583           ip0_encap = vlib_buffer_get_current (b0);
1584
1585           clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
1586                             sl0->rewrite, vec_len (sl0->rewrite));
1587           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1588
1589           ip0 = vlib_buffer_get_current (b0);
1590
1591           encaps_processing_v4 (node, b0, ip0, ip0_encap);
1592
1593           if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
1594               PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1595             {
1596               sr_policy_rewrite_trace_t *tr =
1597                 vlib_add_trace (vm, node, b0, sizeof (*tr));
1598               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1599                                 sizeof (tr->src.as_u8));
1600               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1601                                 sizeof (tr->dst.as_u8));
1602             }
1603
1604           encap_pkts++;
1605           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1606                                            n_left_to_next, bi0, next0);
1607         }
1608
1609       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1610     }
1611
1612   /* Update counters (accumulated on the base sr-pl-rewrite-encaps node) */
1613   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1614                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
1615                                encap_pkts);
1616   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
1617                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
1618                                bsid_pkts);
1619
1620   return from_frame->n_vectors;
1621 }
1622
1623 /* *INDENT-OFF* */
1624 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_v4_node) = {
1625   .function = sr_policy_rewrite_encaps_v4,
1626   .name = "sr-pl-rewrite-encaps-v4",
1627   .vector_size = sizeof (u32),
1628   .format_trace = format_sr_policy_rewrite_trace,
1629   .type = VLIB_NODE_TYPE_INTERNAL,
1630   .n_errors = SR_POLICY_REWRITE_N_ERROR,
1631   .error_strings = sr_policy_rewrite_error_strings,
1632   .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
1633   .next_nodes = {
1634 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
1635     foreach_sr_policy_rewrite_next
1636 #undef _
1637   },
1638 };
1639 /* *INDENT-ON* */
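
/*
 * Editorial sketch (not upstream documentation): layout of the frame
 * produced by sr-pl-rewrite-encaps-v4 above.  sl->rewrite is the
 * precomputed encapsulation prepended in front of the original packet:
 *
 *   +-----------------------+
 *   | outer IPv6 header     |  <- from sl->rewrite
 *   +-----------------------+
 *   | SRH (when the SID     |  <- from sl->rewrite
 *   |  list carries one)    |
 *   +-----------------------+
 *   | original IPv4 packet  |  <- left untouched
 *   +-----------------------+
 *
 * encaps_processing_v4 (defined earlier in this file) finalizes the outer
 * header before the buffer is enqueued towards ip6-lookup.
 */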
1640
1641 always_inline u32
1642 ip_flow_hash (void *data)
1643 {
1644   ip4_header_t *iph = (ip4_header_t *) data;
1645
1646   if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
1647     return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
1648   else
1649     return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
1650 }
1651
1652 always_inline u64
1653 mac_to_u64 (u8 * m)
1654 {
1655   return (*((u64 *) m) & 0xffffffffffff);
1656 }
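
/*
 * Editorial note: mac_to_u64 above loads 8 bytes starting at the MAC
 * address and masks the low 48 bits; this is safe here only because the
 * address sits inside a vlib buffer with valid data after it.  A
 * bounds-safe variant (hypothetical helper, not used by the nodes below)
 * that packs exactly the six address bytes into a u64 could look like:
 */
always_inline u64
mac_to_u64_bounded (u8 * m)
{
  u64 v = 0;
  /* copy only the 6 MAC address bytes into the low-order bytes of v */
  clib_memcpy_fast (&v, m, 6);
  return v;
}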
1657
1658 always_inline u32
1659 l2_flow_hash (vlib_buffer_t * b0)
1660 {
1661   ethernet_header_t *eh;
1662   u64 a, b, c;
1663   uword is_ip, eh_size;
1664   u16 eh_type;
1665
1666   eh = vlib_buffer_get_current (b0);
1667   eh_type = clib_net_to_host_u16 (eh->type);
1668   eh_size = ethernet_buffer_header_size (b0);
1669
1670   is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
1671
1672   /* since we have 2 cache lines, use them */
1673   if (is_ip)
1674     a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
1675   else
1676     a = eh->type;
1677
1678   b = mac_to_u64 ((u8 *) eh->dst_address);
1679   c = mac_to_u64 ((u8 *) eh->src_address);
1680   hash_mix64 (a, b, c);
1681
1682   return (u32) c;
1683 }
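
/*
 * Editorial note: the L2 steering code below selects a SID list with
 *
 *   sl_index = flow_hash & (vec_len (segments_lists) - 1);
 *
 * With 4 SID lists the mask is 0x3 and every list can be selected; with 3
 * lists the mask is 0x2, so only indices 0 and 2 are ever chosen.  Even
 * load balancing therefore assumes a power-of-two number of SID lists in
 * the policy.
 */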
1684
1685 /**
1686  * @brief Graph node for applying an SR policy to an L2 frame. Encapsulation.
1687  */
1688 static uword
1689 sr_policy_rewrite_encaps_l2 (vlib_main_t * vm, vlib_node_runtime_t * node,
1690                              vlib_frame_t * from_frame)
1691 {
1692   ip6_sr_main_t *sm = &sr_main;
1693   u32 n_left_from, next_index, *from, *to_next;
1694
1695   from = vlib_frame_vector_args (from_frame);
1696   n_left_from = from_frame->n_vectors;
1697
1698   next_index = node->cached_next_index;
1699
1700   int encap_pkts = 0, bsid_pkts = 0;
1701
1702   while (n_left_from > 0)
1703     {
1704       u32 n_left_to_next;
1705
1706       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1707
1708       /* Quad - Loop */
1709       while (n_left_from >= 8 && n_left_to_next >= 4)
1710         {
1711           u32 bi0, bi1, bi2, bi3;
1712           vlib_buffer_t *b0, *b1, *b2, *b3;
1713           u32 next0, next1, next2, next3;
1714           next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1715           ethernet_header_t *en0, *en1, *en2, *en3;
1716           ip6_header_t *ip0, *ip1, *ip2, *ip3;
1717           ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
1718           ip6_sr_policy_t *sp0, *sp1, *sp2, *sp3;
1719           ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
1720
1721           /* Prefetch next iteration. */
1722           {
1723             vlib_buffer_t *p4, *p5, *p6, *p7;
1724
1725             p4 = vlib_get_buffer (vm, from[4]);
1726             p5 = vlib_get_buffer (vm, from[5]);
1727             p6 = vlib_get_buffer (vm, from[6]);
1728             p7 = vlib_get_buffer (vm, from[7]);
1729
1730             /* Prefetch the buffer header and packet for the next loop iteration */
1731             vlib_prefetch_buffer_header (p4, LOAD);
1732             vlib_prefetch_buffer_header (p5, LOAD);
1733             vlib_prefetch_buffer_header (p6, LOAD);
1734             vlib_prefetch_buffer_header (p7, LOAD);
1735
1736             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
1737             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
1738             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
1739             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
1740           }
1741
1742           to_next[0] = bi0 = from[0];
1743           to_next[1] = bi1 = from[1];
1744           to_next[2] = bi2 = from[2];
1745           to_next[3] = bi3 = from[3];
1746           from += 4;
1747           to_next += 4;
1748           n_left_from -= 4;
1749           n_left_to_next -= 4;
1750
1751           b0 = vlib_get_buffer (vm, bi0);
1752           b1 = vlib_get_buffer (vm, bi1);
1753           b2 = vlib_get_buffer (vm, bi2);
1754           b3 = vlib_get_buffer (vm, bi3);
1755
1756           sp0 = pool_elt_at_index (sm->sr_policies,
1757                                    sm->sw_iface_sr_policies[vnet_buffer
1758                                                             (b0)->sw_if_index
1759                                                             [VLIB_RX]]);
1760
1761           sp1 = pool_elt_at_index (sm->sr_policies,
1762                                    sm->sw_iface_sr_policies[vnet_buffer
1763                                                             (b1)->sw_if_index
1764                                                             [VLIB_RX]]);
1765
1766           sp2 = pool_elt_at_index (sm->sr_policies,
1767                                    sm->sw_iface_sr_policies[vnet_buffer
1768                                                             (b2)->sw_if_index
1769                                                             [VLIB_RX]]);
1770
1771           sp3 = pool_elt_at_index (sm->sr_policies,
1772                                    sm->sw_iface_sr_policies[vnet_buffer
1773                                                             (b3)->sw_if_index
1774                                                             [VLIB_RX]]);
1775
1776           if (vec_len (sp0->segments_lists) == 1)
1777             vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0];
1778           else
1779             {
1780               vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
1781               vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1782                 sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash &
1783                                      (vec_len (sp0->segments_lists) - 1))];
1784             }
1785
1786           if (vec_len (sp1->segments_lists) == 1)
1787             vnet_buffer (b1)->ip.adj_index[VLIB_TX] = sp1->segments_lists[0];
1788           else
1789             {
1790               vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1);
1791               vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
1792                 sp1->segments_lists[(vnet_buffer (b1)->ip.flow_hash &
1793                                      (vec_len (sp1->segments_lists) - 1))];
1794             }
1795
1796           if (vec_len (sp2->segments_lists) == 1)
1797             vnet_buffer (b2)->ip.adj_index[VLIB_TX] = sp2->segments_lists[0];
1798           else
1799             {
1800               vnet_buffer (b2)->ip.flow_hash = l2_flow_hash (b2);
1801               vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
1802                 sp2->segments_lists[(vnet_buffer (b2)->ip.flow_hash &
1803                                      (vec_len (sp2->segments_lists) - 1))];
1804             }
1805
1806           if (vec_len (sp3->segments_lists) == 1)
1807             vnet_buffer (b3)->ip.adj_index[VLIB_TX] = sp3->segments_lists[0];
1808           else
1809             {
1810               vnet_buffer (b3)->ip.flow_hash = l2_flow_hash (b3);
1811               vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
1812                 sp3->segments_lists[(vnet_buffer (b3)->ip.flow_hash &
1813                                      (vec_len (sp3->segments_lists) - 1))];
1814             }
1815
1816           sl0 =
1817             pool_elt_at_index (sm->sid_lists,
1818                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1819           sl1 =
1820             pool_elt_at_index (sm->sid_lists,
1821                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
1822           sl2 =
1823             pool_elt_at_index (sm->sid_lists,
1824                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
1825           sl3 =
1826             pool_elt_at_index (sm->sid_lists,
1827                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
1828
1829           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1830                   vec_len (sl0->rewrite));
1831           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1832                   vec_len (sl1->rewrite));
1833           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1834                   vec_len (sl2->rewrite));
1835           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1836                   vec_len (sl3->rewrite));
1837
1838           en0 = vlib_buffer_get_current (b0);
1839           en1 = vlib_buffer_get_current (b1);
1840           en2 = vlib_buffer_get_current (b2);
1841           en3 = vlib_buffer_get_current (b3);
1842
1843           clib_memcpy_fast (((u8 *) en0) - vec_len (sl0->rewrite),
1844                             sl0->rewrite, vec_len (sl0->rewrite));
1845           clib_memcpy_fast (((u8 *) en1) - vec_len (sl1->rewrite),
1846                             sl1->rewrite, vec_len (sl1->rewrite));
1847           clib_memcpy_fast (((u8 *) en2) - vec_len (sl2->rewrite),
1848                             sl2->rewrite, vec_len (sl2->rewrite));
1849           clib_memcpy_fast (((u8 *) en3) - vec_len (sl3->rewrite),
1850                             sl3->rewrite, vec_len (sl3->rewrite));
1851
1852           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
1853           vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
1854           vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
1855           vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
1856
1857           ip0 = vlib_buffer_get_current (b0);
1858           ip1 = vlib_buffer_get_current (b1);
1859           ip2 = vlib_buffer_get_current (b2);
1860           ip3 = vlib_buffer_get_current (b3);
1861
1862           ip0->payload_length =
1863             clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t));
1864           ip1->payload_length =
1865             clib_host_to_net_u16 (b1->current_length - sizeof (ip6_header_t));
1866           ip2->payload_length =
1867             clib_host_to_net_u16 (b2->current_length - sizeof (ip6_header_t));
1868           ip3->payload_length =
1869             clib_host_to_net_u16 (b3->current_length - sizeof (ip6_header_t));
1870
1871           if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
1872             {
1873               sr0 = (void *) (ip0 + 1);
1874               sr0->protocol = IP_PROTOCOL_IP6_NONXT;
1875             }
1876           else
1877             ip0->protocol = IP_PROTOCOL_IP6_NONXT;
1878
1879           if (ip1->protocol == IP_PROTOCOL_IPV6_ROUTE)
1880             {
1881               sr1 = (void *) (ip1 + 1);
1882               sr1->protocol = IP_PROTOCOL_IP6_NONXT;
1883             }
1884           else
1885             ip1->protocol = IP_PROTOCOL_IP6_NONXT;
1886
1887           if (ip2->protocol == IP_PROTOCOL_IPV6_ROUTE)
1888             {
1889               sr2 = (void *) (ip2 + 1);
1890               sr2->protocol = IP_PROTOCOL_IP6_NONXT;
1891             }
1892           else
1893             ip2->protocol = IP_PROTOCOL_IP6_NONXT;
1894
1895           if (ip3->protocol == IP_PROTOCOL_IPV6_ROUTE)
1896             {
1897               sr3 = (void *) (ip3 + 1);
1898               sr3->protocol = IP_PROTOCOL_IP6_NONXT;
1899             }
1900           else
1901             ip3->protocol = IP_PROTOCOL_IP6_NONXT;
1902
1903           /* TODO: decide which traffic class and flow label to set on the outer header */
1904           //ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(0|((6&0xF)<<28)|((ip0_encap->tos&0xFF)<<20));
1905
1906           if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1907             {
1908               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1909                 {
1910                   sr_policy_rewrite_trace_t *tr =
1911                     vlib_add_trace (vm, node, b0, sizeof (*tr));
1912                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
1913                                     sizeof (tr->src.as_u8));
1914                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
1915                                     sizeof (tr->dst.as_u8));
1916                 }
1917
1918               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
1919                 {
1920                   sr_policy_rewrite_trace_t *tr =
1921                     vlib_add_trace (vm, node, b1, sizeof (*tr));
1922                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
1923                                     sizeof (tr->src.as_u8));
1924                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
1925                                     sizeof (tr->dst.as_u8));
1926                 }
1927
1928               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
1929                 {
1930                   sr_policy_rewrite_trace_t *tr =
1931                     vlib_add_trace (vm, node, b2, sizeof (*tr));
1932                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
1933                                     sizeof (tr->src.as_u8));
1934                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
1935                                     sizeof (tr->dst.as_u8));
1936                 }
1937
1938               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
1939                 {
1940                   sr_policy_rewrite_trace_t *tr =
1941                     vlib_add_trace (vm, node, b3, sizeof (*tr));
1942                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
1943                                     sizeof (tr->src.as_u8));
1944                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
1945                                     sizeof (tr->dst.as_u8));
1946                 }
1947             }
1948
1949           encap_pkts += 4;
1950           vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
1951                                            n_left_to_next, bi0, bi1, bi2, bi3,
1952                                            next0, next1, next2, next3);
1953         }
1954
1955       /* Single loop for the remaining packets */
1956       while (n_left_from > 0 && n_left_to_next > 0)
1957         {
1958           u32 bi0;
1959           vlib_buffer_t *b0;
1960           ip6_header_t *ip0 = 0;
1961           ip6_sr_header_t *sr0;
1962           ethernet_header_t *en0;
1963           ip6_sr_policy_t *sp0;
1964           ip6_sr_sl_t *sl0;
1965           u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
1966
1967           bi0 = from[0];
1968           to_next[0] = bi0;
1969           from += 1;
1970           to_next += 1;
1971           n_left_from -= 1;
1972           n_left_to_next -= 1;
1973           b0 = vlib_get_buffer (vm, bi0);
1974
1975           /* Find the SR policy */
1976           sp0 = pool_elt_at_index (sm->sr_policies,
1977                                    sm->sw_iface_sr_policies[vnet_buffer
1978                                                             (b0)->sw_if_index
1979                                                             [VLIB_RX]]);
1980
1981           /* In case there is more than one SL, LB among them */
1982           if (vec_len (sp0->segments_lists) == 1)
1983             vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0];
1984           else
1985             {
1986               vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
1987               vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1988                 sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash &
1989                                      (vec_len (sp0->segments_lists) - 1))];
1990             }
1991           sl0 =
1992             pool_elt_at_index (sm->sid_lists,
1993                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
1994           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
1995                   vec_len (sl0->rewrite));
1996
1997           en0 = vlib_buffer_get_current (b0);
1998
1999           clib_memcpy_fast (((u8 *) en0) - vec_len (sl0->rewrite),
2000                             sl0->rewrite, vec_len (sl0->rewrite));
2001
2002           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
2003
2004           ip0 = vlib_buffer_get_current (b0);
2005
2006           ip0->payload_length =
2007             clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t));
2008
2009           if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)
2010             {
2011               sr0 = (void *) (ip0 + 1);
2012               sr0->protocol = IP_PROTOCOL_IP6_NONXT;
2013             }
2014           else
2015             ip0->protocol = IP_PROTOCOL_IP6_NONXT;
2016
2017           if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
2018               PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2019             {
2020               sr_policy_rewrite_trace_t *tr =
2021                 vlib_add_trace (vm, node, b0, sizeof (*tr));
2022               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2023                                 sizeof (tr->src.as_u8));
2024               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2025                                 sizeof (tr->dst.as_u8));
2026             }
2027
2028           encap_pkts++;
2029           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2030                                            n_left_to_next, bi0, next0);
2031         }
2032
2033       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2034     }
2035
2036   /* Update counters (accumulated on the base sr-pl-rewrite-encaps node) */
2037   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
2038                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2039                                encap_pkts);
2040   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
2041                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2042                                bsid_pkts);
2043
2044   return from_frame->n_vectors;
2045 }
2046
2047 /* *INDENT-OFF* */
2048 VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_l2_node) = {
2049   .function = sr_policy_rewrite_encaps_l2,
2050   .name = "sr-pl-rewrite-encaps-l2",
2051   .vector_size = sizeof (u32),
2052   .format_trace = format_sr_policy_rewrite_trace,
2053   .type = VLIB_NODE_TYPE_INTERNAL,
2054   .n_errors = SR_POLICY_REWRITE_N_ERROR,
2055   .error_strings = sr_policy_rewrite_error_strings,
2056   .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2057   .next_nodes = {
2058 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2059     foreach_sr_policy_rewrite_next
2060 #undef _
2061   },
2062 };
2063 /* *INDENT-ON* */
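
/*
 * Editorial sketch (not upstream documentation) of the L2 encapsulation
 * performed above: sl->rewrite (outer IPv6 header, plus SRH when the SID
 * list carries one) is prepended in front of the original Ethernet frame,
 * the outer payload_length is set to cover the whole inner frame, and the
 * innermost next-header field (the SRH protocol when an SRH is present,
 * otherwise the IPv6 protocol field) is set to IP_PROTOCOL_IP6_NONXT to
 * mark the L2 payload.
 */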
2064
2065 /**
2066  * @brief Graph node for applying an SR policy to an IPv6 packet. SRH insertion.
2067  */
2068 static uword
2069 sr_policy_rewrite_insert (vlib_main_t * vm, vlib_node_runtime_t * node,
2070                           vlib_frame_t * from_frame)
2071 {
2072   ip6_sr_main_t *sm = &sr_main;
2073   u32 n_left_from, next_index, *from, *to_next;
2074
2075   from = vlib_frame_vector_args (from_frame);
2076   n_left_from = from_frame->n_vectors;
2077
2078   next_index = node->cached_next_index;
2079
2080   int insert_pkts = 0, bsid_pkts = 0;
2081
2082   while (n_left_from > 0)
2083     {
2084       u32 n_left_to_next;
2085
2086       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2087
2088       /* Quad - Loop */
2089       while (n_left_from >= 8 && n_left_to_next >= 4)
2090         {
2091           u32 bi0, bi1, bi2, bi3;
2092           vlib_buffer_t *b0, *b1, *b2, *b3;
2093           u32 next0, next1, next2, next3;
2094           next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2095           ip6_header_t *ip0, *ip1, *ip2, *ip3;
2096           ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2097           ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2098           u16 new_l0, new_l1, new_l2, new_l3;
2099
2100           /* Prefetch next iteration. */
2101           {
2102             vlib_buffer_t *p4, *p5, *p6, *p7;
2103
2104             p4 = vlib_get_buffer (vm, from[4]);
2105             p5 = vlib_get_buffer (vm, from[5]);
2106             p6 = vlib_get_buffer (vm, from[6]);
2107             p7 = vlib_get_buffer (vm, from[7]);
2108
2109             /* Prefetch the buffer header and packet for the next loop iteration */
2110             vlib_prefetch_buffer_header (p4, LOAD);
2111             vlib_prefetch_buffer_header (p5, LOAD);
2112             vlib_prefetch_buffer_header (p6, LOAD);
2113             vlib_prefetch_buffer_header (p7, LOAD);
2114
2115             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2116             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2117             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2118             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2119           }
2120
2121           to_next[0] = bi0 = from[0];
2122           to_next[1] = bi1 = from[1];
2123           to_next[2] = bi2 = from[2];
2124           to_next[3] = bi3 = from[3];
2125           from += 4;
2126           to_next += 4;
2127           n_left_from -= 4;
2128           n_left_to_next -= 4;
2129
2130           b0 = vlib_get_buffer (vm, bi0);
2131           b1 = vlib_get_buffer (vm, bi1);
2132           b2 = vlib_get_buffer (vm, bi2);
2133           b3 = vlib_get_buffer (vm, bi3);
2134
2135           sl0 =
2136             pool_elt_at_index (sm->sid_lists,
2137                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2138           sl1 =
2139             pool_elt_at_index (sm->sid_lists,
2140                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2141           sl2 =
2142             pool_elt_at_index (sm->sid_lists,
2143                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2144           sl3 =
2145             pool_elt_at_index (sm->sid_lists,
2146                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2147           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2148                   vec_len (sl0->rewrite));
2149           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2150                   vec_len (sl1->rewrite));
2151           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2152                   vec_len (sl2->rewrite));
2153           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2154                   vec_len (sl3->rewrite));
2155
2156           ip0 = vlib_buffer_get_current (b0);
2157           ip1 = vlib_buffer_get_current (b1);
2158           ip2 = vlib_buffer_get_current (b2);
2159           ip3 = vlib_buffer_get_current (b3);
2160
2161           if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2162             sr0 =
2163               (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2164                                    ip6_ext_header_len (ip0 + 1));
2165           else
2166             sr0 = (ip6_sr_header_t *) (ip0 + 1);
2167
2168           if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2169             sr1 =
2170               (ip6_sr_header_t *) (((void *) (ip1 + 1)) +
2171                                    ip6_ext_header_len (ip1 + 1));
2172           else
2173             sr1 = (ip6_sr_header_t *) (ip1 + 1);
2174
2175           if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2176             sr2 =
2177               (ip6_sr_header_t *) (((void *) (ip2 + 1)) +
2178                                    ip6_ext_header_len (ip2 + 1));
2179           else
2180             sr2 = (ip6_sr_header_t *) (ip2 + 1);
2181
2182           if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2183             sr3 =
2184               (ip6_sr_header_t *) (((void *) (ip3 + 1)) +
2185                                    ip6_ext_header_len (ip3 + 1));
2186           else
2187             sr3 = (ip6_sr_header_t *) (ip3 + 1);
2188
2189           clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0,
2190                             (void *) sr0 - (void *) ip0);
2191           clib_memcpy_fast ((u8 *) ip1 - vec_len (sl1->rewrite), (u8 *) ip1,
2192                             (void *) sr1 - (void *) ip1);
2193           clib_memcpy_fast ((u8 *) ip2 - vec_len (sl2->rewrite), (u8 *) ip2,
2194                             (void *) sr2 - (void *) ip2);
2195           clib_memcpy_fast ((u8 *) ip3 - vec_len (sl3->rewrite), (u8 *) ip3,
2196                             (void *) sr3 - (void *) ip3);
2197
2198           clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite)),
2199                             sl0->rewrite, vec_len (sl0->rewrite));
2200           clib_memcpy_fast (((u8 *) sr1 - vec_len (sl1->rewrite)),
2201                             sl1->rewrite, vec_len (sl1->rewrite));
2202           clib_memcpy_fast (((u8 *) sr2 - vec_len (sl2->rewrite)),
2203                             sl2->rewrite, vec_len (sl2->rewrite));
2204           clib_memcpy_fast (((u8 *) sr3 - vec_len (sl3->rewrite)),
2205                             sl3->rewrite, vec_len (sl3->rewrite));
2206
2207           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
2208           vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
2209           vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
2210           vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
2211
2212           ip0 = ((void *) ip0) - vec_len (sl0->rewrite);
2213           ip1 = ((void *) ip1) - vec_len (sl1->rewrite);
2214           ip2 = ((void *) ip2) - vec_len (sl2->rewrite);
2215           ip3 = ((void *) ip3) - vec_len (sl3->rewrite);
2216
2217           ip0->hop_limit -= 1;
2218           ip1->hop_limit -= 1;
2219           ip2->hop_limit -= 1;
2220           ip3->hop_limit -= 1;
2221
2222           new_l0 =
2223             clib_net_to_host_u16 (ip0->payload_length) +
2224             vec_len (sl0->rewrite);
2225           new_l1 =
2226             clib_net_to_host_u16 (ip1->payload_length) +
2227             vec_len (sl1->rewrite);
2228           new_l2 =
2229             clib_net_to_host_u16 (ip2->payload_length) +
2230             vec_len (sl2->rewrite);
2231           new_l3 =
2232             clib_net_to_host_u16 (ip3->payload_length) +
2233             vec_len (sl3->rewrite);
2234
2235           ip0->payload_length = clib_host_to_net_u16 (new_l0);
2236           ip1->payload_length = clib_host_to_net_u16 (new_l1);
2237           ip2->payload_length = clib_host_to_net_u16 (new_l2);
2238           ip3->payload_length = clib_host_to_net_u16 (new_l3);
2239
2240           sr0 = ((void *) sr0) - vec_len (sl0->rewrite);
2241           sr1 = ((void *) sr1) - vec_len (sl1->rewrite);
2242           sr2 = ((void *) sr2) - vec_len (sl2->rewrite);
2243           sr3 = ((void *) sr3) - vec_len (sl3->rewrite);
2244
2245           sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0];
2246           sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1];
2247           sr1->segments->as_u64[0] = ip1->dst_address.as_u64[0];
2248           sr1->segments->as_u64[1] = ip1->dst_address.as_u64[1];
2249           sr2->segments->as_u64[0] = ip2->dst_address.as_u64[0];
2250           sr2->segments->as_u64[1] = ip2->dst_address.as_u64[1];
2251           sr3->segments->as_u64[0] = ip3->dst_address.as_u64[0];
2252           sr3->segments->as_u64[1] = ip3->dst_address.as_u64[1];
2253
2254           ip0->dst_address.as_u64[0] =
2255             (sr0->segments + sr0->segments_left)->as_u64[0];
2256           ip0->dst_address.as_u64[1] =
2257             (sr0->segments + sr0->segments_left)->as_u64[1];
2258           ip1->dst_address.as_u64[0] =
2259             (sr1->segments + sr1->segments_left)->as_u64[0];
2260           ip1->dst_address.as_u64[1] =
2261             (sr1->segments + sr1->segments_left)->as_u64[1];
2262           ip2->dst_address.as_u64[0] =
2263             (sr2->segments + sr2->segments_left)->as_u64[0];
2264           ip2->dst_address.as_u64[1] =
2265             (sr2->segments + sr2->segments_left)->as_u64[1];
2266           ip3->dst_address.as_u64[0] =
2267             (sr3->segments + sr3->segments_left)->as_u64[0];
2268           ip3->dst_address.as_u64[1] =
2269             (sr3->segments + sr3->segments_left)->as_u64[1];
2270
2271           ip6_ext_header_t *ip_ext;
2272           if (ip0 + 1 == (void *) sr0)
2273             {
2274               sr0->protocol = ip0->protocol;
2275               ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2276             }
2277           else
2278             {
2279               ip_ext = (void *) (ip0 + 1);
2280               sr0->protocol = ip_ext->next_hdr;
2281               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2282             }
2283
2284           if (ip1 + 1 == (void *) sr1)
2285             {
2286               sr1->protocol = ip1->protocol;
2287               ip1->protocol = IP_PROTOCOL_IPV6_ROUTE;
2288             }
2289           else
2290             {
2291               ip_ext = (void *) (ip1 + 1);
2292               sr1->protocol = ip_ext->next_hdr;
2293               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2294             }
2295
2296           if (ip2 + 1 == (void *) sr2)
2297             {
2298               sr2->protocol = ip2->protocol;
2299               ip2->protocol = IP_PROTOCOL_IPV6_ROUTE;
2300             }
2301           else
2302             {
2303               ip_ext = (void *) (ip2 + 1);
2304               sr2->protocol = ip_ext->next_hdr;
2305               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2306             }
2307
2308           if (ip3 + 1 == (void *) sr3)
2309             {
2310               sr3->protocol = ip3->protocol;
2311               ip3->protocol = IP_PROTOCOL_IPV6_ROUTE;
2312             }
2313           else
2314             {
2315               ip_ext = (void *) (ip3 + 1);
2316               sr3->protocol = ip_ext->next_hdr;
2317               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2318             }
2319
2320           insert_pkts += 4;
2321
2322           if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
2323             {
2324               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2325                 {
2326                   sr_policy_rewrite_trace_t *tr =
2327                     vlib_add_trace (vm, node, b0, sizeof (*tr));
2328                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2329                                     sizeof (tr->src.as_u8));
2330                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2331                                     sizeof (tr->dst.as_u8));
2332                 }
2333
2334               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
2335                 {
2336                   sr_policy_rewrite_trace_t *tr =
2337                     vlib_add_trace (vm, node, b1, sizeof (*tr));
2338                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
2339                                     sizeof (tr->src.as_u8));
2340                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
2341                                     sizeof (tr->dst.as_u8));
2342                 }
2343
2344               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
2345                 {
2346                   sr_policy_rewrite_trace_t *tr =
2347                     vlib_add_trace (vm, node, b2, sizeof (*tr));
2348                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
2349                                     sizeof (tr->src.as_u8));
2350                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
2351                                     sizeof (tr->dst.as_u8));
2352                 }
2353
2354               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
2355                 {
2356                   sr_policy_rewrite_trace_t *tr =
2357                     vlib_add_trace (vm, node, b3, sizeof (*tr));
2358                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
2359                                     sizeof (tr->src.as_u8));
2360                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
2361                                     sizeof (tr->dst.as_u8));
2362                 }
2363             }
2364
2365           vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
2366                                            n_left_to_next, bi0, bi1, bi2, bi3,
2367                                            next0, next1, next2, next3);
2368         }
2369
2370       /* Single loop for the remaining packets */
2371       while (n_left_from > 0 && n_left_to_next > 0)
2372         {
2373           u32 bi0;
2374           vlib_buffer_t *b0;
2375           ip6_header_t *ip0 = 0;
2376           ip6_sr_header_t *sr0 = 0;
2377           ip6_sr_sl_t *sl0;
2378           u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2379           u16 new_l0 = 0;
2380
2381           bi0 = from[0];
2382           to_next[0] = bi0;
2383           from += 1;
2384           to_next += 1;
2385           n_left_from -= 1;
2386           n_left_to_next -= 1;
2387
2388           b0 = vlib_get_buffer (vm, bi0);
2389           sl0 =
2390             pool_elt_at_index (sm->sid_lists,
2391                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2392           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2393                   vec_len (sl0->rewrite));
2394
2395           ip0 = vlib_buffer_get_current (b0);
2396
2397           if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2398             sr0 =
2399               (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2400                                    ip6_ext_header_len (ip0 + 1));
2401           else
2402             sr0 = (ip6_sr_header_t *) (ip0 + 1);
2403
2404           clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0,
2405                             (void *) sr0 - (void *) ip0);
2406           clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite)),
2407                             sl0->rewrite, vec_len (sl0->rewrite));
2408
2409           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
2410
2411           ip0 = ((void *) ip0) - vec_len (sl0->rewrite);
2412           ip0->hop_limit -= 1;
2413           new_l0 =
2414             clib_net_to_host_u16 (ip0->payload_length) +
2415             vec_len (sl0->rewrite);
2416           ip0->payload_length = clib_host_to_net_u16 (new_l0);
2417
2418           sr0 = ((void *) sr0) - vec_len (sl0->rewrite);
2419           sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0];
2420           sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1];
2421
2422           ip0->dst_address.as_u64[0] =
2423             (sr0->segments + sr0->segments_left)->as_u64[0];
2424           ip0->dst_address.as_u64[1] =
2425             (sr0->segments + sr0->segments_left)->as_u64[1];
2426
2427           if (ip0 + 1 == (void *) sr0)
2428             {
2429               sr0->protocol = ip0->protocol;
2430               ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2431             }
2432           else
2433             {
2434               ip6_ext_header_t *ip_ext = (void *) (ip0 + 1);
2435               sr0->protocol = ip_ext->next_hdr;
2436               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2437             }
2438
2439           if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
2440               PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2441             {
2442               sr_policy_rewrite_trace_t *tr =
2443                 vlib_add_trace (vm, node, b0, sizeof (*tr));
2444               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2445                                 sizeof (tr->src.as_u8));
2446               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2447                                 sizeof (tr->dst.as_u8));
2448             }
2449
2450           insert_pkts++;
2451
2452           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2453                                            n_left_to_next, bi0, next0);
2454         }
2455
2456       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2457     }
2458
2459   /* Update counters */
2460   vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2461                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2462                                insert_pkts);
2463   vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2464                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2465                                bsid_pkts);
2466   return from_frame->n_vectors;
2467 }
2468
2469 /* *INDENT-OFF* */
2470 VLIB_REGISTER_NODE (sr_policy_rewrite_insert_node) = {
2471   .function = sr_policy_rewrite_insert,
2472   .name = "sr-pl-rewrite-insert",
2473   .vector_size = sizeof (u32),
2474   .format_trace = format_sr_policy_rewrite_trace,
2475   .type = VLIB_NODE_TYPE_INTERNAL,
2476   .n_errors = SR_POLICY_REWRITE_N_ERROR,
2477   .error_strings = sr_policy_rewrite_error_strings,
2478   .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2479   .next_nodes = {
2480 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2481     foreach_sr_policy_rewrite_next
2482 #undef _
2483   },
2484 };
2485 /* *INDENT-ON* */
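
/*
 * Editorial sketch (not upstream documentation) of the SRH insertion
 * performed by sr-pl-rewrite-insert above:
 *
 *   before:  | IPv6 | (hop-by-hop) | payload |
 *   after:   | IPv6 | (hop-by-hop) | SRH | payload |
 *
 * The IPv6 header (and a leading hop-by-hop extension header, if present)
 * is copied vec_len (sl->rewrite) bytes earlier in the buffer, the
 * precomputed SRH is copied into the gap, payload_length grows by the
 * rewrite length, the hop limit is decremented, the original destination
 * address is stored as segment 0 of the SRH (the final segment) and the
 * active segment (segments[segments_left]) becomes the new IPv6
 * destination address.
 */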
2486
2487 /**
2488  * @brief Graph node for applying an SR policy to an IPv6 packet. BSID - SRH insertion.
2489  */
2490 static uword
2491 sr_policy_rewrite_b_insert (vlib_main_t * vm, vlib_node_runtime_t * node,
2492                             vlib_frame_t * from_frame)
2493 {
2494   ip6_sr_main_t *sm = &sr_main;
2495   u32 n_left_from, next_index, *from, *to_next;
2496
2497   from = vlib_frame_vector_args (from_frame);
2498   n_left_from = from_frame->n_vectors;
2499
2500   next_index = node->cached_next_index;
2501
2502   int insert_pkts = 0, bsid_pkts = 0;
2503
2504   while (n_left_from > 0)
2505     {
2506       u32 n_left_to_next;
2507
2508       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2509
2510       /* Quad - Loop */
2511       while (n_left_from >= 8 && n_left_to_next >= 4)
2512         {
2513           u32 bi0, bi1, bi2, bi3;
2514           vlib_buffer_t *b0, *b1, *b2, *b3;
2515           u32 next0, next1, next2, next3;
2516           next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2517           ip6_header_t *ip0, *ip1, *ip2, *ip3;
2518           ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2519           ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2520           u16 new_l0, new_l1, new_l2, new_l3;
2521
2522           /* Prefetch next iteration. */
2523           {
2524             vlib_buffer_t *p4, *p5, *p6, *p7;
2525
2526             p4 = vlib_get_buffer (vm, from[4]);
2527             p5 = vlib_get_buffer (vm, from[5]);
2528             p6 = vlib_get_buffer (vm, from[6]);
2529             p7 = vlib_get_buffer (vm, from[7]);
2530
2531             /* Prefetch the buffer header and packet for the next loop iteration */
2532             vlib_prefetch_buffer_header (p4, LOAD);
2533             vlib_prefetch_buffer_header (p5, LOAD);
2534             vlib_prefetch_buffer_header (p6, LOAD);
2535             vlib_prefetch_buffer_header (p7, LOAD);
2536
2537             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2538             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2539             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2540             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2541           }
2542
2543           to_next[0] = bi0 = from[0];
2544           to_next[1] = bi1 = from[1];
2545           to_next[2] = bi2 = from[2];
2546           to_next[3] = bi3 = from[3];
2547           from += 4;
2548           to_next += 4;
2549           n_left_from -= 4;
2550           n_left_to_next -= 4;
2551
2552           b0 = vlib_get_buffer (vm, bi0);
2553           b1 = vlib_get_buffer (vm, bi1);
2554           b2 = vlib_get_buffer (vm, bi2);
2555           b3 = vlib_get_buffer (vm, bi3);
2556
2557           sl0 =
2558             pool_elt_at_index (sm->sid_lists,
2559                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2560           sl1 =
2561             pool_elt_at_index (sm->sid_lists,
2562                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
2563           sl2 =
2564             pool_elt_at_index (sm->sid_lists,
2565                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
2566           sl3 =
2567             pool_elt_at_index (sm->sid_lists,
2568                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
2569           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2570                   vec_len (sl0->rewrite_bsid));
2571           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2572                   vec_len (sl1->rewrite_bsid));
2573           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2574                   vec_len (sl2->rewrite_bsid));
2575           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2576                   vec_len (sl3->rewrite_bsid));
2577
2578           ip0 = vlib_buffer_get_current (b0);
2579           ip1 = vlib_buffer_get_current (b1);
2580           ip2 = vlib_buffer_get_current (b2);
2581           ip3 = vlib_buffer_get_current (b3);
2582
2583           if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2584             sr0 =
2585               (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2586                                    ip6_ext_header_len (ip0 + 1));
2587           else
2588             sr0 = (ip6_sr_header_t *) (ip0 + 1);
2589
2590           if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2591             sr1 =
2592               (ip6_sr_header_t *) (((void *) (ip1 + 1)) +
2593                                    ip6_ext_header_len (ip1 + 1));
2594           else
2595             sr1 = (ip6_sr_header_t *) (ip1 + 1);
2596
2597           if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2598             sr2 =
2599               (ip6_sr_header_t *) (((void *) (ip2 + 1)) +
2600                                    ip6_ext_header_len (ip2 + 1));
2601           else
2602             sr2 = (ip6_sr_header_t *) (ip2 + 1);
2603
2604           if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2605             sr3 =
2606               (ip6_sr_header_t *) (((void *) (ip3 + 1)) +
2607                                    ip6_ext_header_len (ip3 + 1));
2608           else
2609             sr3 = (ip6_sr_header_t *) (ip3 + 1);
2610
2611           clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite_bsid),
2612                             (u8 *) ip0, (void *) sr0 - (void *) ip0);
2613           clib_memcpy_fast ((u8 *) ip1 - vec_len (sl1->rewrite_bsid),
2614                             (u8 *) ip1, (void *) sr1 - (void *) ip1);
2615           clib_memcpy_fast ((u8 *) ip2 - vec_len (sl2->rewrite_bsid),
2616                             (u8 *) ip2, (void *) sr2 - (void *) ip2);
2617           clib_memcpy_fast ((u8 *) ip3 - vec_len (sl3->rewrite_bsid),
2618                             (u8 *) ip3, (void *) sr3 - (void *) ip3);
2619
2620           clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)),
2621                             sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid));
2622           clib_memcpy_fast (((u8 *) sr1 - vec_len (sl1->rewrite_bsid)),
2623                             sl1->rewrite_bsid, vec_len (sl1->rewrite_bsid));
2624           clib_memcpy_fast (((u8 *) sr2 - vec_len (sl2->rewrite_bsid)),
2625                             sl2->rewrite_bsid, vec_len (sl2->rewrite_bsid));
2626           clib_memcpy_fast (((u8 *) sr3 - vec_len (sl3->rewrite_bsid)),
2627                             sl3->rewrite_bsid, vec_len (sl3->rewrite_bsid));
2628
2629           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite_bsid));
2630           vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite_bsid));
2631           vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite_bsid));
2632           vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite_bsid));
2633
2634           ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid);
2635           ip1 = ((void *) ip1) - vec_len (sl1->rewrite_bsid);
2636           ip2 = ((void *) ip2) - vec_len (sl2->rewrite_bsid);
2637           ip3 = ((void *) ip3) - vec_len (sl3->rewrite_bsid);
2638
2639           ip0->hop_limit -= 1;
2640           ip1->hop_limit -= 1;
2641           ip2->hop_limit -= 1;
2642           ip3->hop_limit -= 1;
2643
2644           new_l0 =
2645             clib_net_to_host_u16 (ip0->payload_length) +
2646             vec_len (sl0->rewrite_bsid);
2647           new_l1 =
2648             clib_net_to_host_u16 (ip1->payload_length) +
2649             vec_len (sl1->rewrite_bsid);
2650           new_l2 =
2651             clib_net_to_host_u16 (ip2->payload_length) +
2652             vec_len (sl2->rewrite_bsid);
2653           new_l3 =
2654             clib_net_to_host_u16 (ip3->payload_length) +
2655             vec_len (sl3->rewrite_bsid);
2656
2657           ip0->payload_length = clib_host_to_net_u16 (new_l0);
2658           ip1->payload_length = clib_host_to_net_u16 (new_l1);
2659           ip2->payload_length = clib_host_to_net_u16 (new_l2);
2660           ip3->payload_length = clib_host_to_net_u16 (new_l3);
2661
2662           sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid);
2663           sr1 = ((void *) sr1) - vec_len (sl1->rewrite_bsid);
2664           sr2 = ((void *) sr2) - vec_len (sl2->rewrite_bsid);
2665           sr3 = ((void *) sr3) - vec_len (sl3->rewrite_bsid);
2666
2667           ip0->dst_address.as_u64[0] =
2668             (sr0->segments + sr0->segments_left)->as_u64[0];
2669           ip0->dst_address.as_u64[1] =
2670             (sr0->segments + sr0->segments_left)->as_u64[1];
2671           ip1->dst_address.as_u64[0] =
2672             (sr1->segments + sr1->segments_left)->as_u64[0];
2673           ip1->dst_address.as_u64[1] =
2674             (sr1->segments + sr1->segments_left)->as_u64[1];
2675           ip2->dst_address.as_u64[0] =
2676             (sr2->segments + sr2->segments_left)->as_u64[0];
2677           ip2->dst_address.as_u64[1] =
2678             (sr2->segments + sr2->segments_left)->as_u64[1];
2679           ip3->dst_address.as_u64[0] =
2680             (sr3->segments + sr3->segments_left)->as_u64[0];
2681           ip3->dst_address.as_u64[1] =
2682             (sr3->segments + sr3->segments_left)->as_u64[1];
2683
2684           ip6_ext_header_t *ip_ext;
2685           if (ip0 + 1 == (void *) sr0)
2686             {
2687               sr0->protocol = ip0->protocol;
2688               ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2689             }
2690           else
2691             {
2692               ip_ext = (void *) (ip0 + 1);
2693               sr0->protocol = ip_ext->next_hdr;
2694               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2695             }
2696
2697           if (ip1 + 1 == (void *) sr1)
2698             {
2699               sr1->protocol = ip1->protocol;
2700               ip1->protocol = IP_PROTOCOL_IPV6_ROUTE;
2701             }
2702           else
2703             {
2704               ip_ext = (void *) (ip1 + 1);
2705               sr1->protocol = ip_ext->next_hdr;
2706               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2707             }
2708
2709           if (ip2 + 1 == (void *) sr2)
2710             {
2711               sr2->protocol = ip2->protocol;
2712               ip2->protocol = IP_PROTOCOL_IPV6_ROUTE;
2713             }
2714           else
2715             {
2716               ip_ext = (void *) (ip2 + 1);
2717               sr2->protocol = ip_ext->next_hdr;
2718               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2719             }
2720
2721           if (ip3 + 1 == (void *) sr3)
2722             {
2723               sr3->protocol = ip3->protocol;
2724               ip3->protocol = IP_PROTOCOL_IPV6_ROUTE;
2725             }
2726           else
2727             {
2728               ip_ext = (void *) (ip3 + 1);
2729               sr3->protocol = ip_ext->next_hdr;
2730               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2731             }
2732
2733           insert_pkts += 4;
2734
2735           if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
2736             {
2737               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2738                 {
2739                   sr_policy_rewrite_trace_t *tr =
2740                     vlib_add_trace (vm, node, b0, sizeof (*tr));
2741                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2742                                     sizeof (tr->src.as_u8));
2743                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2744                                     sizeof (tr->dst.as_u8));
2745                 }
2746
2747               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
2748                 {
2749                   sr_policy_rewrite_trace_t *tr =
2750                     vlib_add_trace (vm, node, b1, sizeof (*tr));
2751                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
2752                                     sizeof (tr->src.as_u8));
2753                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
2754                                     sizeof (tr->dst.as_u8));
2755                 }
2756
2757               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
2758                 {
2759                   sr_policy_rewrite_trace_t *tr =
2760                     vlib_add_trace (vm, node, b2, sizeof (*tr));
2761                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
2762                                     sizeof (tr->src.as_u8));
2763                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
2764                                     sizeof (tr->dst.as_u8));
2765                 }
2766
2767               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
2768                 {
2769                   sr_policy_rewrite_trace_t *tr =
2770                     vlib_add_trace (vm, node, b3, sizeof (*tr));
2771                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
2772                                     sizeof (tr->src.as_u8));
2773                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
2774                                     sizeof (tr->dst.as_u8));
2775                 }
2776             }
2777
2778           vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
2779                                            n_left_to_next, bi0, bi1, bi2, bi3,
2780                                            next0, next1, next2, next3);
2781         }
2782
2783       /* Single loop for any remaining packets */
2784       while (n_left_from > 0 && n_left_to_next > 0)
2785         {
2786           u32 bi0;
2787           vlib_buffer_t *b0;
2788           ip6_header_t *ip0 = 0;
2789           ip6_sr_header_t *sr0 = 0;
2790           ip6_sr_sl_t *sl0;
2791           u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2792           u16 new_l0 = 0;
2793
2794           bi0 = from[0];
2795           to_next[0] = bi0;
2796           from += 1;
2797           to_next += 1;
2798           n_left_from -= 1;
2799           n_left_to_next -= 1;
2800
2801           b0 = vlib_get_buffer (vm, bi0);
2802           sl0 =
2803             pool_elt_at_index (sm->sid_lists,
2804                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
2805           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
2806                   vec_len (sl0->rewrite_bsid));
2807
2808           ip0 = vlib_buffer_get_current (b0);
2809
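          /*
           * sr0 marks the insertion point for the policy's SRH
           * (rewrite_bsid): directly after the IPv6 header, or after the
           * hop-by-hop options header when one is present.
           */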
2810           if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
2811             sr0 =
2812               (ip6_sr_header_t *) (((void *) (ip0 + 1)) +
2813                                    ip6_ext_header_len (ip0 + 1));
2814           else
2815             sr0 = (ip6_sr_header_t *) (ip0 + 1);
2816
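          /*
           * Open a gap of vec_len (rewrite_bsid) bytes in front of the
           * packet: shift the IPv6 header (and any hop-by-hop header) into
           * the buffer's pre-data area, then copy the pre-computed SRH into
           * the space just freed.
           */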
2817           clib_memcpy_fast ((u8 *) ip0 - vec_len (sl0->rewrite_bsid),
2818                             (u8 *) ip0, (void *) sr0 - (void *) ip0);
2819           clib_memcpy_fast (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)),
2820                             sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid));
2821
2822           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite_bsid));
2823
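          /*
           * SRH insertion keeps forwarding the original packet, so its hop
           * limit is decremented and its payload length grows by the size
           * of the inserted SRH.
           */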
2824           ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid);
2825           ip0->hop_limit -= 1;
2826           new_l0 =
2827             clib_net_to_host_u16 (ip0->payload_length) +
2828             vec_len (sl0->rewrite_bsid);
2829           ip0->payload_length = clib_host_to_net_u16 (new_l0);
2830
2831           sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid);
2832
2833           ip0->dst_address.as_u64[0] =
2834             (sr0->segments + sr0->segments_left)->as_u64[0];
2835           ip0->dst_address.as_u64[1] =
2836             (sr0->segments + sr0->segments_left)->as_u64[1];
2837
2838           if (ip0 + 1 == (void *) sr0)
2839             {
2840               sr0->protocol = ip0->protocol;
2841               ip0->protocol = IP_PROTOCOL_IPV6_ROUTE;
2842             }
2843           else
2844             {
2845               ip6_ext_header_t *ip_ext = (void *) (ip0 + 1);
2846               sr0->protocol = ip_ext->next_hdr;
2847               ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE;
2848             }
2849
2850           if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
2851               PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2852             {
2853               sr_policy_rewrite_trace_t *tr =
2854                 vlib_add_trace (vm, node, b0, sizeof (*tr));
2855               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
2856                                 sizeof (tr->src.as_u8));
2857               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
2858                                 sizeof (tr->dst.as_u8));
2859             }
2860
2861           insert_pkts++;
2862
2863           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2864                                            n_left_to_next, bi0, next0);
2865         }
2866
2867       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2868     }
2869
2870   /* Update counters */
2871   vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2872                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
2873                                insert_pkts);
2874   vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index,
2875                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
2876                                bsid_pkts);
2877   return from_frame->n_vectors;
2878 }
2879
2880 /* *INDENT-OFF* */
2881 VLIB_REGISTER_NODE (sr_policy_rewrite_b_insert_node) = {
2882   .function = sr_policy_rewrite_b_insert,
2883   .name = "sr-pl-rewrite-b-insert",
2884   .vector_size = sizeof (u32),
2885   .format_trace = format_sr_policy_rewrite_trace,
2886   .type = VLIB_NODE_TYPE_INTERNAL,
2887   .n_errors = SR_POLICY_REWRITE_N_ERROR,
2888   .error_strings = sr_policy_rewrite_error_strings,
2889   .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
2890   .next_nodes = {
2891 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
2892     foreach_sr_policy_rewrite_next
2893 #undef _
2894   },
2895 };
2896 /* *INDENT-ON* */
2897
2898 /**
2899  * @brief Inner SRH processing for BindingSID encapsulation
2900  */
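/*
 * The packet steered into this policy by its BindingSID already carries an
 * SRH.  Before the new outer encapsulation is prepended, act as an SR
 * endpoint on that inner header: decrement Segments Left and copy the now
 * active segment into the IPv6 destination address.  Packets without an
 * SRH, or whose Segments Left is already 0, are dropped with the BSID_ZERO
 * error.
 *
 * Illustrative example: for a SID list <A, B, C> the SRH stores the
 * segments in reverse order (segments[] = { C, B, A }) and Segments Left
 * starts at 2 with the IPv6 DA set to A.  One pass through this helper
 * leaves Segments Left == 1 and DA == B.
 */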
2901 static_always_inline void
2902 end_bsid_encaps_srh_processing (vlib_node_runtime_t * node,
2903                                 vlib_buffer_t * b0,
2904                                 ip6_header_t * ip0,
2905                                 ip6_sr_header_t * sr0, u32 * next0)
2906 {
2907   ip6_address_t *new_dst0;
2908
2909   if (PREDICT_FALSE (!sr0))
2910     goto error_bsid_encaps;
2911
2912   if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR))
2913     {
2914       if (PREDICT_TRUE (sr0->segments_left != 0))
2915         {
2916           sr0->segments_left -= 1;
2917           new_dst0 = (ip6_address_t *) (sr0->segments);
2918           new_dst0 += sr0->segments_left;
2919           ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
2920           ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];
2921           return;
2922         }
2923     }
2924
2925 error_bsid_encaps:
2926   *next0 = SR_POLICY_REWRITE_NEXT_ERROR;
2927   b0->error = node->errors[SR_POLICY_REWRITE_ERROR_BSID_ZERO];
2928 }
2929
2930 /**
2931  * @brief Graph node for applying an SR policy BSID - Encapsulation
2932  */
2933 static uword
2934 sr_policy_rewrite_b_encaps (vlib_main_t * vm, vlib_node_runtime_t * node,
2935                             vlib_frame_t * from_frame)
2936 {
2937   ip6_sr_main_t *sm = &sr_main;
2938   u32 n_left_from, next_index, *from, *to_next;
2939
2940   from = vlib_frame_vector_args (from_frame);
2941   n_left_from = from_frame->n_vectors;
2942
2943   next_index = node->cached_next_index;
2944
2945   int encap_pkts = 0, bsid_pkts = 0;
2946
2947   while (n_left_from > 0)
2948     {
2949       u32 n_left_to_next;
2950
2951       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2952
2953       /* Quad - Loop */
2954       while (n_left_from >= 8 && n_left_to_next >= 4)
2955         {
2956           u32 bi0, bi1, bi2, bi3;
2957           vlib_buffer_t *b0, *b1, *b2, *b3;
2958           u32 next0, next1, next2, next3;
2959           next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
2960           ip6_header_t *ip0, *ip1, *ip2, *ip3;
2961           ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap;
2962           ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
2963           ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3;
2964
2965           /* Prefetch next iteration. */
2966           {
2967             vlib_buffer_t *p4, *p5, *p6, *p7;
2968
2969             p4 = vlib_get_buffer (vm, from[4]);
2970             p5 = vlib_get_buffer (vm, from[5]);
2971             p6 = vlib_get_buffer (vm, from[6]);
2972             p7 = vlib_get_buffer (vm, from[7]);
2973
2974             /* Prefetch the buffer header and packet data for the next loop iteration */
2975             vlib_prefetch_buffer_header (p4, LOAD);
2976             vlib_prefetch_buffer_header (p5, LOAD);
2977             vlib_prefetch_buffer_header (p6, LOAD);
2978             vlib_prefetch_buffer_header (p7, LOAD);
2979
2980             CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
2981             CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
2982             CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
2983             CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
2984           }
2985
2986           to_next[0] = bi0 = from[0];
2987           to_next[1] = bi1 = from[1];
2988           to_next[2] = bi2 = from[2];
2989           to_next[3] = bi3 = from[3];
2990           from += 4;
2991           to_next += 4;
2992           n_left_from -= 4;
2993           n_left_to_next -= 4;
2994
2995           b0 = vlib_get_buffer (vm, bi0);
2996           b1 = vlib_get_buffer (vm, bi1);
2997           b2 = vlib_get_buffer (vm, bi2);
2998           b3 = vlib_get_buffer (vm, bi3);
2999
3000           sl0 =
3001             pool_elt_at_index (sm->sid_lists,
3002                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
3003           sl1 =
3004             pool_elt_at_index (sm->sid_lists,
3005                                vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
3006           sl2 =
3007             pool_elt_at_index (sm->sid_lists,
3008                                vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
3009           sl3 =
3010             pool_elt_at_index (sm->sid_lists,
3011                                vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
3012           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
3013                   vec_len (sl0->rewrite));
3014           ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
3015                   vec_len (sl1->rewrite));
3016           ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
3017                   vec_len (sl2->rewrite));
3018           ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
3019                   vec_len (sl3->rewrite));
3020
3021           ip0_encap = vlib_buffer_get_current (b0);
3022           ip1_encap = vlib_buffer_get_current (b1);
3023           ip2_encap = vlib_buffer_get_current (b2);
3024           ip3_encap = vlib_buffer_get_current (b3);
3025
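          /*
           * Locate the routing extension header (the inner SRH) of each
           * steered packet; end_bsid_encaps_srh_processing() then advances
           * it and rewrites the inner destination address.
           */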
3026           sr0 =
3027             ip6_ext_header_find (vm, b0, ip0_encap, IP_PROTOCOL_IPV6_ROUTE,
3028                                  NULL);
3029           sr1 =
3030             ip6_ext_header_find (vm, b1, ip1_encap, IP_PROTOCOL_IPV6_ROUTE,
3031                                  NULL);
3032           sr2 =
3033             ip6_ext_header_find (vm, b2, ip2_encap, IP_PROTOCOL_IPV6_ROUTE,
3034                                  NULL);
3035           sr3 =
3036             ip6_ext_header_find (vm, b3, ip3_encap, IP_PROTOCOL_IPV6_ROUTE,
3037                                  NULL);
3038
3039           end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0);
3040           end_bsid_encaps_srh_processing (node, b1, ip1_encap, sr1, &next1);
3041           end_bsid_encaps_srh_processing (node, b2, ip2_encap, sr2, &next2);
3042           end_bsid_encaps_srh_processing (node, b3, ip3_encap, sr3, &next3);
3043
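          /*
           * Prepend the pre-computed encapsulation (outer IPv6 header plus
           * SRH) in the buffer's pre-data area and move the current data
           * pointer back over it; encaps_processing_v6() then fixes up the
           * outer header fields from the inner packet.
           */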
3044           clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
3045                             sl0->rewrite, vec_len (sl0->rewrite));
3046           clib_memcpy_fast (((u8 *) ip1_encap) - vec_len (sl1->rewrite),
3047                             sl1->rewrite, vec_len (sl1->rewrite));
3048           clib_memcpy_fast (((u8 *) ip2_encap) - vec_len (sl2->rewrite),
3049                             sl2->rewrite, vec_len (sl2->rewrite));
3050           clib_memcpy_fast (((u8 *) ip3_encap) - vec_len (sl3->rewrite),
3051                             sl3->rewrite, vec_len (sl3->rewrite));
3052
3053           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
3054           vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite));
3055           vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite));
3056           vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite));
3057
3058           ip0 = vlib_buffer_get_current (b0);
3059           ip1 = vlib_buffer_get_current (b1);
3060           ip2 = vlib_buffer_get_current (b2);
3061           ip3 = vlib_buffer_get_current (b3);
3062
3063           encaps_processing_v6 (node, b0, ip0, ip0_encap);
3064           encaps_processing_v6 (node, b1, ip1, ip1_encap);
3065           encaps_processing_v6 (node, b2, ip2, ip2_encap);
3066           encaps_processing_v6 (node, b3, ip3, ip3_encap);
3067
3068           if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
3069             {
3070               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3071                 {
3072                   sr_policy_rewrite_trace_t *tr =
3073                     vlib_add_trace (vm, node, b0, sizeof (*tr));
3074                   clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
3075                                     sizeof (tr->src.as_u8));
3076                   clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
3077                                     sizeof (tr->dst.as_u8));
3078                 }
3079
3080               if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
3081                 {
3082                   sr_policy_rewrite_trace_t *tr =
3083                     vlib_add_trace (vm, node, b1, sizeof (*tr));
3084                   clib_memcpy_fast (tr->src.as_u8, ip1->src_address.as_u8,
3085                                     sizeof (tr->src.as_u8));
3086                   clib_memcpy_fast (tr->dst.as_u8, ip1->dst_address.as_u8,
3087                                     sizeof (tr->dst.as_u8));
3088                 }
3089
3090               if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
3091                 {
3092                   sr_policy_rewrite_trace_t *tr =
3093                     vlib_add_trace (vm, node, b2, sizeof (*tr));
3094                   clib_memcpy_fast (tr->src.as_u8, ip2->src_address.as_u8,
3095                                     sizeof (tr->src.as_u8));
3096                   clib_memcpy_fast (tr->dst.as_u8, ip2->dst_address.as_u8,
3097                                     sizeof (tr->dst.as_u8));
3098                 }
3099
3100               if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
3101                 {
3102                   sr_policy_rewrite_trace_t *tr =
3103                     vlib_add_trace (vm, node, b3, sizeof (*tr));
3104                   clib_memcpy_fast (tr->src.as_u8, ip3->src_address.as_u8,
3105                                     sizeof (tr->src.as_u8));
3106                   clib_memcpy_fast (tr->dst.as_u8, ip3->dst_address.as_u8,
3107                                     sizeof (tr->dst.as_u8));
3108                 }
3109             }
3110
3111           encap_pkts += 4;
3112           vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
3113                                            n_left_to_next, bi0, bi1, bi2, bi3,
3114                                            next0, next1, next2, next3);
3115         }
3116
3117       /* Single loop for any remaining packets */
3118       while (n_left_from > 0 && n_left_to_next > 0)
3119         {
3120           u32 bi0;
3121           vlib_buffer_t *b0;
3122           ip6_header_t *ip0 = 0, *ip0_encap = 0;
3123           ip6_sr_header_t *sr0;
3124           ip6_sr_sl_t *sl0;
3125           u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP;
3126
3127           bi0 = from[0];
3128           to_next[0] = bi0;
3129           from += 1;
3130           to_next += 1;
3131           n_left_from -= 1;
3132           n_left_to_next -= 1;
3133           b0 = vlib_get_buffer (vm, bi0);
3134
3135           sl0 =
3136             pool_elt_at_index (sm->sid_lists,
3137                                vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
3138           ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >=
3139                   vec_len (sl0->rewrite));
3140
3141           ip0_encap = vlib_buffer_get_current (b0);
3142           sr0 =
3143             ip6_ext_header_find (vm, b0, ip0_encap, IP_PROTOCOL_IPV6_ROUTE,
3144                                  NULL);
3145           end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0);
3146
3147           clib_memcpy_fast (((u8 *) ip0_encap) - vec_len (sl0->rewrite),
3148                             sl0->rewrite, vec_len (sl0->rewrite));
3149           vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite));
3150
3151           ip0 = vlib_buffer_get_current (b0);
3152
3153           encaps_processing_v6 (node, b0, ip0, ip0_encap);
3154
3155           if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
3156               PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3157             {
3158               sr_policy_rewrite_trace_t *tr =
3159                 vlib_add_trace (vm, node, b0, sizeof (*tr));
3160               clib_memcpy_fast (tr->src.as_u8, ip0->src_address.as_u8,
3161                                 sizeof (tr->src.as_u8));
3162               clib_memcpy_fast (tr->dst.as_u8, ip0->dst_address.as_u8,
3163                                 sizeof (tr->dst.as_u8));
3164             }
3165
3166           encap_pkts++;
3167           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
3168                                            n_left_to_next, bi0, next0);
3169         }
3170
3171       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
3172     }
3173
3174   /* Update counters */
3175   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
3176                                SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL,
3177                                encap_pkts);
3178   vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index,
3179                                SR_POLICY_REWRITE_ERROR_COUNTER_BSID,
3180                                bsid_pkts);
3181
3182   return from_frame->n_vectors;
3183 }
3184
3185 /* *INDENT-OFF* */
3186 VLIB_REGISTER_NODE (sr_policy_rewrite_b_encaps_node) = {
3187   .function = sr_policy_rewrite_b_encaps,
3188   .name = "sr-pl-rewrite-b-encaps",
3189   .vector_size = sizeof (u32),
3190   .format_trace = format_sr_policy_rewrite_trace,
3191   .type = VLIB_NODE_TYPE_INTERNAL,
3192   .n_errors = SR_POLICY_REWRITE_N_ERROR,
3193   .error_strings = sr_policy_rewrite_error_strings,
3194   .n_next_nodes = SR_POLICY_REWRITE_N_NEXT,
3195   .next_nodes = {
3196 #define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n,
3197     foreach_sr_policy_rewrite_next
3198 #undef _
3199   },
3200 };
3201 /* *INDENT-ON* */
3202
3203 /*************************** SR Segment Lists DPOs ****************************/
3204 static u8 *
3205 format_sr_segment_list_dpo (u8 * s, va_list * args)
3206 {
3207   ip6_sr_main_t *sm = &sr_main;
3208   ip6_address_t *addr;
3209   ip6_sr_sl_t *sl;
3210
3211   index_t index = va_arg (*args, index_t);
3212   CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
3213   s = format (s, "SR: Segment List index:[%d]", index);
3214   s = format (s, "\n\tSegments:");
3215
3216   sl = pool_elt_at_index (sm->sid_lists, index);
3217
3218   s = format (s, "< ");
3219   vec_foreach (addr, sl->segments)
3220   {
3221     s = format (s, "%U, ", format_ip6_address, addr);
3222   }
3223   s = format (s, "\b\b > - ");
3224   s = format (s, "Weight: %u", sl->weight);
3225
3226   return s;
3227 }
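/*
 * Illustrative output (example addresses); the "\b\b" erases the trailing
 * ", " on a terminal:
 *   SR: Segment List index:[2]
 *        Segments:< 2001:db8::1, 2001:db8::2 > - Weight: 1
 */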
3228
3229 const static dpo_vft_t sr_policy_rewrite_vft = {
3230   .dv_lock = sr_dpo_lock,
3231   .dv_unlock = sr_dpo_unlock,
3232   .dv_format = format_sr_segment_list_dpo,
3233 };
3234
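/*
 * Per-protocol graph nodes that each SR DPO type forwards into.  Only the
 * plain encapsulation DPO handles IPv4 payloads; SRH insertion and the
 * BindingSID variants are IPv6-only, hence the missing DPO_PROTO_IP4
 * entries below.
 */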
3235 const static char *const sr_pr_encaps_ip6_nodes[] = {
3236   "sr-pl-rewrite-encaps",
3237   NULL,
3238 };
3239
3240 const static char *const sr_pr_encaps_ip4_nodes[] = {
3241   "sr-pl-rewrite-encaps-v4",
3242   NULL,
3243 };
3244
3245 const static char *const *const sr_pr_encaps_nodes[DPO_PROTO_NUM] = {
3246   [DPO_PROTO_IP6] = sr_pr_encaps_ip6_nodes,
3247   [DPO_PROTO_IP4] = sr_pr_encaps_ip4_nodes,
3248 };
3249
3250 const static char *const sr_pr_insert_ip6_nodes[] = {
3251   "sr-pl-rewrite-insert",
3252   NULL,
3253 };
3254
3255 const static char *const *const sr_pr_insert_nodes[DPO_PROTO_NUM] = {
3256   [DPO_PROTO_IP6] = sr_pr_insert_ip6_nodes,
3257 };
3258
3259 const static char *const sr_pr_bsid_insert_ip6_nodes[] = {
3260   "sr-pl-rewrite-b-insert",
3261   NULL,
3262 };
3263
3264 const static char *const *const sr_pr_bsid_insert_nodes[DPO_PROTO_NUM] = {
3265   [DPO_PROTO_IP6] = sr_pr_bsid_insert_ip6_nodes,
3266 };
3267
3268 const static char *const sr_pr_bsid_encaps_ip6_nodes[] = {
3269   "sr-pl-rewrite-b-encaps",
3270   NULL,
3271 };
3272
3273 const static char *const *const sr_pr_bsid_encaps_nodes[DPO_PROTO_NUM] = {
3274   [DPO_PROTO_IP6] = sr_pr_bsid_encaps_ip6_nodes,
3275 };
3276
3277 /********************* SR Policy Rewrite initialization ***********************/
3278 /**
3279  * @brief SR Policy Rewrite initialization
3280  */
3281 clib_error_t *
3282 sr_policy_rewrite_init (vlib_main_t * vm)
3283 {
3284   ip6_sr_main_t *sm = &sr_main;
3285
3286   /* Init memory for sr policy keys (bsid <-> ip6_address_t) */
3287   mhash_init (&sm->sr_policies_index_hash, sizeof (uword),
3288               sizeof (ip6_address_t));
3289
3290   /* Init SR DPO types */
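  /*
   * Each registration binds the shared vft above to the per-protocol node
   * lists and returns the dpo_type_t used elsewhere in this file when the
   * policy's forwarding DPO is built.
   */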
3291   sr_pr_encaps_dpo_type =
3292     dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_encaps_nodes);
3293
3294   sr_pr_insert_dpo_type =
3295     dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_insert_nodes);
3296
3297   sr_pr_bsid_encaps_dpo_type =
3298     dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_encaps_nodes);
3299
3300   sr_pr_bsid_insert_dpo_type =
3301     dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_insert_nodes);
3302
3303   /* Register the L2 encaps node used in HW redirect */
3304   sm->l2_sr_policy_rewrite_index = sr_policy_rewrite_encaps_node.index;
3305
3306   sm->fib_table_ip6 = (u32) ~ 0;
3307   sm->fib_table_ip4 = (u32) ~ 0;
3308
3309   return 0;
3310 }
3311
3312 VLIB_INIT_FUNCTION (sr_policy_rewrite_init);
3313
3314
3315 /*
3316 * fd.io coding-style-patch-verification: ON
3317 *
3318 * Local Variables:
3319 * eval: (c-set-style "gnu")
3320 * End:
3321 */