2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/plugin/plugin.h>
18 #include <vpp/app/version.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/udp/udp.h>
//GC runs at most once every so many seconds
#define LB_GARBAGE_RUN 60

//After so many seconds. It is assumed that inter-core race condition will not occur.
#define LB_CONCURRENCY_TIMEOUT 10

//Global writer lock: spin until writer_lock[0] transitions 0 -> 1.
//__sync_lock_test_and_set provides acquire semantics; release is a
//plain store of 0.
//NOTE(review): the unlock has no explicit release barrier
//(__sync_lock_release would) - presumably relies on the target's
//memory model; confirm.
#define lb_get_writer_lock() do {} while(__sync_lock_test_and_set (lb_main.writer_lock, 1))
#define lb_put_writer_lock() lb_main.writer_lock[0] = 0

//Forward declaration: re-stacks an AS's DPO on its next-hop FIB entry
//(defined near the bottom of this file).
static void lb_as_stack (lb_as_t *as);
/*
 * Per-encap DPO child-node tables: for each DPO protocol (the address
 * family of the VIP lookup), the vlib graph node(s) that handle packets
 * for that encap type. These tables are passed to dpo_register_new_type()
 * in lb_init(). NULL terminates each node-name list; entries left out of
 * a table mean "no node for that protocol" (e.g. L3DSR/NAT4 are IPv4-only).
 */
const static char * const lb_dpo_gre4_ip4[] = { "lb4-gre4" , NULL };
const static char * const lb_dpo_gre4_ip6[] = { "lb6-gre4" , NULL };
const static char* const * const lb_dpo_gre4_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP4] = lb_dpo_gre4_ip4,
[DPO_PROTO_IP6] = lb_dpo_gre4_ip6,
const static char * const lb_dpo_gre6_ip4[] = { "lb4-gre6" , NULL };
const static char * const lb_dpo_gre6_ip6[] = { "lb6-gre6" , NULL };
const static char* const * const lb_dpo_gre6_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP4] = lb_dpo_gre6_ip4,
[DPO_PROTO_IP6] = lb_dpo_gre6_ip6,
const static char * const lb_dpo_gre4_ip4_port[] = { "lb4-gre4-port" , NULL };
const static char * const lb_dpo_gre4_ip6_port[] = { "lb6-gre4-port" , NULL };
const static char* const * const lb_dpo_gre4_port_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP4] = lb_dpo_gre4_ip4_port,
[DPO_PROTO_IP6] = lb_dpo_gre4_ip6_port,
const static char * const lb_dpo_gre6_ip4_port[] = { "lb4-gre6-port" , NULL };
const static char * const lb_dpo_gre6_ip6_port[] = { "lb6-gre6-port" , NULL };
const static char* const * const lb_dpo_gre6_port_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP4] = lb_dpo_gre6_ip4_port,
[DPO_PROTO_IP6] = lb_dpo_gre6_ip6_port,
//L3DSR and NAT4 encaps only exist for IPv4 VIP lookups.
const static char * const lb_dpo_l3dsr_ip4[] = {"lb4-l3dsr" , NULL};
const static char* const * const lb_dpo_l3dsr_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP4] = lb_dpo_l3dsr_ip4,
const static char * const lb_dpo_l3dsr_ip4_port[] = {"lb4-l3dsr-port" , NULL};
const static char* const * const lb_dpo_l3dsr_port_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP4] = lb_dpo_l3dsr_ip4_port,
const static char * const lb_dpo_nat4_ip4_port[] = { "lb4-nat4-port" , NULL };
const static char* const * const lb_dpo_nat4_port_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP4] = lb_dpo_nat4_ip4_port,
//NAT6 encap only exists for IPv6 VIP lookups.
const static char * const lb_dpo_nat6_ip6_port[] = { "lb6-nat6-port" , NULL };
const static char* const * const lb_dpo_nat6_port_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_IP6] = lb_dpo_nat6_ip6_port,
92 u32 lb_hash_time_now(vlib_main_t * vm)
94 return (u32) (vlib_time_now(vm) + 10000);
/**
 * format() callback dumping global LB state: configured source
 * addresses, VIP/AS counts, and each worker core's sticky hash-table
 * timeout and usage.
 */
u8 *format_lb_main (u8 * s, va_list * args)
vlib_thread_main_t *tm = vlib_get_thread_main();
lb_main_t *lbm = &lb_main;
s = format(s, "lb_main");
s = format(s, " ip4-src-address: %U \n", format_ip4_address, &lbm->ip4_src_address);
s = format(s, " ip6-src-address: %U \n", format_ip6_address, &lbm->ip6_src_address);
s = format(s, " #vips: %u\n", pool_elts(lbm->vips));
//-1 hides the always-present default AS allocated in lb_init().
s = format(s, " #ass: %u\n", pool_elts(lbm->ass) - 1);
for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) {
lb_hash_t *h = lbm->per_cpu[thread_index].sticky_ht;
s = format(s, "core %d\n", thread_index);
s = format(s, "  timeout: %ds\n", h->timeout);
s = format(s, "  usage: %d / %d\n", lb_hash_elts(h, lb_hash_time_now(vlib_get_main())), lb_hash_size(h));
//Human-readable names, indexed by lb_vip_type_t; shared by the
//format/unformat helpers below.
static char *lb_vip_type_strings[] = {
[LB_VIP_TYPE_IP6_GRE6] = "ip6-gre6",
[LB_VIP_TYPE_IP6_GRE4] = "ip6-gre4",
[LB_VIP_TYPE_IP4_GRE6] = "ip4-gre6",
[LB_VIP_TYPE_IP4_GRE4] = "ip4-gre4",
[LB_VIP_TYPE_IP4_L3DSR] = "ip4-l3dsr",
[LB_VIP_TYPE_IP4_NAT4] = "ip4-nat4",
[LB_VIP_TYPE_IP6_NAT6] = "ip6-nat6",
/**
 * format() callback for lb_vip_type_t: prints the matching entry of
 * lb_vip_type_strings, or "_WRONG_TYPE_" for an out-of-range value.
 */
u8 *format_lb_vip_type (u8 * s, va_list * args)
lb_vip_type_t vipt = va_arg (*args, lb_vip_type_t);
for (i=0; i<LB_VIP_N_TYPES; i++)
return format(s, lb_vip_type_strings[i]);
return format(s, "_WRONG_TYPE_");
/**
 * unformat() callback: parses one of the lb_vip_type_strings keywords
 * from the input and stores the matching lb_vip_type_t into *vipt.
 */
uword unformat_lb_vip_type (unformat_input_t * input, va_list * args)
lb_vip_type_t *vipt = va_arg (*args, lb_vip_type_t *);
for (i=0; i<LB_VIP_N_TYPES; i++)
if (unformat(input, lb_vip_type_strings[i])) {
/**
 * format() callback: one-line VIP summary - type, prefix, new-flow-table
 * size, AS count and removed flag - plus protocol/port and
 * encap-specific details (dscp for L3DSR, service type and ports for
 * NAT4/NAT6).
 */
u8 *format_lb_vip (u8 * s, va_list * args)
lb_vip_t *vip = va_arg (*args, lb_vip_t *);
s = format(s, "%U %U new_size:%u #as:%u%s",
format_lb_vip_type, vip->type,
format_ip46_prefix, &vip->prefix, vip->plen, IP46_TYPE_ANY,
vip->new_flow_table_mask + 1,
pool_elts(vip->as_indexes),
(vip->flags & LB_VIP_FLAGS_USED)?"":" removed");
//NOTE(review): port is printed raw here but with ntohs() below -
//the stored byte order of vip->port should be confirmed.
s = format(s, "  protocol:%u port:%u ", vip->protocol, vip->port);
if (vip->type == LB_VIP_TYPE_IP4_L3DSR)
s = format(s, "  dscp:%u", vip->encap_args.dscp);
else if ((vip->type == LB_VIP_TYPE_IP4_NAT4)
|| (vip->type == LB_VIP_TYPE_IP6_NAT6))
s = format (s, " type:%s port:%u target_port:%u",
(vip->encap_args.srv_type == LB_SRV_TYPE_CLUSTERIP)?"clusterip":
ntohs(vip->port), ntohs(vip->encap_args.target_port));
183 u8 *format_lb_as (u8 * s, va_list * args)
185 lb_as_t *as = va_arg (*args, lb_as_t *);
186 return format(s, "%U %s", format_ip46_address,
187 &as->address, IP46_TYPE_ANY,
188 (as->flags & LB_AS_FLAGS_USED)?"used":"removed");
/**
 * format() callback: multi-line VIP dump - header line, protocol/port,
 * encap-specific details, per-VIP counters, AS count, and one line per
 * AS with its bucket count in the new-flow table and its active flow
 * refcount.
 */
u8 *format_lb_vip_detailed (u8 * s, va_list * args)
lb_main_t *lbm = &lb_main;
lb_vip_t *vip = va_arg (*args, lb_vip_t *);
u32 indent = format_get_indent (s);

s = format(s, "%U %U [%lu] %U%s\n"
format_white_space, indent,
format_lb_vip_type, vip->type,
format_ip46_prefix, &vip->prefix, (u32) vip->plen, IP46_TYPE_ANY,
(vip->flags & LB_VIP_FLAGS_USED)?"":" removed",
format_white_space, indent,
vip->new_flow_table_mask + 1);

s = format(s, "%U  protocol:%u port:%u\n",
format_white_space, indent,
vip->protocol, vip->port);

if (vip->type == LB_VIP_TYPE_IP4_L3DSR)
s = format(s, "%U  dscp:%u\n",
format_white_space, indent,
vip->encap_args.dscp);
else if ((vip->type == LB_VIP_TYPE_IP4_NAT4)
|| (vip->type == LB_VIP_TYPE_IP6_NAT6))
s = format (s, "%U type:%s port:%u target_port:%u",
format_white_space, indent,
(vip->encap_args.srv_type == LB_SRV_TYPE_CLUSTERIP)?"clusterip":
ntohs(vip->port), ntohs(vip->encap_args.target_port));

s = format(s, "%U  counters:\n",
format_white_space, indent);
for (i=0; i<LB_N_VIP_COUNTERS; i++)
s = format(s, "%U    %s: %d\n",
format_white_space, indent,
lbm->vip_counters[i].name,
vlib_get_simple_counter(&lbm->vip_counters[i], vip - lbm->vips));

s = format(s, "%U  #as:%u\n",
format_white_space, indent,
pool_elts(vip->as_indexes));

//Let's count the buckets for each AS
vec_validate(count, pool_len(lbm->ass)); //Possibly big alloc for not much...
lb_new_flow_entry_t *nfe;
vec_foreach(nfe, vip->new_flow_table)
count[nfe->as_index]++;

//One line per AS: address, bucket share, live flow refcount, state.
pool_foreach(as_index, vip->as_indexes, {
as = &lbm->ass[*as_index];
s = format(s, "%U    %U %d buckets %d flows dpo:%u %s\n",
format_white_space, indent,
format_ip46_address, &as->address, IP46_TYPE_ANY,
count[as - lbm->ass],
vlib_refcount_get(&lbm->as_refcount, as - lbm->ass),
(as->flags & LB_AS_FLAGS_USED)?"used":" removed");
275 static int lb_pseudorand_compare(void *a, void *b)
278 lb_main_t *lbm = &lb_main;
279 asa = &lbm->ass[((lb_pseudorand_t *)a)->as_index];
280 asb = &lbm->ass[((lb_pseudorand_t *)b)->as_index];
281 return memcmp(&asa->address, &asb->address, sizeof(asb->address));
/**
 * Per-VIP garbage collector. Frees ASs that are flagged removed, have
 * been unused for at least LB_CONCURRENCY_TIMEOUT seconds, and have a
 * zero flow refcount. For NAT VIPs the AS's SNAT mapping is removed
 * first; then the AS's FIB child link and FIB entry are torn down and
 * the AS is returned to the pool. Rate-limited to once per
 * LB_GARBAGE_RUN seconds per VIP. Caller must hold the writer lock.
 */
static void lb_vip_garbage_collection(lb_vip_t *vip)
lb_main_t *lbm = &lb_main;
lb_snat4_key_t m_key4;
clib_bihash_kv_8_8_t kv4, value4;
lb_snat6_key_t m_key6;
clib_bihash_kv_24_8_t kv6, value6;
lb_snat_mapping_t *m = 0;
ASSERT (lbm->writer_lock[0]);

u32 now = (u32) vlib_time_now(vlib_get_main());
//clib_u32_loop_gt handles u32 wrap-around of the timestamps.
if (!clib_u32_loop_gt(now, vip->last_garbage_collection + LB_GARBAGE_RUN))

vip->last_garbage_collection = now;
pool_foreach(as_index, vip->as_indexes, {
as = &lbm->ass[*as_index];
if (!(as->flags & LB_AS_FLAGS_USED) && //Not used
clib_u32_loop_gt(now, as->last_used + LB_CONCURRENCY_TIMEOUT) &&
(vlib_refcount_get(&lbm->as_refcount, as - lbm->ass) == 0))
//NAT VIPs: look up and delete this AS's SNAT mapping first.
if (lb_vip_is_nat4_port(vip)) {
m_key4.addr = as->address.ip4;
m_key4.port = vip->encap_args.target_port;
m_key4.fib_index = 0;
kv4.key = m_key4.as_u64;
if(!clib_bihash_search_8_8(&lbm->mapping_by_as4, &kv4, &value4))
m = pool_elt_at_index (lbm->snat_mappings, value4.value);
kv4.value = m - lbm->snat_mappings;
clib_bihash_add_del_8_8(&lbm->mapping_by_as4, &kv4, 0);
pool_put (lbm->snat_mappings, m);
} else if (lb_vip_is_nat6_port(vip)) {
m_key6.addr.as_u64[0] = as->address.ip6.as_u64[0];
m_key6.addr.as_u64[1] = as->address.ip6.as_u64[1];
m_key6.port = vip->encap_args.target_port;
m_key6.fib_index = 0;
kv6.key[0] = m_key6.as_u64[0];
kv6.key[1] = m_key6.as_u64[1];
kv6.key[2] = m_key6.as_u64[2];
if (!clib_bihash_search_24_8 (&lbm->mapping_by_as6, &kv6, &value6))
m = pool_elt_at_index (lbm->snat_mappings, value6.value);
kv6.value = m - lbm->snat_mappings;
clib_bihash_add_del_24_8(&lbm->mapping_by_as6, &kv6, 0);
pool_put (lbm->snat_mappings, m);
//Unlink from the next-hop FIB entry and delete it, then free the AS.
fib_entry_child_remove(as->next_hop_fib_entry_index,
as->next_hop_child_index);
fib_table_entry_delete_index(as->next_hop_fib_entry_index,
as->next_hop_fib_entry_index = FIB_NODE_INDEX_INVALID;
pool_put(vip->as_indexes, as_index);
pool_put(lbm->ass, as);
/**
 * Global garbage collector: runs the per-VIP GC on every VIP, then
 * frees VIPs that are flagged removed and have no remaining ASs.
 * Takes and releases the writer lock itself.
 */
void lb_garbage_collection()
lb_main_t *lbm = &lb_main;
lb_get_writer_lock();

u32 *to_be_removed_vips = 0, *i;
pool_foreach(vip, lbm->vips, {
lb_vip_garbage_collection(vip);
if (!(vip->flags & LB_VIP_FLAGS_USED) &&
(pool_elts(vip->as_indexes) == 0)) {
vec_add1(to_be_removed_vips, vip - lbm->vips);

vec_foreach(i, to_be_removed_vips) {
vip = &lbm->vips[*i];
pool_put(lbm->vips, vip);
//NOTE(review): vip->as_indexes is read after pool_put() returns the
//element to the pool - safe only because pool_put does not scrub the
//memory; freeing before pool_put would be cleaner.
pool_free(vip->as_indexes);

vec_free(to_be_removed_vips);
lb_put_writer_lock();
/**
 * Rebuilds the VIP's Maglev-style new-flow table: each in-use AS gets
 * a pseudo-random permutation of the 2^n buckets (derived from a hash
 * of its address), and ASs claim buckets round-robin along their
 * permutation until the table is full. With no usable AS, every bucket
 * points at the default AS (index 0). Caller must hold the writer lock.
 */
static void lb_vip_update_new_flow_table(lb_vip_t *vip)
lb_main_t *lbm = &lb_main;
lb_new_flow_entry_t *old_table;
lb_new_flow_entry_t *new_flow_table = 0;
lb_pseudorand_t *pr, *sort_arr = 0;
ASSERT (lbm->writer_lock[0]); //We must have the lock

//Check if some AS is configured or not
pool_foreach(as_index, vip->as_indexes, {
as = &lbm->ass[*as_index];
if (as->flags & LB_AS_FLAGS_USED) { //At least one AS is still in use
goto out; //Not sure 'break' works in this macro-loop

//Only the default. i.e. no AS
vec_validate(new_flow_table, vip->new_flow_table_mask);
for (i=0; i<vec_len(new_flow_table); i++)
new_flow_table[i].as_index = 0;

//First, let's sort the ASs
vec_alloc(sort_arr, pool_elts(vip->as_indexes));

pool_foreach(as_index, vip->as_indexes, {
as = &lbm->ass[*as_index];
if (!(as->flags & LB_AS_FLAGS_USED)) //Not used anymore
sort_arr[i].as_index = as - lbm->ass;
_vec_len(sort_arr) = i;

vec_sort_with_function(sort_arr, lb_pseudorand_compare);

//Now let's pseudo-randomly generate permutations
vec_foreach(pr, sort_arr) {
lb_as_t *as = &lbm->ass[pr->as_index];
u64 seed = clib_xxhash(as->address.as_u64[0] ^
as->address.as_u64[1]);
/* We have 2^n buckets.
* skip must be prime with 2^n.
* So skip must be odd.
* MagLev actually states that M should be prime,
* but this has a big computation cost (% operation).
* Using 2^n is better (& operation).
pr->skip = ((seed & 0xffffffff) | 1) & vip->new_flow_table_mask;
pr->last = (seed >> 32) & vip->new_flow_table_mask;

//Let's create a new flow table
vec_validate(new_flow_table, vip->new_flow_table_mask);
for (i=0; i<vec_len(new_flow_table); i++)
new_flow_table[i].as_index = ~0;

//Walk each AS's permutation, claiming the first free bucket per turn.
vec_foreach(pr, sort_arr) {
pr->last = (pr->last + pr->skip) & vip->new_flow_table_mask;
if (new_flow_table[last].as_index == ~0) {
new_flow_table[last].as_index = pr->as_index;
if (done == vec_len(new_flow_table))

//Count number of changed entries
for (i=0; i<vec_len(new_flow_table); i++)
if (vip->new_flow_table == 0 ||
new_flow_table[i].as_index != vip->new_flow_table[i].as_index)

//Swap in the new table; data plane readers pick it up from the vip.
old_table = vip->new_flow_table;
vip->new_flow_table = new_flow_table;
482 int lb_conf(ip4_address_t *ip4_address, ip6_address_t *ip6_address,
483 u32 per_cpu_sticky_buckets, u32 flow_timeout)
485 lb_main_t *lbm = &lb_main;
487 if (!is_pow2(per_cpu_sticky_buckets))
488 return VNET_API_ERROR_INVALID_MEMORY_SIZE;
490 lb_get_writer_lock(); //Not exactly necessary but just a reminder that it exists for my future self
491 lbm->ip4_src_address = *ip4_address;
492 lbm->ip6_src_address = *ip6_address;
493 lbm->per_cpu_sticky_buckets = per_cpu_sticky_buckets;
494 lbm->flow_timeout = flow_timeout;
495 lb_put_writer_lock();
/**
 * Scans the VIP pool for a VIP with the given (normalized) prefix,
 * selecting by lookup type: SAME_IP_PORT matches the exact
 * protocol/port, ALL_PORT_IP matches the wildcard entry, and
 * DIFF_IP_PORT matches a same-prefix VIP with a different
 * protocol/port. Writes the pool index to *vip_index on success.
 * Returns VNET_API_ERROR_NO_SUCH_ENTRY when nothing matches.
 * Caller must hold the writer lock.
 */
int lb_vip_port_find_index(ip46_address_t *prefix, u8 plen,
u8 protocol, u16 port,
lb_lkp_type_t lkp_type,
lb_main_t *lbm = &lb_main;
ASSERT (lbm->writer_lock[0]); //This must be called with the lock owned
ip46_prefix_normalize(prefix, plen);
pool_foreach(vip, lbm->vips, {
//NOTE(review): LB_AS_FLAGS_USED is tested on vip->flags here - this
//only works if it shares the bit value of LB_VIP_FLAGS_USED; it should
//presumably read LB_VIP_FLAGS_USED.
if ((vip->flags & LB_AS_FLAGS_USED) &&
vip->prefix.as_u64[0] == prefix->as_u64[0] &&
vip->prefix.as_u64[1] == prefix->as_u64[1])
if((lkp_type == LB_LKP_SAME_IP_PORT &&
vip->protocol == protocol &&
vip->port == port) ||
(lkp_type == LB_LKP_ALL_PORT_IP &&
(lkp_type == LB_LKP_DIFF_IP_PORT &&
(vip->protocol != protocol ||
vip->port != port) ) )
*vip_index = vip - lbm->vips;
return VNET_API_ERROR_NO_SUCH_ENTRY;
535 int lb_vip_port_find_index_with_lock(ip46_address_t *prefix, u8 plen,
536 u8 protocol, u16 port, u32 *vip_index)
538 return lb_vip_port_find_index(prefix, plen, protocol, port,
539 LB_LKP_SAME_IP_PORT, vip_index);
543 int lb_vip_port_find_all_port_vip(ip46_address_t *prefix, u8 plen,
546 return lb_vip_port_find_index(prefix, plen, ~0, 0,
547 LB_LKP_ALL_PORT_IP, vip_index);
550 /* Find out per-port-vip entry with different protocol and port */
552 int lb_vip_port_find_diff_port(ip46_address_t *prefix, u8 plen,
553 u8 protocol, u16 port, u32 *vip_index)
555 return lb_vip_port_find_index(prefix, plen, protocol, port,
556 LB_LKP_DIFF_IP_PORT, vip_index);
559 int lb_vip_find_index(ip46_address_t *prefix, u8 plen, u8 protocol,
560 u16 port, u32 *vip_index)
563 lb_get_writer_lock();
564 ret = lb_vip_port_find_index_with_lock(prefix, plen,
565 protocol, port, vip_index);
566 lb_put_writer_lock();
/**
 * Finds the AS with the given address among this VIP's ASs; on a match
 * writes the AS pool index to *as_index. Callers (e.g. lb_vip_add_ass)
 * treat a return of 0 as "found". Caller must hold the writer lock.
 */
static int lb_as_find_index_vip(lb_vip_t *vip, ip46_address_t *address, u32 *as_index)
lb_main_t *lbm = &lb_main;
ASSERT (lbm->writer_lock[0]); //This must be called with the lock owned
pool_foreach(asi, vip->as_indexes, {
as = &lbm->ass[*asi];
if (as->vip_index == (vip - lbm->vips) &&
as->address.as_u64[0] == address->as_u64[0] &&
as->address.as_u64[1] == address->as_u64[1])
*as_index = as - lbm->ass;
/**
 * Adds n application servers (addresses[0..n-1]) to a VIP. Addresses
 * that already exist but are flagged removed are resurrected; new ones
 * are allocated, attached as FIB children of their next-hop entry, and
 * (for NAT VIPs) given an SNAT static mapping. Rejects duplicates,
 * already-active ASs and address-family mismatches with VNET_API_ERROR
 * codes. Rebuilds the new-flow table and runs the per-VIP GC before
 * returning. Takes and releases the writer lock itself.
 */
int lb_vip_add_ass(u32 vip_index, ip46_address_t *addresses, u32 n)
lb_main_t *lbm = &lb_main;
lb_get_writer_lock();
if (!(vip = lb_vip_get_by_index(vip_index))) {
lb_put_writer_lock();
return VNET_API_ERROR_NO_SUCH_ENTRY;

//All ASs of a VIP must share the VIP's encap address family.
ip46_type_t type = lb_encap_is_ip4(vip)?IP46_TYPE_IP4:IP46_TYPE_IP6;
u32 *to_be_added = 0;
u32 *to_be_updated = 0;
lb_snat_mapping_t *m;

//Partition the input into already-known ASs (to_be_updated) and new
//ones (to_be_added), validating as we go.
if (!lb_as_find_index_vip(vip, &addresses[n], &i)) {
if (lbm->ass[i].flags & LB_AS_FLAGS_USED) {
vec_free(to_be_added);
vec_free(to_be_updated);
lb_put_writer_lock();
return VNET_API_ERROR_VALUE_EXIST;
vec_add1(to_be_updated, i);

if (ip46_address_type(&addresses[n]) != type) {
vec_free(to_be_added);
vec_free(to_be_updated);
lb_put_writer_lock();
return VNET_API_ERROR_INVALID_ADDRESS_FAMILY;

while(n2--) //Check for duplicates
if (addresses[n2].as_u64[0] == addresses[n].as_u64[0] &&
addresses[n2].as_u64[1] == addresses[n].as_u64[1])
vec_add1(to_be_added, n);

//Resurrect previously removed ASs.
vec_foreach(ip, to_be_updated) {
lbm->ass[*ip].flags = LB_AS_FLAGS_USED;
vec_free(to_be_updated);

//Create those who have to be created
vec_foreach(ip, to_be_added) {
pool_get(lbm->ass, as);
as->address = addresses[*ip];
as->flags = LB_AS_FLAGS_USED;
as->vip_index = vip_index;
pool_get(vip->as_indexes, as_index);
*as_index = as - lbm->ass;
* become a child of the FIB entry
* so we are informed when its forwarding changes
fib_prefix_t nh = {};
if (lb_encap_is_ip4(vip)) {
nh.fp_addr.ip4 = as->address.ip4;
nh.fp_proto = FIB_PROTOCOL_IP4;
nh.fp_addr.ip6 = as->address.ip6;
nh.fp_proto = FIB_PROTOCOL_IP6;
as->next_hop_fib_entry_index =
fib_table_entry_special_add(0,
FIB_ENTRY_FLAG_NONE);
as->next_hop_child_index =
fib_entry_child_add(as->next_hop_fib_entry_index,

if ( lb_vip_is_nat4_port(vip) || lb_vip_is_nat6_port(vip) )
/* Add SNAT static mapping */
pool_get (lbm->snat_mappings, m);
memset (m, 0, sizeof (*m));
if (lb_vip_is_nat4_port(vip)) {
lb_snat4_key_t m_key4;
clib_bihash_kv_8_8_t kv4;
m_key4.addr = as->address.ip4;
m_key4.port = vip->encap_args.target_port;
m_key4.fib_index = 0;
//CLUSTERIP rewrites the source to the VIP itself; NODEPORT to the
//globally configured source address.
if (vip->encap_args.srv_type == LB_SRV_TYPE_CLUSTERIP)
m->src_ip.ip4 = vip->prefix.ip4;
else if (vip->encap_args.srv_type == LB_SRV_TYPE_NODEPORT)
m->src_ip.ip4 = lbm->ip4_src_address;
m->src_ip_is_ipv6 = 0;
m->as_ip.ip4 = as->address.ip4;
m->as_ip_is_ipv6 = 0;
m->src_port = vip->port;
m->target_port = vip->encap_args.target_port;
kv4.key = m_key4.as_u64;
kv4.value = m - lbm->snat_mappings;
clib_bihash_add_del_8_8(&lbm->mapping_by_as4, &kv4, 1);
lb_snat6_key_t m_key6;
clib_bihash_kv_24_8_t kv6;
m_key6.addr.as_u64[0] = as->address.ip6.as_u64[0];
m_key6.addr.as_u64[1] = as->address.ip6.as_u64[1];
m_key6.port = vip->encap_args.target_port;
m_key6.fib_index = 0;
if (vip->encap_args.srv_type == LB_SRV_TYPE_CLUSTERIP)
m->src_ip.ip6.as_u64[0] = vip->prefix.ip6.as_u64[0];
m->src_ip.ip6.as_u64[1] = vip->prefix.ip6.as_u64[1];
else if (vip->encap_args.srv_type == LB_SRV_TYPE_NODEPORT)
m->src_ip.ip6.as_u64[0] = lbm->ip6_src_address.as_u64[0];
m->src_ip.ip6.as_u64[1] = lbm->ip6_src_address.as_u64[1];
m->src_ip_is_ipv6 = 1;
m->as_ip.ip6.as_u64[0] = as->address.ip6.as_u64[0];
m->as_ip.ip6.as_u64[1] = as->address.ip6.as_u64[1];
m->as_ip_is_ipv6 = 1;
m->src_port = vip->port;
m->target_port = vip->encap_args.target_port;
kv6.key[0] = m_key6.as_u64[0];
kv6.key[1] = m_key6.as_u64[1];
kv6.key[2] = m_key6.as_u64[2];
kv6.value = m - lbm->snat_mappings;
clib_bihash_add_del_24_8(&lbm->mapping_by_as6, &kv6, 1);
vec_free(to_be_added);

//Rebuild the Maglev new-flow table with the updated AS set.
lb_vip_update_new_flow_table(vip);

//Garbage collection maybe
lb_vip_garbage_collection(vip);
lb_put_writer_lock();
/**
 * Removes n application servers from a VIP, writer lock already held.
 * ASs are only flagged unused and time-stamped (last_used = now); the
 * actual teardown happens later in lb_vip_garbage_collection(), so
 * in-flight flows can drain. Returns VNET_API_ERROR_NO_SUCH_ENTRY if
 * the VIP or any address is unknown.
 */
int lb_vip_del_ass_withlock(u32 vip_index, ip46_address_t *addresses, u32 n)
lb_main_t *lbm = &lb_main;
u32 now = (u32) vlib_time_now(vlib_get_main());

if (!(vip = lb_vip_get_by_index(vip_index))) {
return VNET_API_ERROR_NO_SUCH_ENTRY;

if (lb_as_find_index_vip(vip, &addresses[n], &as_index)) {
return VNET_API_ERROR_NO_SUCH_ENTRY;

if (n) { //Check for duplicates
if (addresses[n2].as_u64[0] == addresses[n].as_u64[0] &&
addresses[n2].as_u64[1] == addresses[n].as_u64[1])

vec_add1(indexes, as_index);

//Garbage collection maybe
lb_vip_garbage_collection(vip);

if (indexes != NULL) {
vec_foreach(ip, indexes) {
//Mark removed; GC frees it after LB_CONCURRENCY_TIMEOUT.
lbm->ass[*ip].flags &= ~LB_AS_FLAGS_USED;
lbm->ass[*ip].last_used = now;

lb_vip_update_new_flow_table(vip);
815 int lb_vip_del_ass(u32 vip_index, ip46_address_t *addresses, u32 n)
817 lb_get_writer_lock();
818 int ret = lb_vip_del_ass_withlock(vip_index, addresses, n);
819 lb_put_writer_lock();
//Allocates the lowest free per-port VIP prefix index from the bitmap
//and marks it used.
lb_vip_prefix_index_alloc (lb_main_t *lbm)
/*
* Check for dynamically allocated instance number.
bit = clib_bitmap_first_clear (lbm->vip_prefix_indexes);
lbm->vip_prefix_indexes = clib_bitmap_set(lbm->vip_prefix_indexes, bit, 1);
//Returns a per-port VIP prefix index to the bitmap; a bit that is
//already clear indicates the instance was never allocated.
lb_vip_prefix_index_free (lb_main_t *lbm, u32 instance)
if (clib_bitmap_get (lbm->vip_prefix_indexes, instance) == 0)
lbm->vip_prefix_indexes = clib_bitmap_set (lbm->vip_prefix_indexes,
* Add the VIP adjacency to the ip4 or ip6 fib.
* Picks the DPO type from the VIP's encap, and installs an exclusive
* special DPO for the VIP prefix. For per-port VIPs that share a prefix
* with an existing VIP, the adjacency already exists and only a fresh
* vip_prefix_index is allocated.
static void lb_vip_add_adjacency(lb_main_t *lbm, lb_vip_t *vip,
u32 *vip_prefix_index)
dpo_proto_t proto = 0;
dpo_type_t dpo_type = 0;

/* for per-port vip, if VIP adjacency has been added,
* no need to add adjacency. */
if (!lb_vip_port_find_diff_port(&(vip->prefix), vip->plen,
vip->protocol, vip->port, &vip_idx))

/* Allocate an index for per-port vip */
*vip_prefix_index = lb_vip_prefix_index_alloc(lbm);

*vip_prefix_index = vip - lbm->vips;

dpo_id_t dpo = DPO_INVALID;
fib_prefix_t pfx = {};
if (lb_vip_is_ip4(vip->type)) {
pfx.fp_addr.ip4 = vip->prefix.ip4;
//Prefixes are stored ip46-style: an IPv4 /x is kept as /(x+96),
//hence the -96 when installing into the IPv4 FIB.
pfx.fp_len = vip->plen - 96;
pfx.fp_proto = FIB_PROTOCOL_IP4;
proto = DPO_PROTO_IP4;
pfx.fp_addr.ip6 = vip->prefix.ip6;
pfx.fp_len = vip->plen;
pfx.fp_proto = FIB_PROTOCOL_IP6;
proto = DPO_PROTO_IP6;

//Select the DPO type registered for this VIP's encap in lb_init().
if (lb_vip_is_gre4(vip))
dpo_type = lbm->dpo_gre4_type;
else if (lb_vip_is_gre6(vip))
dpo_type = lbm->dpo_gre6_type;
else if (lb_vip_is_gre4_port(vip))
dpo_type = lbm->dpo_gre4_port_type;
else if (lb_vip_is_gre6_port(vip))
dpo_type = lbm->dpo_gre6_port_type;
else if (lb_vip_is_l3dsr(vip))
dpo_type = lbm->dpo_l3dsr_type;
else if (lb_vip_is_l3dsr_port(vip))
dpo_type = lbm->dpo_l3dsr_port_type;
else if(lb_vip_is_nat4_port(vip))
dpo_type = lbm->dpo_nat4_port_type;
else if (lb_vip_is_nat6_port(vip))
dpo_type = lbm->dpo_nat6_port_type;

dpo_set(&dpo, dpo_type, proto, *vip_prefix_index);
fib_table_entry_special_dpo_add(0,
FIB_SOURCE_PLUGIN_HI,
FIB_ENTRY_FLAG_EXCLUSIVE,
* Add the VIP filter entry: maps (vip_prefix_index, protocol, port) to
* the VIP's pool index in the vip_index_per_port bihash, so per-port
* VIPs sharing one adjacency can be told apart in the data plane.
static int lb_vip_add_port_filter(lb_main_t *lbm, lb_vip_t *vip,
u32 vip_prefix_index, u32 vip_idx)
clib_bihash_kv_8_8_t kv;
key.vip_prefix_index = vip_prefix_index;
key.protocol = vip->protocol;
key.port = clib_host_to_net_u16(vip->port);
clib_bihash_add_del_8_8(&lbm->vip_index_per_port, &kv, 1);
* Del the VIP filter entry: looks up the (vip_prefix_index, protocol,
* port) key in vip_index_per_port and removes it.
static int lb_vip_del_port_filter(lb_main_t *lbm, lb_vip_t *vip)
clib_bihash_kv_8_8_t kv, value;
key.vip_prefix_index = vip->vip_prefix_index;
key.protocol = vip->protocol;
key.port = clib_host_to_net_u16(vip->port);
if(clib_bihash_search_8_8(&lbm->vip_index_per_port, &kv, &value) == 0)
m = pool_elt_at_index (lbm->vips, value.value);
kv.value = m - lbm->vips;
clib_bihash_add_del_8_8(&lbm->vip_index_per_port, &kv, 0);
* Deletes the adjacency associated with the VIP, unless another
* per-port VIP on the same prefix still uses it; in that case only the
* per-port prefix index is recycled.
static void lb_vip_del_adjacency(lb_main_t *lbm, lb_vip_t *vip)
fib_prefix_t pfx = {};

/* If this vip adjacency is used by other per-port vip,
* no need to del this adjacency. */
if (!lb_vip_port_find_diff_port(&(vip->prefix), vip->plen,
vip->protocol, vip->port, &vip_idx))
//NOTE(review): the writer lock is released on this early path even
//though the caller (lb_vip_del) releases it again afterwards -
//confirm this double-unlock is intentional.
lb_put_writer_lock();

/* Return vip_prefix_index for per-port vip */
lb_vip_prefix_index_free(lbm, vip->vip_prefix_index);

if (lb_vip_is_ip4(vip->type)) {
pfx.fp_addr.ip4 = vip->prefix.ip4;
//IPv4 prefixes are stored /(x+96) in ip46 form, hence the -96.
pfx.fp_len = vip->plen - 96;
pfx.fp_proto = FIB_PROTOCOL_IP4;
pfx.fp_addr.ip6 = vip->prefix.ip6;
pfx.fp_len = vip->plen;
pfx.fp_proto = FIB_PROTOCOL_IP6;
fib_table_entry_special_remove(0, &pfx, FIB_SOURCE_PLUGIN_HI);
/**
 * Creates a new VIP: validates the request (no duplicate, no
 * per-port/all-port conflicts on the same prefix, consistent type,
 * power-of-two table size, matching address families, valid dscp),
 * allocates and initializes the VIP, builds its new-flow table, and
 * installs its FIB adjacency and per-port filter entry. For NAT
 * nodeport VIPs, also registers the UDP nodeport and its mapping.
 * Returns 0 with *vip_index set, or a VNET_API_ERROR_* code.
 */
int lb_vip_add(lb_vip_add_args_t args, u32 *vip_index)
lb_main_t *lbm = &lb_main;
vlib_main_t *vm = vlib_get_main();
lb_vip_type_t type = args.type;
u32 vip_prefix_index = 0;

lb_get_writer_lock();
ip46_prefix_normalize(&(args.prefix), args.plen);

if (!lb_vip_port_find_index_with_lock(&(args.prefix), args.plen,
args.protocol, args.port,
lb_put_writer_lock();
return VNET_API_ERROR_VALUE_EXIST;

/* Make sure we can't add a per-port VIP entry
* when there already is an all-port VIP for the same prefix. */
if ((args.port != 0) &&
!lb_vip_port_find_all_port_vip(&(args.prefix), args.plen, vip_index))
lb_put_writer_lock();
return VNET_API_ERROR_VALUE_EXIST;

/* Make sure we can't add an all-port VIP entry
* when there already is a per-port VIP for the same prefix. */
if ((args.port == 0) &&
!lb_vip_port_find_diff_port(&(args.prefix), args.plen,
args.protocol, args.port, vip_index))
lb_put_writer_lock();
return VNET_API_ERROR_VALUE_EXIST;

/* Make sure all VIP for a given prefix (using different ports) have the same type. */
if ((args.port != 0) &&
!lb_vip_port_find_diff_port(&(args.prefix), args.plen,
args.protocol, args.port, vip_index)
&& (args.type != lbm->vips[*vip_index].type))
lb_put_writer_lock();
return VNET_API_ERROR_INVALID_ARGUMENT;

if (!is_pow2(args.new_length)) {
lb_put_writer_lock();
return VNET_API_ERROR_INVALID_MEMORY_SIZE;

if (ip46_prefix_is_ip4(&(args.prefix), args.plen) &&
!lb_vip_is_ip4(type)) {
lb_put_writer_lock();
return VNET_API_ERROR_INVALID_ADDRESS_FAMILY;

if ((!ip46_prefix_is_ip4(&(args.prefix), args.plen)) &&
!lb_vip_is_ip6(type)) {
lb_put_writer_lock();
return VNET_API_ERROR_INVALID_ADDRESS_FAMILY;

//dscp is a 6-bit field.
if ((type == LB_VIP_TYPE_IP4_L3DSR) &&
(args.encap_args.dscp >= 64) )
lb_put_writer_lock();
return VNET_API_ERROR_VALUE_EXIST;

//Allocate and initialize the VIP.
pool_get(lbm->vips, vip);
memcpy (&(vip->prefix), &(args.prefix), sizeof(args.prefix));
vip->plen = args.plen;
vip->protocol = args.protocol;
vip->port = args.port;
//All-port VIPs store a wildcard protocol.
vip->protocol = (u8)~0;
vip->last_garbage_collection = (u32) vlib_time_now(vlib_get_main());
vip->type = args.type;

if (args.type == LB_VIP_TYPE_IP4_L3DSR) {
vip->encap_args.dscp = args.encap_args.dscp;
else if ((args.type == LB_VIP_TYPE_IP4_NAT4)
||(args.type == LB_VIP_TYPE_IP6_NAT6)) {
vip->encap_args.srv_type = args.encap_args.srv_type;
vip->encap_args.target_port =
clib_host_to_net_u16(args.encap_args.target_port);

vip->flags = LB_VIP_FLAGS_USED;
vip->as_indexes = 0;

for (i = 0; i < LB_N_VIP_COUNTERS; i++) {
vlib_validate_simple_counter(&lbm->vip_counters[i], vip - lbm->vips);
vlib_zero_simple_counter(&lbm->vip_counters[i], vip - lbm->vips);

//Configure new flow table
vip->new_flow_table_mask = args.new_length - 1;
vip->new_flow_table = 0;

//Update flow hash table
lb_vip_update_new_flow_table(vip);

//Create adjacency to direct traffic
lb_vip_add_adjacency(lbm, vip, &vip_prefix_index);

if ( (lb_vip_is_nat4_port(vip) || lb_vip_is_nat6_port(vip))
&& (args.encap_args.srv_type == LB_SRV_TYPE_NODEPORT) )
//Create mapping from nodeport to vip_index
key = clib_host_to_net_u16(args.port);
entry = hash_get_mem (lbm->vip_index_by_nodeport, &key);
lb_put_writer_lock();
return VNET_API_ERROR_VALUE_EXIST;

hash_set_mem (lbm->vip_index_by_nodeport, &key, vip - lbm->vips);

/* receive packets destined to NodeIP:NodePort */
udp_register_dst_port (vm, args.port, lb4_nodeport_node.index, 1);
udp_register_dst_port (vm, args.port, lb6_nodeport_node.index, 0);

*vip_index = vip - lbm->vips;
//Create per-port vip filtering table
lb_vip_add_port_filter(lbm, vip, vip_prefix_index, *vip_index);
vip->vip_prefix_index = vip_prefix_index;

lb_put_writer_lock();
/**
 * Deletes a VIP: removes all its ASs (deferred to GC), tears down the
 * adjacency and per-port filter entry, and flags the VIP unused so the
 * global GC can reclaim it once its ASs are gone.
 */
int lb_vip_del(u32 vip_index)
lb_main_t *lbm = &lb_main;
/* Does not remove default vip, i.e. vip_index = 0 */
lb_get_writer_lock();
if (!(vip = lb_vip_get_by_index(vip_index))) {
lb_put_writer_lock();
return VNET_API_ERROR_NO_SUCH_ENTRY;

//FIXME: This operation is actually not working
//We will need to remove state before performing this.

//Snapshot all AS addresses, then remove them in one call.
ip46_address_t *ass = 0;
pool_foreach(as_index, vip->as_indexes, {
as = &lbm->ass[*as_index];
vec_add1(ass, as->address);
lb_vip_del_ass_withlock(vip_index, ass, vec_len(ass));

lb_vip_del_adjacency(lbm, vip);

//Delete per-port vip filtering entry
lb_vip_del_port_filter(lbm, vip);

//Set the VIP as unused
vip->flags &= ~LB_VIP_FLAGS_USED;

lb_put_writer_lock();
//Register this plugin with VPP's plugin framework.
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
.description = "Load Balancer",
1212 u8 *format_lb_dpo (u8 * s, va_list * va)
1214 index_t index = va_arg (*va, index_t);
1215 CLIB_UNUSED(u32 indent) = va_arg (*va, u32);
1216 lb_main_t *lbm = &lb_main;
1217 lb_vip_t *vip = pool_elt_at_index (lbm->vips, index);
1218 return format (s, "%U", format_lb_vip, vip);
//LB DPOs are not reference-counted by the DPO framework: lock/unlock
//are deliberate no-ops (VIP lifetime is managed by this plugin's GC).
static void lb_dpo_lock (dpo_id_t *dpo) {}
static void lb_dpo_unlock (dpo_id_t *dpo) {}
//fib_node_vft_t .fnv_get callback: the FIB node index is the AS pool
//index; return the fib_node embedded in that AS.
lb_fib_node_get_node (fib_node_index_t index)
lb_main_t *lbm = &lb_main;
lb_as_t *as = pool_elt_at_index (lbm->ass, index);
return (&as->fib_node);
//fib_node_vft_t .fnv_last_lock callback: nothing to do - AS lifetime
//is controlled by the LB garbage collector, not by FIB locks.
lb_fib_node_last_lock_gone (fib_node_t *node)
//Recover the enclosing lb_as_t from a pointer to its embedded
//fib_node member (container-of idiom).
lb_as_from_fib_node (fib_node_t *node)
return ((lb_as_t*)(((char*)node) -
STRUCT_OFFSET_OF(lb_as_t, fib_node)));
//Re-stacks the AS's DPO on the current IP forwarding contribution of
//its next-hop FIB entry, choosing the DPO type from the owning VIP's
//encap. Called on creation and on FIB back-walk notifications.
lb_as_stack (lb_as_t *as)
lb_main_t *lbm = &lb_main;
lb_vip_t *vip = &lbm->vips[as->vip_index];
dpo_type_t dpo_type = 0;

if (lb_vip_is_gre4(vip))
dpo_type = lbm->dpo_gre4_type;
else if (lb_vip_is_gre6(vip))
dpo_type = lbm->dpo_gre6_type;
else if (lb_vip_is_gre4_port(vip))
dpo_type = lbm->dpo_gre4_port_type;
else if (lb_vip_is_gre6_port(vip))
dpo_type = lbm->dpo_gre6_port_type;
else if (lb_vip_is_l3dsr(vip))
dpo_type = lbm->dpo_l3dsr_type;
else if (lb_vip_is_l3dsr_port(vip))
dpo_type = lbm->dpo_l3dsr_port_type;
else if(lb_vip_is_nat4_port(vip))
dpo_type = lbm->dpo_nat4_port_type;
else if (lb_vip_is_nat6_port(vip))
dpo_type = lbm->dpo_nat6_port_type;

lb_vip_is_ip4(vip->type)?DPO_PROTO_IP4:DPO_PROTO_IP6,
fib_entry_contribute_ip_forwarding(
as->next_hop_fib_entry_index));
1275 static fib_node_back_walk_rc_t
1276 lb_fib_node_back_walk_notify (fib_node_t *node,
1277 fib_node_back_walk_ctx_t *ctx)
1279 lb_as_stack(lb_as_from_fib_node(node));
1280 return (FIB_NODE_BACK_WALK_CONTINUE);
/**
 * Enables (is_del == 0) or disables (is_del != 0) the "lb-nat4-in2out"
 * feature on the interface's ip4-unicast arc; the 4th argument to
 * vnet_feature_enable_disable is the enable flag.
 */
int lb_nat4_interface_add_del (u32 sw_if_index, int is_del)
vnet_feature_enable_disable ("ip4-unicast", "lb-nat4-in2out",
sw_if_index, 0, 0, 0);
vnet_feature_enable_disable ("ip4-unicast", "lb-nat4-in2out",
sw_if_index, 1, 0, 0);
/**
 * IPv6 counterpart of lb_nat4_interface_add_del: toggles the
 * "lb-nat6-in2out" feature on the interface's ip6-unicast arc.
 */
int lb_nat6_interface_add_del (u32 sw_if_index, int is_del)
vnet_feature_enable_disable ("ip6-unicast", "lb-nat6-in2out",
sw_if_index, 0, 0, 0);
vnet_feature_enable_disable ("ip6-unicast", "lb-nat6-in2out",
sw_if_index, 1, 0, 0);
//Plugin init: allocates the default VIP and AS (pool index 0 of each
//is therefore reserved), sets configuration defaults, registers the
//per-encap DPO types and the FIB node type, and creates the nodeport
//hash and the bihash tables used by per-port filtering and SNAT.
lb_init (vlib_main_t * vm)
vlib_thread_main_t *tm = vlib_get_thread_main ();
lb_main_t *lbm = &lb_main;
lbm->vnet_main = vnet_get_main ();
lbm->vlib_main = vm;

lb_vip_t *default_vip;
lb_as_t *default_as;
fib_node_vft_t lb_fib_node_vft = {
.fnv_get = lb_fib_node_get_node,
.fnv_last_lock = lb_fib_node_last_lock_gone,
.fnv_back_walk = lb_fib_node_back_walk_notify,
dpo_vft_t lb_vft = {
.dv_lock = lb_dpo_lock,
.dv_unlock = lb_dpo_unlock,
.dv_format = format_lb_dpo,

//Allocate and init default VIP.
pool_get(lbm->vips, default_vip);
default_vip->prefix.ip6.as_u64[0] = 0xffffffffffffffffL;
default_vip->prefix.ip6.as_u64[1] = 0xffffffffffffffffL;
default_vip->protocol = ~0;
default_vip->port = 0;
default_vip->flags = LB_VIP_FLAGS_USED;

vec_validate(lbm->per_cpu, tm->n_vlib_mains - 1);
//Writer lock lives in its own cache line to avoid false sharing.
lbm->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
lbm->writer_lock[0] = 0;
lbm->per_cpu_sticky_buckets = LB_DEFAULT_PER_CPU_STICKY_BUCKETS;
lbm->flow_timeout = LB_DEFAULT_FLOW_TIMEOUT;
//Source addresses default to all-ones until lb_conf() is called.
lbm->ip4_src_address.as_u32 = 0xffffffff;
lbm->ip6_src_address.as_u64[0] = 0xffffffffffffffffL;
lbm->ip6_src_address.as_u64[1] = 0xffffffffffffffffL;
//Register one DPO type per encap, wired to the node tables above.
lbm->dpo_gre4_type = dpo_register_new_type(&lb_vft, lb_dpo_gre4_nodes);
lbm->dpo_gre6_type = dpo_register_new_type(&lb_vft, lb_dpo_gre6_nodes);
lbm->dpo_gre4_port_type = dpo_register_new_type(&lb_vft,
lb_dpo_gre4_port_nodes);
lbm->dpo_gre6_port_type = dpo_register_new_type(&lb_vft,
lb_dpo_gre6_port_nodes);
lbm->dpo_l3dsr_type = dpo_register_new_type(&lb_vft,
lb_dpo_l3dsr_nodes);
lbm->dpo_l3dsr_port_type = dpo_register_new_type(&lb_vft,
lb_dpo_l3dsr_port_nodes);
lbm->dpo_nat4_port_type = dpo_register_new_type(&lb_vft,
lb_dpo_nat4_port_nodes);
lbm->dpo_nat6_port_type = dpo_register_new_type(&lb_vft,
lb_dpo_nat6_port_nodes);
lbm->fib_node_type = fib_node_register_new_type(&lb_fib_node_vft);

//Init AS reference counters
vlib_refcount_init(&lbm->as_refcount);

//Allocate and init default AS.
pool_get(lbm->ass, default_as);
default_as->flags = 0;
default_as->dpo.dpoi_next_node = LB_NEXT_DROP;
default_as->vip_index = ~0;
default_as->address.ip6.as_u64[0] = 0xffffffffffffffffL;
default_as->address.ip6.as_u64[1] = 0xffffffffffffffffL;

lbm->vip_index_by_nodeport
= hash_create_mem (0, sizeof(u16), sizeof (uword));

clib_bihash_init_8_8 (&lbm->vip_index_per_port,
"vip_index_per_port", LB_VIP_PER_PORT_BUCKETS,
LB_VIP_PER_PORT_MEMORY_SIZE);

clib_bihash_init_8_8 (&lbm->mapping_by_as4,
"mapping_by_as4", LB_MAPPING_BUCKETS,
LB_MAPPING_MEMORY_SIZE);

clib_bihash_init_24_8 (&lbm->mapping_by_as6,
"mapping_by_as6", LB_MAPPING_BUCKETS,
LB_MAPPING_MEMORY_SIZE);

//Assign counter names from the lb_foreach_vip_counter x-macro list.
#define _(a,b,c) lbm->vip_counters[c].name = b;
lb_foreach_vip_counter

VLIB_INIT_FUNCTION (lb_init);