2 * Copyright (c) 2017 Intel and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <kubeproxy/kp.h>
17 #include <vnet/plugin/plugin.h>
18 #include <vpp/app/version.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/udp/udp.h>
22 //GC runs at most once every so many seconds
23 #define KP_GARBAGE_RUN 60
25 //After so many seconds. It is assumed that inter-core race condition will not occur.
26 #define KP_CONCURRENCY_TIMEOUT 10
// Global writer lock: busy-wait test-and-set spinlock over kp_main.writer_lock[0].
// All control-plane mutations below must hold it; ASSERTs check writer_lock[0].
30 #define kp_get_writer_lock() do {} while(__sync_lock_test_and_set (kp_main.writer_lock, 1))
31 #define kp_put_writer_lock() kp_main.writer_lock[0] = 0
// Forward declaration: restacks a POD's DPO on its FIB entry (defined near the end).
33 static void kp_pod_stack (kp_pod_t *pod);
35 void ip46_prefix_normalize(ip46_address_t *prefix, u8 plen)
38 prefix->as_u64[0] = 0;
39 prefix->as_u64[1] = 0;
40 } else if (plen <= 64) {
41 prefix->as_u64[0] &= clib_host_to_net_u64(0xffffffffffffffffL << (64 - plen));
42 prefix->as_u64[1] = 0;
44 prefix->as_u64[1] &= clib_host_to_net_u64(0xffffffffffffffffL << (128 - plen));
49 uword unformat_ip46_prefix (unformat_input_t * input, va_list * args)
51 ip46_address_t *ip46 = va_arg (*args, ip46_address_t *);
52 u8 *len = va_arg (*args, u8 *);
53 ip46_type_t type = va_arg (*args, ip46_type_t);
56 if ((type != IP46_TYPE_IP6) && unformat(input, "%U/%u", unformat_ip4_address, &ip46->ip4, &l)) {
60 ip46->pad[0] = ip46->pad[1] = ip46->pad[2] = 0;
61 } else if ((type != IP46_TYPE_IP4) && unformat(input, "%U/%u", unformat_ip6_address, &ip46->ip6, &l)) {
71 u8 *format_ip46_prefix (u8 * s, va_list * args)
73 ip46_address_t *ip46 = va_arg (*args, ip46_address_t *);
74 u32 len = va_arg (*args, u32); //va_arg cannot use u8 or u16
75 ip46_type_t type = va_arg (*args, ip46_type_t);
78 if (type == IP46_TYPE_IP4)
80 else if (type == IP46_TYPE_IP6)
83 is_ip4 = (len >= 96) && ip46_address_is_ip4(ip46);
86 format(s, "%U/%d", format_ip4_address, &ip46->ip4, len - 96):
87 format(s, "%U/%d", format_ip6_address, &ip46->ip6, len);
// Graph-node name tables used when registering the two KP DPO types:
// for each NAT flavour (nat4/nat6), the node that handles IPv4 and IPv6
// payloads respectively, indexed by dpo_proto_t.
// NOTE(review): listing is truncated — the enclosing `{`/`};` of the
// designated-initializer arrays are missing from the dump.
90 const static char * const kp_dpo_nat4_ip4[] = { "kp4-nat4" , NULL };
91 const static char * const kp_dpo_nat4_ip6[] = { "kp6-nat4" , NULL };
92 const static char* const * const kp_dpo_nat4_nodes[DPO_PROTO_NUM] =
94 [DPO_PROTO_IP4] = kp_dpo_nat4_ip4,
95 [DPO_PROTO_IP6] = kp_dpo_nat4_ip6,
98 const static char * const kp_dpo_nat6_ip4[] = { "kp4-nat6" , NULL };
99 const static char * const kp_dpo_nat6_ip6[] = { "kp6-nat6" , NULL };
100 const static char* const * const kp_dpo_nat6_nodes[DPO_PROTO_NUM] =
102 [DPO_PROTO_IP4] = kp_dpo_nat6_ip4,
103 [DPO_PROTO_IP6] = kp_dpo_nat6_ip6,
106 u32 kp_hash_time_now(vlib_main_t * vm)
108 return (u32) (vlib_time_now(vm) + 10000);
111 u8 *format_kp_main (u8 * s, va_list * args)
113 vlib_thread_main_t *tm = vlib_get_thread_main();
114 kp_main_t *kpm = &kp_main;
115 s = format(s, "kp_main");
116 s = format(s, " #vips: %u\n", pool_elts(kpm->vips));
117 s = format(s, " #pods: %u\n", pool_elts(kpm->pods) - 1);
120 for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) {
121 kp_hash_t *h = kpm->per_cpu[thread_index].sticky_ht;
123 s = format(s, "core %d\n", thread_index);
124 s = format(s, " timeout: %ds\n", h->timeout);
125 s = format(s, " usage: %d / %d\n", kp_hash_elts(h, kp_hash_time_now(vlib_get_main())), kp_hash_size(h));
// Human-readable names for each VIP type, indexed by kp_vip_type_t.
// NOTE(review): closing `};` missing from the dumped listing.
132 static char *kp_vip_type_strings[] = {
133 [KP_VIP_TYPE_IP4_NAT44] = "ip4-nat44",
134 [KP_VIP_TYPE_IP4_NAT46] = "ip4-nat46",
135 [KP_VIP_TYPE_IP6_NAT64] = "ip6-nat64",
136 [KP_VIP_TYPE_IP6_NAT66] = "ip6-nat66",
139 u8 *format_kp_vip_type (u8 * s, va_list * args)
141 kp_vip_type_t vipt = va_arg (*args, kp_vip_type_t);
143 for (i=0; i<KP_VIP_N_TYPES; i++)
145 return format(s, kp_vip_type_strings[i]);
146 return format(s, "_WRONG_TYPE_");
149 uword unformat_kp_vip_type (unformat_input_t * input, va_list * args)
151 kp_vip_type_t *vipt = va_arg (*args, kp_vip_type_t *);
153 for (i=0; i<KP_VIP_N_TYPES; i++)
154 if (unformat(input, kp_vip_type_strings[i])) {
161 u8 *format_kp_vip (u8 * s, va_list * args)
163 kp_vip_t *vip = va_arg (*args, kp_vip_t *);
164 return format(s, "%U %U port:%u target_port:%u node_port:%u "
165 "new_size:%u #pod:%u%s",
166 format_kp_vip_type, vip->type,
167 format_ip46_prefix, &vip->prefix, vip->plen, IP46_TYPE_ANY,
168 ntohs(vip->port), ntohs(vip->target_port),
169 ntohs(vip->node_port),
170 vip->new_flow_table_mask + 1,
171 pool_elts(vip->pod_indexes),
172 (vip->flags & KP_VIP_FLAGS_USED)?"":" removed");
175 u8 *format_kp_pod (u8 * s, va_list * args)
177 kp_pod_t *pod = va_arg (*args, kp_pod_t *);
178 return format(s, "%U %s", format_ip46_address,
179 &pod->address, IP46_TYPE_ANY,
180 (pod->flags & KP_POD_FLAGS_USED)?"used":"removed");
// Verbose formatter for a VIP: header line, per-VIP counters, per-POD
// bucket/flow usage, then the full Maglev new-flow table.
// NOTE(review): this listing is truncated (gaps in the embedded original
// line numbers) — local declarations (i, count, pod, pod_index), format
// string continuation lines, vec_free(count) and scope braces are missing.
183 u8 *format_kp_vip_detailed (u8 * s, va_list * args)
185 kp_main_t *kpm = &kp_main;
186 kp_vip_t *vip = va_arg (*args, kp_vip_t *);
187 uword indent = format_get_indent (s);
189 s = format(s, "%U %U [%u] %U port:%u target_port:%u node_port:%u%s\n"
191 format_white_space, indent,
192 format_kp_vip_type, vip->type,
193 vip - kpm->vips, format_ip46_prefix, &vip->prefix, vip->plen, IP46_TYPE_ANY,
194 ntohs(vip->port), ntohs(vip->target_port),
195 ntohs(vip->node_port),
196 (vip->flags & KP_VIP_FLAGS_USED)?"":" removed",
197 format_white_space, indent,
198 vip->new_flow_table_mask + 1);
201 s = format(s, "%U counters:\n",
202 format_white_space, indent);
204 for (i=0; i<KP_N_VIP_COUNTERS; i++)
205 s = format(s, "%U %s: %d\n",
206 format_white_space, indent,
207 kpm->vip_counters[i].name,
208 vlib_get_simple_counter(&kpm->vip_counters[i], vip - kpm->vips));
211 s = format(s, "%U #pod:%u\n",
212 format_white_space, indent,
213 pool_elts(vip->pod_indexes));
215 //Let's count the buckets for each POD
217 vec_validate(count, pool_len(kpm->pods)); //Possibly big alloc for not much...
218 kp_new_flow_entry_t *nfe;
219 vec_foreach(nfe, vip->new_flow_table)
220 count[nfe->pod_index]++;
// Per-POD line: flow count comes from the data-plane refcount.
224 pool_foreach(pod_index, vip->pod_indexes, {
225 pod = &kpm->pods[*pod_index];
226 s = format(s, "%U %U %d buckets %d flows dpo:%u %s\n",
227 format_white_space, indent,
228 format_ip46_address, &pod->address, IP46_TYPE_ANY,
229 count[pod - kpm->pods],
230 vlib_refcount_get(&kpm->pod_refcount, pod - kpm->pods),
232 (pod->flags & KP_POD_FLAGS_USED)?"used":" removed");
// Dump the Maglev new-flow table: bucket index -> POD pool index.
238 s = format(s, "%U new flows table:\n", format_white_space, indent);
239 kp_new_flow_entry_t *nfe;
240 vec_foreach(nfe, vip->new_flow_table) {
241 s = format(s, "%U %d: %d\n", format_white_space, indent, nfe - vip->new_flow_table, nfe->pod_index);
253 static int kp_pseudorand_compare(void *a, void *b)
255 kp_pod_t *poda, *podb;
256 kp_main_t *kpm = &kp_main;
257 poda = &kpm->pods[((kp_pseudorand_t *)a)->pod_index];
258 podb = &kpm->pods[((kp_pseudorand_t *)b)->pod_index];
259 return memcmp(&poda->address, &podb->address, sizeof(podb->address));
// Per-VIP garbage collector: frees POD entries that were removed, are past
// the concurrency timeout, and have no remaining data-plane flow references.
// Must be called with the writer lock held; rate-limited to once per
// KP_GARBAGE_RUN seconds per VIP.
// NOTE(review): listing is truncated — the early `return`, local
// declarations (pod, pod_index), and the pool_foreach/function closers are
// missing from the dump.
262 static void kp_vip_garbage_collection(kp_vip_t *vip)
264 kp_main_t *kpm = &kp_main;
265 ASSERT (kpm->writer_lock[0]);
267 u32 now = (u32) vlib_time_now(vlib_get_main());
268 if (!clib_u32_loop_gt(now, vip->last_garbage_collection + KP_GARBAGE_RUN))
271 vip->last_garbage_collection = now;
274 pool_foreach(pod_index, vip->pod_indexes, {
275 pod = &kpm->pods[*pod_index];
276 if (!(pod->flags & KP_POD_FLAGS_USED) && //Not used
277 clib_u32_loop_gt(now, pod->last_used + KP_CONCURRENCY_TIMEOUT) && //Not recently used
278 (vlib_refcount_get(&kpm->pod_refcount, pod - kpm->pods) == 0))
// Detach from the FIB before releasing the pool entries.
280 fib_entry_child_remove(pod->next_hop_fib_entry_index,
281 pod->next_hop_child_index);
282 fib_table_entry_delete_index(pod->next_hop_fib_entry_index,
284 pod->next_hop_fib_entry_index = FIB_NODE_INDEX_INVALID;
286 pool_put(vip->pod_indexes, pod_index);
287 pool_put(kpm->pods, pod);
292 void kp_garbage_collection()
294 kp_main_t *kpm = &kp_main;
295 kp_get_writer_lock();
297 u32 *to_be_removed_vips = 0, *i;
298 pool_foreach(vip, kpm->vips, {
299 kp_vip_garbage_collection(vip);
301 if (!(vip->flags & KP_VIP_FLAGS_USED) &&
302 (pool_elts(vip->pod_indexes) == 0)) {
303 vec_add1(to_be_removed_vips, vip - kpm->vips);
307 vec_foreach(i, to_be_removed_vips) {
308 vip = &kpm->vips[*i];
309 pool_put(kpm->vips, vip);
310 pool_free(vip->pod_indexes);
313 vec_free(to_be_removed_vips);
314 kp_put_writer_lock();
// Rebuilds the VIP's Maglev new-flow table from its currently-used PODs.
// Each POD gets a deterministic (offset, skip) permutation seeded from its
// address hash; buckets are filled round-robin over the sorted PODs until
// the table is full. Must be called with the writer lock held.
// NOTE(review): listing is heavily truncated — local declarations, the
// goto labels (out/finished), the old-table barrier/free logic and several
// loop closers are missing from the dump; left byte-identical.
317 static void kp_vip_update_new_flow_table(kp_vip_t *vip)
319 kp_main_t *kpm = &kp_main;
320 kp_new_flow_entry_t *old_table;
322 kp_new_flow_entry_t *new_flow_table = 0;
324 kp_pseudorand_t *pr, *sort_arr = 0;
327 ASSERT (kpm->writer_lock[0]); //We must have the lock
329 //Check if some POD is configured or not
331 pool_foreach(pod_index, vip->pod_indexes, {
332 pod = &kpm->pods[*pod_index];
333 if (pod->flags & KP_POD_FLAGS_USED) { //At least one POD is in use
335 goto out; //Not sure 'break' works in this macro-loop
// No usable POD: every bucket points at the default (drop) POD, index 0.
341 //Only the default. i.e. no POD
342 vec_validate(new_flow_table, vip->new_flow_table_mask);
343 for (i=0; i<vec_len(new_flow_table); i++)
344 new_flow_table[i].pod_index = 0;
349 //First, let's sort the PODs
351 vec_alloc(sort_arr, pool_elts(vip->pod_indexes));
354 pool_foreach(pod_index, vip->pod_indexes, {
355 pod = &kpm->pods[*pod_index];
356 if (!(pod->flags & KP_POD_FLAGS_USED)) //Not used anymore
359 sort_arr[i].pod_index = pod - kpm->pods;
362 _vec_len(sort_arr) = i;
364 vec_sort_with_function(sort_arr, kp_pseudorand_compare);
366 //Now let's pseudo-randomly generate permutations
367 vec_foreach(pr, sort_arr) {
368 kp_pod_t *pod = &kpm->pods[pr->pod_index];
370 u64 seed = clib_xxhash(pod->address.as_u64[0] ^
371 pod->address.as_u64[1]);
372 /* We have 2^n buckets.
373 * skip must be prime with 2^n.
374 * So skip must be odd.
375 * MagLev actually states that M should be prime,
376 * but this has a big computation cost (% operation).
377 * Using 2^n is better (& operation).
379 pr->skip = ((seed & 0xffffffff) | 1) & vip->new_flow_table_mask;
380 pr->last = (seed >> 32) & vip->new_flow_table_mask;
383 //Let's create a new flow table
384 vec_validate(new_flow_table, vip->new_flow_table_mask);
385 for (i=0; i<vec_len(new_flow_table); i++)
386 new_flow_table[i].pod_index = ~0;
// Maglev fill: walk each POD's permutation, claiming empty buckets.
390 vec_foreach(pr, sort_arr) {
393 pr->last = (pr->last + pr->skip) & vip->new_flow_table_mask;
394 if (new_flow_table[last].pod_index == ~0) {
395 new_flow_table[last].pod_index = pr->pod_index;
400 if (done == vec_len(new_flow_table))
409 //Count number of changed entries
411 for (i=0; i<vec_len(new_flow_table); i++)
412 if (vip->new_flow_table == 0 ||
413 new_flow_table[i].pod_index != vip->new_flow_table[i].pod_index)
// Swap in the new table; the old one is kept in old_table for deferred free.
416 old_table = vip->new_flow_table;
417 vip->new_flow_table = new_flow_table;
421 int kp_conf(u32 per_cpu_sticky_buckets, u32 flow_timeout)
423 kp_main_t *kpm = &kp_main;
425 if (!is_pow2(per_cpu_sticky_buckets))
426 return VNET_API_ERROR_INVALID_MEMORY_SIZE;
428 kp_get_writer_lock(); //Not exactly necessary but just a reminder that it exists for my future self
429 kpm->per_cpu_sticky_buckets = per_cpu_sticky_buckets;
430 kpm->flow_timeout = flow_timeout;
431 kp_put_writer_lock();
436 int kp_vip_find_index_with_lock(ip46_address_t *prefix, u8 plen, u32 *vip_index)
438 kp_main_t *kpm = &kp_main;
440 ASSERT (kpm->writer_lock[0]); //This must be called with the lock owned
441 ip46_prefix_normalize(prefix, plen);
442 pool_foreach(vip, kpm->vips, {
443 if ((vip->flags & KP_POD_FLAGS_USED) &&
445 vip->prefix.as_u64[0] == prefix->as_u64[0] &&
446 vip->prefix.as_u64[1] == prefix->as_u64[1]) {
447 *vip_index = vip - kpm->vips;
451 return VNET_API_ERROR_NO_SUCH_ENTRY;
454 int kp_vip_find_index(ip46_address_t *prefix, u8 plen, u32 *vip_index)
457 kp_get_writer_lock();
458 ret = kp_vip_find_index_with_lock(prefix, plen, vip_index);
459 kp_put_writer_lock();
463 static int kp_pod_find_index_vip(kp_vip_t *vip, ip46_address_t *address, u32 *pod_index)
465 kp_main_t *kpm = &kp_main;
466 ASSERT (kpm->writer_lock[0]); //This must be called with the lock owned
469 pool_foreach(podi, vip->pod_indexes, {
470 pod = &kpm->pods[*podi];
471 if (pod->vip_index == (vip - kpm->vips) &&
472 pod->address.as_u64[0] == address->as_u64[0] &&
473 pod->address.as_u64[1] == address->as_u64[1]) {
474 *pod_index = pod - kpm->pods;
// Add `n` POD backends to a VIP. Validates (no duplicates, matching address
// family, not already present), creates pool entries, attaches each POD to
// the FIB as a child so forwarding changes trigger restacking, installs the
// SNAT static mapping, then rebuilds the Maglev flow table.
// Takes the writer lock for the whole operation.
// NOTE(review): listing is heavily truncated — the surrounding while(n--)
// loop, local declarations (vip, i, n2, ip, pod, pod_index), the fib_prefix
// fp_len/fp_proto-else lines, the fib_entry_child_add arguments, the IPv6
// SNAT branch and several closers are missing from the dump; left
// byte-identical. The double `;;` on the `m->vip.ip4` line is harmless.
481 int kp_vip_add_pods(u32 vip_index, ip46_address_t *addresses, u32 n)
483 kp_main_t *kpm = &kp_main;
484 kp_get_writer_lock();
486 if (!(vip = kp_vip_get_by_index(vip_index))) {
487 kp_put_writer_lock();
488 return VNET_API_ERROR_NO_SUCH_ENTRY;
491 ip46_type_t type = kp_vip_is_nat4(vip)?IP46_TYPE_IP4:IP46_TYPE_IP6;
492 u32 *to_be_added = 0;
493 u32 *to_be_updated = 0;
496 kp_snat_mapping_t *m;
497 kp_snat4_key_t m_key4;
498 clib_bihash_kv_8_8_t kv;
// Classify each address: already present (error if used, revive if not),
// wrong family (error), or genuinely new.
503 if (!kp_pod_find_index_vip(vip, &addresses[n], &i)) {
504 if (kpm->pods[i].flags & KP_POD_FLAGS_USED) {
505 vec_free(to_be_added);
506 vec_free(to_be_updated);
507 kp_put_writer_lock();
508 return VNET_API_ERROR_VALUE_EXIST;
510 vec_add1(to_be_updated, i);
514 if (ip46_address_type(&addresses[n]) != type) {
515 vec_free(to_be_added);
516 vec_free(to_be_updated);
517 kp_put_writer_lock();
518 return VNET_API_ERROR_INVALID_ADDRESS_FAMILY;
523 while(n2--) //Check for duplicates
524 if (addresses[n2].as_u64[0] == addresses[n].as_u64[0] &&
525 addresses[n2].as_u64[1] == addresses[n].as_u64[1])
529 vec_add1(to_be_added, n);
// Revive PODs that existed but were flagged removed.
536 vec_foreach(ip, to_be_updated) {
537 kpm->pods[*ip].flags = KP_POD_FLAGS_USED;
539 vec_free(to_be_updated);
541 //Create those who have to be created
542 vec_foreach(ip, to_be_added) {
545 pool_get(kpm->pods, pod);
546 pod->address = addresses[*ip];
547 pod->flags = KP_POD_FLAGS_USED;
548 pod->vip_index = vip_index;
549 pool_get(vip->pod_indexes, pod_index);
550 *pod_index = pod - kpm->pods;
553 * become a child of the FIB entry
554 * so we are informed when its forwarding changes
556 fib_prefix_t nh = {};
557 if (kp_vip_is_nat4(vip)) {
558 nh.fp_addr.ip4 = pod->address.ip4;
560 nh.fp_proto = FIB_PROTOCOL_IP4;
562 nh.fp_addr.ip6 = pod->address.ip6;
564 nh.fp_proto = FIB_PROTOCOL_IP6;
567 pod->next_hop_fib_entry_index =
568 fib_table_entry_special_add(0,
571 FIB_ENTRY_FLAG_NONE);
572 pod->next_hop_child_index =
573 fib_entry_child_add(pod->next_hop_fib_entry_index,
579 /* Add SNAT static mapping */
580 pool_get (kpm->snat_mappings, m);
581 memset (m, 0, sizeof (*m));
582 if (kp_vip_is_nat4(vip)) {
583 m_key4.addr = pod->address.ip4;
584 m_key4.port = vip->target_port;
586 m_key4.fib_index = 0;
588 m->vip.ip4 = vip->prefix.ip4;;
589 m->node_ip.ip4.as_u32 = 0;
590 m->pod_ip.ip4 = pod->address.ip4;
592 m->node_ip_is_ipv6 = 0;
593 m->pod_ip_is_ipv6 = 0;
595 m->node_port = vip->node_port;
596 m->target_port = vip->target_port;
600 kv.key = m_key4.as_u64;
601 kv.value = m - kpm->snat_mappings;
602 clib_bihash_add_del_8_8(&kpm->mapping_by_pod, &kv, 1);
608 vec_free(to_be_added);
// All PODs exist: recompute the Maglev table so traffic spreads onto them.
611 kp_vip_update_new_flow_table(vip);
613 //Garbage collection maybe
614 kp_vip_garbage_collection(vip);
616 kp_put_writer_lock();
// Remove `n` POD backends from a VIP (writer lock must already be held).
// PODs are only flagged unused and timestamped; actual freeing happens
// later in kp_vip_garbage_collection() once data-plane flows drain.
// NOTE(review): listing is truncated — the while(n--) loop, local
// declarations (vip, i, ip, indexes, n2), duplicate-loop scaffolding,
// vec_free(indexes) and the final return are missing from the dump.
620 int kp_vip_del_pods_withlock(u32 vip_index, ip46_address_t *addresses, u32 n)
622 kp_main_t *kpm = &kp_main;
623 u32 now = (u32) vlib_time_now(vlib_get_main());
627 if (!(vip = kp_vip_get_by_index(vip_index))) {
628 return VNET_API_ERROR_NO_SUCH_ENTRY;
// Every requested address must correspond to an existing POD.
634 if (kp_pod_find_index_vip(vip, &addresses[n], &i)) {
636 return VNET_API_ERROR_NO_SUCH_ENTRY;
639 if (n) { //Check for duplicates
642 if (addresses[n2].as_u64[0] == addresses[n].as_u64[0] &&
643 addresses[n2].as_u64[1] == addresses[n].as_u64[1])
648 vec_add1(indexes, i);
653 //Garbage collection maybe
654 kp_vip_garbage_collection(vip);
656 if (indexes != NULL) {
657 vec_foreach(ip, indexes) {
658 kpm->pods[*ip].flags &= ~KP_POD_FLAGS_USED;
659 kpm->pods[*ip].last_used = now;
// Rebuild the Maglev table without the removed PODs.
663 kp_vip_update_new_flow_table(vip);
670 int kp_vip_del_pods(u32 vip_index, ip46_address_t *addresses, u32 n)
672 kp_get_writer_lock();
673 int ret = kp_vip_del_pods_withlock(vip_index, addresses, n);
674 kp_put_writer_lock();
679 * Add the VIP adjacency to the ip4 or ip6 fib
681 static void kp_vip_add_adjacency(kp_main_t *kpm, kp_vip_t *vip)
683 dpo_proto_t proto = 0;
684 dpo_id_t dpo = DPO_INVALID;
685 fib_prefix_t pfx = {};
686 if (kp_vip_is_ip4(vip)) {
687 pfx.fp_addr.ip4 = vip->prefix.ip4;
688 pfx.fp_len = vip->plen - 96;
689 pfx.fp_proto = FIB_PROTOCOL_IP4;
690 proto = DPO_PROTO_IP4;
692 pfx.fp_addr.ip6 = vip->prefix.ip6;
693 pfx.fp_len = vip->plen;
694 pfx.fp_proto = FIB_PROTOCOL_IP6;
695 proto = DPO_PROTO_IP6;
697 dpo_set(&dpo, kp_vip_is_nat4(vip)?kpm->dpo_nat4_type:kpm->dpo_nat6_type,
698 proto, vip - kpm->vips);
699 fib_table_entry_special_dpo_add(0,
701 FIB_SOURCE_PLUGIN_HI,
702 FIB_ENTRY_FLAG_EXCLUSIVE,
708 * Deletes the adjacency associated with the VIP
710 static void kp_vip_del_adjacency(kp_main_t *kpm, kp_vip_t *vip)
712 fib_prefix_t pfx = {};
713 if (kp_vip_is_ip4(vip)) {
714 pfx.fp_addr.ip4 = vip->prefix.ip4;
715 pfx.fp_len = vip->plen - 96;
716 pfx.fp_proto = FIB_PROTOCOL_IP4;
718 pfx.fp_addr.ip6 = vip->prefix.ip6;
719 pfx.fp_len = vip->plen;
720 pfx.fp_proto = FIB_PROTOCOL_IP6;
722 fib_table_entry_special_remove(0, &pfx, FIB_SOURCE_PLUGIN_HI);
725 int kp_vip_add(ip46_address_t *prefix, u8 plen, kp_vip_type_t type,
726 u32 new_length, u32 *vip_index,
727 u16 port, u16 target_port, u16 node_port)
729 kp_main_t *kpm = &kp_main;
730 vlib_main_t *vm = kpm->vlib_main;
735 kp_get_writer_lock();
736 ip46_prefix_normalize(prefix, plen);
738 if (!kp_vip_find_index_with_lock(prefix, plen, vip_index)) {
739 kp_put_writer_lock();
740 return VNET_API_ERROR_VALUE_EXIST;
743 if (!is_pow2(new_length)) {
744 kp_put_writer_lock();
745 return VNET_API_ERROR_INVALID_MEMORY_SIZE;
748 if (ip46_prefix_is_ip4(prefix, plen) &&
749 (type != KP_VIP_TYPE_IP4_NAT44) &&
750 (type != KP_VIP_TYPE_IP4_NAT46)) {
751 kp_put_writer_lock();
752 return VNET_API_ERROR_INVALID_ADDRESS_FAMILY;
757 pool_get(kpm->vips, vip);
760 vip->prefix = *prefix;
762 vip->port = clib_host_to_net_u16(port);
763 vip->target_port = clib_host_to_net_u16(target_port);
764 vip->node_port = clib_host_to_net_u16(node_port);
765 vip->last_garbage_collection = (u32) vlib_time_now(vlib_get_main());
767 vip->flags = KP_VIP_FLAGS_USED;
768 vip->pod_indexes = 0;
772 for (i = 0; i < KP_N_VIP_COUNTERS; i++) {
773 vlib_validate_simple_counter(&kpm->vip_counters[i], vip - kpm->vips);
774 vlib_zero_simple_counter(&kpm->vip_counters[i], vip - kpm->vips);
777 //Configure new flow table
778 vip->new_flow_table_mask = new_length - 1;
779 vip->new_flow_table = 0;
781 //Create a new flow hash table full of the default entry
782 kp_vip_update_new_flow_table(vip);
784 //Create adjacency to direct traffic
785 kp_vip_add_adjacency(kpm, vip);
787 //Create maping from nodeport to vip_index
788 key = clib_host_to_net_u16(node_port);
789 entry = hash_get_mem (kpm->nodeport_by_key, &key);
791 kp_put_writer_lock();
792 return VNET_API_ERROR_VALUE_EXIST;
795 key_copy = clib_mem_alloc (sizeof (*key_copy));
796 clib_memcpy (key_copy, &key, sizeof (*key_copy));
797 hash_set_mem (kpm->nodeport_by_key, key_copy, vip - kpm->vips);
799 /* receive packets destined to NodeIP:NodePort */
800 udp_register_dst_port (vm, node_port, kp4_nodeport_node.index, 1);
801 udp_register_dst_port (vm, node_port, kp6_nodeport_node.index, 0);
804 *vip_index = vip - kpm->vips;
806 kp_put_writer_lock();
// Delete a VIP: flags all of its PODs removed, withdraws the FIB adjacency
// and marks the VIP unused; the pools are reclaimed later by the garbage
// collector. Takes the writer lock.
// NOTE(review): listing is truncated — local declarations (vip, pod,
// pod_index), the pool_foreach closer, vec_free(pods) and the final return
// are missing from the dump. The FIXME below is original.
810 int kp_vip_del(u32 vip_index)
812 kp_main_t *kpm = &kp_main;
814 kp_get_writer_lock();
815 if (!(vip = kp_vip_get_by_index(vip_index))) {
816 kp_put_writer_lock();
817 return VNET_API_ERROR_NO_SUCH_ENTRY;
820 //FIXME: This operation is actually not working
821 //We will need to remove state before performing this.
// Snapshot POD addresses first: del_pods mutates vip->pod_indexes.
825 ip46_address_t *pods = 0;
828 pool_foreach(pod_index, vip->pod_indexes, {
829 pod = &kpm->pods[*pod_index];
830 vec_add1(pods, pod->address);
833 kp_vip_del_pods_withlock(vip_index, pods, vec_len(pods));
838 kp_vip_del_adjacency(kpm, vip);
840 //Mark the VIP as unused
841 vip->flags &= ~KP_VIP_FLAGS_USED;
843 kp_put_writer_lock();
// Register this plugin with VPP (shown by "show plugins").
// NOTE(review): closing `};` missing from the dumped listing.
848 VLIB_PLUGIN_REGISTER () = {
849 .version = VPP_BUILD_VER,
850 .description = "kube-proxy data plane",
854 u8 *format_kp_dpo (u8 * s, va_list * va)
856 index_t index = va_arg (*va, index_t);
857 CLIB_UNUSED(u32 indent) = va_arg (*va, u32);
858 kp_main_t *kpm = &kp_main;
859 kp_vip_t *vip = pool_elt_at_index (kpm->vips, index);
860 return format (s, "%U", format_kp_vip, vip);
// KP DPOs are indexed by VIP pool slot and their lifetime is managed by
// the VIP pool, not by DPO reference counts — lock/unlock are no-ops.
863 static void kp_dpo_lock (dpo_id_t *dpo) {}
864 static void kp_dpo_unlock (dpo_id_t *dpo) {}
867 kp_fib_node_get_node (fib_node_index_t index)
869 kp_main_t *kpm = &kp_main;
870 kp_pod_t *pod = pool_elt_at_index (kpm->pods, index);
871 return (&pod->fib_node);
875 kp_fib_node_last_lock_gone (fib_node_t *node)
880 kp_pod_from_fib_node (fib_node_t *node)
882 return ((kp_pod_t*)(((char*)node) -
883 STRUCT_OFFSET_OF(kp_pod_t, fib_node)));
887 kp_pod_stack (kp_pod_t *pod)
889 kp_main_t *kpm = &kp_main;
890 kp_vip_t *vip = &kpm->vips[pod->vip_index];
891 dpo_stack(kp_vip_is_nat4(vip)?kpm->dpo_nat4_type:kpm->dpo_nat6_type,
892 kp_vip_is_ip4(vip)?DPO_PROTO_IP4:DPO_PROTO_IP6,
894 fib_entry_contribute_ip_forwarding(
895 pod->next_hop_fib_entry_index));
898 static fib_node_back_walk_rc_t
899 kp_fib_node_back_walk_notify (fib_node_t *node,
900 fib_node_back_walk_ctx_t *ctx)
902 kp_pod_stack(kp_pod_from_fib_node(node));
903 return (FIB_NODE_BACK_WALK_CONTINUE);
906 int kp_nat4_interface_add_del (u32 sw_if_index, int is_del)
910 vnet_feature_enable_disable ("ip4-unicast", "kp-nat4-in2out",
911 sw_if_index, 0, 0, 0);
915 vnet_feature_enable_disable ("ip4-unicast", "kp-nat4-in2out",
916 sw_if_index, 1, 0, 0);
// Plugin init: allocates per-core state and the writer lock, registers the
// two KP DPO types and the FIB node type, initializes POD refcounting,
// creates the reserved default (drop) POD at pool index 0, and sets up the
// nodeport hash, SNAT bihash and VIP counter names.
// NOTE(review): listing is truncated — the `clib_error_t *` return-type
// line, `kpm->vlib_main = vm`, the `};` closers of the two vfts, the
// `dpo_vft_t kp_vft = {` opener, `kpm->nodeport_by_key` lvalue line,
// `#undef _` and the final `return NULL;` are missing from the dump.
923 kp_init (vlib_main_t * vm)
925 vlib_thread_main_t *tm = vlib_get_thread_main ();
926 kp_main_t *kpm = &kp_main;
927 kpm->vnet_main = vnet_get_main ();
930 kp_pod_t *default_pod;
931 fib_node_vft_t kp_fib_node_vft = {
932 .fnv_get = kp_fib_node_get_node,
933 .fnv_last_lock = kp_fib_node_last_lock_gone,
934 .fnv_back_walk = kp_fib_node_back_walk_notify,
937 .dv_lock = kp_dpo_lock,
938 .dv_unlock = kp_dpo_unlock,
939 .dv_format = format_kp_dpo,
944 vec_validate(kpm->per_cpu, tm->n_vlib_mains - 1);
// Cache-line-aligned lock word to avoid false sharing with readers.
945 kpm->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
946 kpm->writer_lock[0] = 0;
947 kpm->per_cpu_sticky_buckets = KP_DEFAULT_PER_CPU_STICKY_BUCKETS;
948 kpm->flow_timeout = KP_DEFAULT_FLOW_TIMEOUT;
949 kpm->dpo_nat4_type = dpo_register_new_type(&kp_vft, kp_dpo_nat4_nodes);
950 kpm->dpo_nat6_type = dpo_register_new_type(&kp_vft, kp_dpo_nat6_nodes);
951 kpm->fib_node_type = fib_node_register_new_type(&kp_fib_node_vft);
953 //Init POD reference counters
954 vlib_refcount_init(&kpm->pod_refcount);
956 //Allocate and init default POD.
958 pool_get(kpm->pods, default_pod);
959 default_pod->flags = 0;
960 default_pod->dpo.dpoi_next_node = KP_NEXT_DROP;
961 default_pod->vip_index = ~0;
// All-ones sentinel address so the default POD never matches a lookup.
962 default_pod->address.ip6.as_u64[0] = 0xffffffffffffffffL;
963 default_pod->address.ip6.as_u64[1] = 0xffffffffffffffffL;
966 = hash_create_mem (0, sizeof(u16), sizeof (uword));
968 clib_bihash_init_8_8 (&kpm->mapping_by_pod,
969 "mapping_by_pod", KP_MAPPING_BUCKETS,
970 KP_MAPPING_MEMORY_SIZE);
// Assign counter names from the x-macro list of VIP counters.
972 #define _(a,b,c) kpm->vip_counters[c].name = b;
973 kp_foreach_vip_counter
978 VLIB_INIT_FUNCTION (kp_init);