2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
18 #include <vnet/gre/packet.h>
19 #include <lb/lbhash.h>
21 #define foreach_lb_error \
23 _(PROTO_NOT_SUPPORTED, "protocol not supported") \
24 _(NO_SERVER, "no configured application server")
27 #define _(sym,str) LB_ERROR_##sym,
33 static char *lb_error_strings[] = {
34 #define _(sym,string) string,
45 format_lb_trace (u8 * s, va_list * args)
47 lb_main_t *lbm = &lb_main;
48 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
49 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
50 lb_trace_t *t = va_arg (*args, lb_trace_t *);
51 if (pool_is_free_index(lbm->vips, t->vip_index)) {
52 s = format(s, "lb vip[%d]: This VIP was freed since capture\n");
54 s = format(s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip, &lbm->vips[t->vip_index]);
56 if (pool_is_free_index(lbm->ass, t->as_index)) {
57 s = format(s, "lb as[%d]: This AS was freed since capture\n");
59 s = format(s, "lb as[%d]: %U\n", t->as_index, format_lb_as, &lbm->ass[t->as_index]);
64 lb_hash_t *lb_get_sticky_table(u32 cpu_index)
66 lb_main_t *lbm = &lb_main;
67 lb_hash_t *sticky_ht = lbm->per_cpu[cpu_index].sticky_ht;
68 //Check if size changed
69 if (PREDICT_FALSE(sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht)))) {
71 //Dereference everything in there
73 lb_hash_foreach_entry(sticky_ht, e) {
74 vlib_refcount_add(&lbm->as_refcount, cpu_index, e->value, -1);
75 vlib_refcount_add(&lbm->as_refcount, cpu_index, 0, -1);
78 lb_hash_free(sticky_ht);
83 if (PREDICT_FALSE(sticky_ht == NULL)) {
84 lbm->per_cpu[cpu_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
85 sticky_ht = lbm->per_cpu[cpu_index].sticky_ht;
86 clib_warning("Regenerated sticky table %p", sticky_ht);
92 sticky_ht->timeout = lbm->flow_timeout;
/*
 * Core load-balancer dispatch function, specialized at compile time by its
 * two u8 flags: is_input_v4 selects IPv4 vs IPv6 parsing of the incoming
 * packet; is_encap_v4 selects GRE-over-IPv4 vs GRE-over-IPv6 encapsulation
 * towards the chosen application server (AS).
 *
 * Per packet: hash the flow tuple, look it up in this worker's sticky
 * table, pick an AS (existing sticky entry, else the VIP's new-flow
 * table), prepend an outer IP+GRE header, and forward via the AS's DPO.
 */
96 static_always_inline uword
97 lb_node_fn (vlib_main_t * vm,
98 vlib_node_runtime_t * node, vlib_frame_t * frame,
99 u8 is_input_v4, //Compile-time parameter stating that is input is v4 (or v6)
100 u8 is_encap_v4) //Compile-time parameter stating that is GRE encap is v4 (or v6)
102 lb_main_t *lbm = &lb_main;
103 vlib_node_runtime_t *error_node = node;
104 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
/* Worker index: used for per-cpu counters, refcounts and the sticky table. */
105 u32 cpu_index = os_get_cpu_number();
/* Timestamp used for sticky-entry timeout accounting. */
106 u32 lb_time = lb_hash_time_now(vm);
/* Per-worker flow table, lazily (re)allocated. */
108 lb_hash_t *sticky_ht = lb_get_sticky_table(cpu_index);
109 from = vlib_frame_vector_args (frame);
110 n_left_from = frame->n_vectors;
111 next_index = node->cached_next_index;
/* Standard vlib loop: drain the input frame into next-node frames. */
113 while (n_left_from > 0)
115 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
116 while (n_left_from > 0 && n_left_to_next > 0)
124 u32 value0, available_index0, hash0;
126 lb_error_t error0 = LB_ERROR_NONE;
/* Prefetch the next buffer's header and data while handling this one. */
128 if (PREDICT_TRUE(n_left_from > 1))
131 p2 = vlib_get_buffer(vm, from[1]);
132 vlib_prefetch_buffer_header(p2, STORE);
133 /* IPv4 + 8 = 28. possibly plus -40 */
134 CLIB_PREFETCH (vlib_buffer_get_current(p2) - 40, 128, STORE);
137 pi0 = to_next[0] = from[0];
143 p0 = vlib_get_buffer (vm, pi0);
/* The VIP index was stashed in the TX adjacency slot upstream. */
144 vip0 = pool_elt_at_index (lbm->vips,
145 vnet_buffer (p0)->ip.adj_index[VLIB_TX]);
/* IPv4 input: flow key from src/dst address plus L4 ports. */
149 ip40 = vlib_buffer_get_current (p0);
150 len0 = clib_net_to_host_u16(ip40->length);
151 key0[0] = (u64) ip40->src_address.as_u32;
152 key0[1] = (u64) ip40->dst_address.as_u32;
/* NOTE(review): ports are read at ip40 + 1, i.e. assuming a 20-byte
 * header with no IP options — confirm upstream guarantees this. */
155 key0[4] = ((u64)((udp_header_t *)(ip40 + 1))->src_port << 32) |
156 ((u64)((udp_header_t *)(ip40 + 1))->dst_port << 16);
158 hash0 = lb_hash_hash(key0);
/* IPv6 input: 128-bit addresses fill key0[0..3]; ports go in key0[4]. */
161 ip60 = vlib_buffer_get_current (p0);
162 len0 = clib_net_to_host_u16(ip60->payload_length) + sizeof(ip6_header_t);
163 key0[0] = ip60->src_address.as_u64[0];
164 key0[1] = ip60->src_address.as_u64[1];
165 key0[2] = ip60->dst_address.as_u64[0];
166 key0[3] = ip60->dst_address.as_u64[1];
/* NOTE(review): assumes the L4 header directly follows the fixed ip6
 * header (no extension headers) — confirm. */
167 key0[4] = ((u64)((udp_header_t *)(ip60 + 1))->src_port << 32) |
168 ((u64)((udp_header_t *)(ip60 + 1))->dst_port << 16);
170 hash0 = lb_hash_hash(key0);
173 //NOTE: This is an ugly trick to not include the VIP index in the hash calculation
174 //but actually use it in the key determination.
175 key0[4] |= ((vip0 - lbm->vips));
/* Sticky lookup: value0 is the AS index of an existing entry, else
 * available_index0 is a free slot for a new flow (~0 when neither). */
177 lb_hash_get(sticky_ht, key0, hash0, lb_time, &available_index0, &value0);
178 if (PREDICT_TRUE(value0 != ~0)) {
179 //Found an existing entry
180 as0 = &lbm->ass[value0];
181 } else if (PREDICT_TRUE(available_index0 != ~0)) {
182 //There is an available slot for a new flow
183 as0 = &lbm->ass[vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index];
/* ass[0] is a sentinel meaning "no AS configured for this VIP". */
184 if (PREDICT_FALSE(as0 == lbm->ass)) { //Special first element
185 error0 = LB_ERROR_NO_SERVER;
187 vlib_increment_simple_counter(&lbm->vip_counters[LB_VIP_COUNTER_TRACKED_SESSION],
188 cpu_index, vip0 - lbm->vips, 1);
191 //TODO: There are race conditions with as0 and vip0 manipulation.
192 //Configuration may be changed, vectors resized, etc...
194 //Dereference previously used
195 vlib_refcount_add(&lbm->as_refcount, cpu_index, lb_hash_available_value(sticky_ht, available_index0), -1);
196 vlib_refcount_add(&lbm->as_refcount, cpu_index, as0 - lbm->ass, 1);
199 //Note that when there is no AS configured, an entry is configured anyway.
200 //But no configured AS is not something that should happen
201 lb_hash_put(sticky_ht, key0, as0 - lbm->ass, available_index0, lb_time);
203 //Could not store new entry in the table
204 as0 = &lbm->ass[vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index];
205 vlib_increment_simple_counter(&lbm->vip_counters[LB_VIP_COUNTER_UNTRACKED_PACKET],
206 cpu_index, vip0 - lbm->vips, 1);
/* GRE-over-IPv4 encap: grow the buffer head and write outer ip4+gre. */
212 vlib_buffer_advance(p0, - sizeof(ip4_header_t) - sizeof(gre_header_t));
213 ip40 = vlib_buffer_get_current(p0);
214 gre0 = (gre_header_t *)(ip40 + 1);
215 ip40->src_address = lbm->ip4_src_address;
216 ip40->dst_address = as0->address.ip4;
217 ip40->ip_version_and_header_length = 0x45;
219 ip40->length = clib_host_to_net_u16(len0 + sizeof(gre_header_t) + sizeof(ip4_header_t));
220 ip40->protocol = IP_PROTOCOL_GRE;
221 ip40->checksum = ip4_header_checksum (ip40);
/* GRE-over-IPv6 encap: grow the buffer head and write outer ip6+gre. */
224 vlib_buffer_advance(p0, - sizeof(ip6_header_t) - sizeof(gre_header_t));
225 ip60 = vlib_buffer_get_current(p0);
226 gre0 = (gre_header_t *)(ip60 + 1);
227 ip60->dst_address = as0->address.ip6;
228 ip60->src_address = lbm->ip6_src_address;
229 ip60->hop_limit = 128;
230 ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32 (0x6<<28);
231 ip60->payload_length = clib_host_to_net_u16(len0 + sizeof(gre_header_t));
232 ip60->protocol = IP_PROTOCOL_GRE;
/* GRE protocol field carries the INNER ethertype: 0x0800 = IPv4,
 * 0x86DD = IPv6, selected by the input (not encap) address family. */
235 gre0->flags_and_version = 0;
236 gre0->protocol = (is_input_v4)?
237 clib_host_to_net_u16(0x0800):
238 clib_host_to_net_u16(0x86DD);
/* Forward through the chosen AS's DPO (adjacency + next node). */
240 vnet_buffer (p0)->ip.adj_index[VLIB_TX] = as0->dpo.dpoi_index;
/* Record VIP/AS indices for packet tracing (see format_lb_trace). */
242 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
244 lb_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
245 tr->as_index = as0 - lbm->ass;
246 tr->vip_index = vip0 - lbm->vips;
249 p0->error = error_node->errors[error0];
250 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
252 as0->dpo.dpoi_next_node);
254 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
257 return frame->n_vectors;
/* Node entry point: IPv6 input (is_input_v4=0), GRE-over-IPv6 encap (is_encap_v4=0). */
261 lb6_gre6_node_fn (vlib_main_t * vm,
262 vlib_node_runtime_t * node, vlib_frame_t * frame)
264 return lb_node_fn(vm, node, frame, 0, 0);
/* Node entry point: IPv6 input (is_input_v4=0), GRE-over-IPv4 encap (is_encap_v4=1). */
268 lb6_gre4_node_fn (vlib_main_t * vm,
269 vlib_node_runtime_t * node, vlib_frame_t * frame)
271 return lb_node_fn(vm, node, frame, 0, 1);
/* Node entry point: IPv4 input (is_input_v4=1), GRE-over-IPv6 encap (is_encap_v4=0). */
275 lb4_gre6_node_fn (vlib_main_t * vm,
276 vlib_node_runtime_t * node, vlib_frame_t * frame)
278 return lb_node_fn(vm, node, frame, 1, 0);
/* Node entry point: IPv4 input (is_input_v4=1), GRE-over-IPv4 encap (is_encap_v4=1). */
282 lb4_gre4_node_fn (vlib_main_t * vm,
283 vlib_node_runtime_t * node, vlib_frame_t * frame)
285 return lb_node_fn(vm, node, frame, 1, 1);
/* Graph-node registration: IPv6 VIP traffic, GRE-over-IPv6 encap variant. */
288 VLIB_REGISTER_NODE (lb6_gre6_node) =
290 .function = lb6_gre6_node_fn,
292 .vector_size = sizeof (u32),
293 .format_trace = format_lb_trace,
295 .n_errors = LB_N_ERROR,
296 .error_strings = lb_error_strings,
298 .n_next_nodes = LB_N_NEXT,
301 [LB_NEXT_DROP] = "error-drop"
/* Graph-node registration: IPv6 VIP traffic, GRE-over-IPv4 encap variant. */
305 VLIB_REGISTER_NODE (lb6_gre4_node) =
307 .function = lb6_gre4_node_fn,
309 .vector_size = sizeof (u32),
310 .format_trace = format_lb_trace,
312 .n_errors = LB_N_ERROR,
313 .error_strings = lb_error_strings,
315 .n_next_nodes = LB_N_NEXT,
318 [LB_NEXT_DROP] = "error-drop"
/* Graph-node registration: IPv4 VIP traffic, GRE-over-IPv6 encap variant. */
322 VLIB_REGISTER_NODE (lb4_gre6_node) =
324 .function = lb4_gre6_node_fn,
326 .vector_size = sizeof (u32),
327 .format_trace = format_lb_trace,
329 .n_errors = LB_N_ERROR,
330 .error_strings = lb_error_strings,
332 .n_next_nodes = LB_N_NEXT,
335 [LB_NEXT_DROP] = "error-drop"
/* Graph-node registration: IPv4 VIP traffic, GRE-over-IPv4 encap variant. */
339 VLIB_REGISTER_NODE (lb4_gre4_node) =
341 .function = lb4_gre4_node_fn,
343 .vector_size = sizeof (u32),
344 .format_trace = format_lb_trace,
346 .n_errors = LB_N_ERROR,
347 .error_strings = lb_error_strings,
349 .n_next_nodes = LB_N_NEXT,
352 [LB_NEXT_DROP] = "error-drop"