/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "../ip/ip_frag.h"
19 #define IP6_MAP_T_DUAL_LOOP
22 IP6_MAPT_NEXT_MAPT_TCP_UDP,
23 IP6_MAPT_NEXT_MAPT_ICMP,
24 IP6_MAPT_NEXT_MAPT_FRAGMENTED,
30 IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
31 IP6_MAPT_ICMP_NEXT_IP4_FRAG,
32 IP6_MAPT_ICMP_NEXT_DROP,
34 } ip6_mapt_icmp_next_t;
37 IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
38 IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
39 IP6_MAPT_TCP_UDP_NEXT_DROP,
40 IP6_MAPT_TCP_UDP_N_NEXT
41 } ip6_mapt_tcp_udp_next_t;
44 IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
45 IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
46 IP6_MAPT_FRAGMENTED_NEXT_DROP,
47 IP6_MAPT_FRAGMENTED_N_NEXT
48 } ip6_mapt_fragmented_next_t;
/*
 * Cache the sender port of an IPv6 first fragment so that later
 * fragments (which carry no L4 header) can be mapped to the same port.
 * NOTE(review): this block appears truncated — the reassembly-struct
 * port update and the function braces/return are not visible here.
 */
static_always_inline int
ip6_map_fragment_cache (ip6_header_t *ip6, ip6_frag_hdr_t *frag, map_domain_t *d, u16 port)
  /* Look up (or create) the IPv4 reassembly entry, keyed by the translated
     IPv4 addresses, the folded 16-bit fragment id and the translated
     protocol (ICMPv6 becomes ICMP). */
  map_ip4_reass_t *r = map_ip4_reass_get(map_get_ip4(&ip6->src_address), ip6_map_t_embedded_address(d, &ip6->dst_address),
                                         frag_id_6to4(frag->identification),
                                         (ip6->protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
  /* map_ip4_reass_get() takes the reassembly lock; release it here. */
  map_ip4_reass_unlock();
/* Returns the cached sender port for a fragment, or -1 when no
 * reassembly entry exists for this fragment's key. */
static_always_inline i32
ip6_map_fragment_get(ip6_header_t *ip6, ip6_frag_hdr_t *frag, map_domain_t *d)
  /* Same lookup key as ip6_map_fragment_cache() above. */
  map_ip4_reass_t *r = map_ip4_reass_get(map_get_ip4(&ip6->src_address), ip6_map_t_embedded_address(d, &ip6->dst_address),
                                         frag_id_6to4(frag->identification),
                                         (ip6->protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
  i32 ret = r?r->port:-1; /* -1 means "port unknown"; caller drops the fragment */
  map_ip4_reass_unlock();
/*
 * Derive the IPv4 TOS byte from the IPv6 traffic-class field
 * (bits 27..20 of the version/traffic-class/flow-label word), unless
 * the build overrides it with a fixed IP6_MAP_T_OVERRIDE_TOS value.
 * NOTE(review): the #else/#endif lines are not visible in this chunk.
 */
static_always_inline u8
ip6_translate_tos(const ip6_header_t *ip6)
#ifdef IP6_MAP_T_OVERRIDE_TOS
  return IP6_MAP_T_OVERRIDE_TOS;
  /* Mask out the 8-bit traffic class and shift it down to bits 7..0. */
  return (clib_net_to_host_u32(ip6->ip_version_traffic_class_and_flow_label) & 0x0ff00000) >> 20;
91 //TODO: Find right place in memory for that
92 static u8 icmp6_to_icmp_updater_pointer_table[] =
/*
 * Translate an ICMPv6 header into its ICMPv4 equivalent in place
 * (types, codes and type-specific fields), following the standard
 * ICMP/ICMPv6 translation rules.  For error messages, also extracts a
 * pointer to the embedded inner IPv6 header and the sender port
 * (via ip6_get_port; -1 when unavailable).
 * NOTE(review): several break/return lines of these switches are not
 * visible in this chunk; only the visible lines are annotated.
 */
static_always_inline int
ip6_icmp_to_icmp6_in_place (icmp46_header_t *icmp, u32 icmp_len,
                            i32 *sender_port, ip6_header_t **inner_ip6)
  switch (icmp->type) {
    case ICMP6_echo_request:
      /* Echo identifier is the third u16 (byte offset 4) in both protocols. */
      *sender_port = ((u16 *)icmp)[2];
      icmp->type = ICMP4_echo_request;
    case ICMP6_echo_reply:
      *sender_port = ((u16 *)icmp)[2];
      icmp->type = ICMP4_echo_reply;
    case ICMP6_destination_unreachable:
      /* Error messages embed the offending IPv6 packet 8 bytes in. */
      *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
      *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
      switch (icmp->code) {
        case ICMP6_destination_unreachable_no_route_to_destination: //0
        case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2
        case ICMP6_destination_unreachable_address_unreachable: //3
          icmp->type = ICMP4_destination_unreachable;
          icmp->code = ICMP4_destination_unreachable_destination_unreachable_host;
        case ICMP6_destination_unreachable_destination_administratively_prohibited: //1
          icmp->type = ICMP4_destination_unreachable;
          icmp->code = ICMP4_destination_unreachable_communication_administratively_prohibited;
        case ICMP6_destination_unreachable_port_unreachable:
          icmp->type = ICMP4_destination_unreachable;
          icmp->code = ICMP4_destination_unreachable_port_unreachable;
    case ICMP6_packet_too_big:
      *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
      *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
      icmp->type = ICMP4_destination_unreachable;
      /* Advertised MTU shrinks by 20: the IPv6 header is 20 bytes
         larger than the IPv4 header that replaces it. */
      u32 advertised_mtu = clib_net_to_host_u32(*((u32 *)(icmp + 1)));
      advertised_mtu -= 20;
      //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20)
      ((u16 *)(icmp))[3] = clib_host_to_net_u16(advertised_mtu);
    case ICMP6_time_exceeded:
      *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
      *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
      icmp->type = ICMP4_time_exceeded;
    case ICMP6_parameter_problem:
      *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
      *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);
      switch (icmp->code) {
        case ICMP6_parameter_problem_erroneous_header_field:
          icmp->type = ICMP4_parameter_problem;
          icmp->code = ICMP4_parameter_problem_pointer_indicates_error;
          /* Remap the IPv6 pointer value to the equivalent IPv4 offset. */
          u32 pointer = clib_net_to_host_u32(*((u32*)(icmp + 1)));
          ((u8*)(icmp + 1))[0] = icmp6_to_icmp_updater_pointer_table[pointer];
        case ICMP6_parameter_problem_unrecognized_next_header:
          icmp->type = ICMP4_destination_unreachable;
          icmp->code = ICMP4_destination_unreachable_port_unreachable;
        case ICMP6_parameter_problem_unrecognized_option:
193 static_always_inline void
194 _ip6_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
196 ip6_header_t *ip6, *inner_ip6;
197 ip4_header_t *ip4, *inner_ip4;
199 icmp46_header_t *icmp;
202 u32 ip4_sadr, inner_ip4_dadr;
204 ip6 = vlib_buffer_get_current(p);
205 ip6_pay_len = clib_net_to_host_u16(ip6->payload_length);
206 icmp = (icmp46_header_t *)(ip6 + 1);
207 ASSERT(ip6_pay_len + sizeof(*ip6) <= p->current_length);
209 if (ip6->protocol != IP_PROTOCOL_ICMP6) {
210 //No extensions headers allowed here
212 *error = MAP_ERROR_MALFORMED;
216 //There are no fragmented ICMP messages, so no extension header for now
218 if (ip6_icmp_to_icmp6_in_place(icmp, ip6_pay_len, &sender_port, &inner_ip6)) {
219 //TODO: In case of 1:1 mapping it is not necessary to have the sender port
220 *error = MAP_ERROR_ICMP;
224 if (sender_port < 0) {
225 // In case of 1:1 mapping, we don't care about the port
226 if(d->ea_bits_len == 0 && d->rules) {
229 *error = MAP_ERROR_ICMP;
235 //Note that this prevents an intermediate IPv6 router from answering the request
236 ip4_sadr = map_get_ip4(&ip6->src_address);
237 if (ip6->src_address.as_u64[0] != map_get_pfx_net(d, ip4_sadr, sender_port) ||
238 ip6->src_address.as_u64[1] != map_get_sfx_net(d, ip4_sadr, sender_port)) {
239 *error = MAP_ERROR_SEC_CHECK;
244 u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset, inner_frag_id;
245 u8 *inner_l4, inner_protocol;
247 //We have two headers to translate
249 // [ IPv6 ]<- ext ->[IC][ IPv6 ]<- ext ->[L4 header ...
251 // [ IPv6 ][IC][ IPv6 ][L4 header ...
252 // [ IPv6 ][IC][ IPv6 ][Fr][L4 header ...
254 // [ IPv4][IC][ IPv4][L4 header ...
256 //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place
257 //We shouldn't have to do it again
258 if (ip6_parse(inner_ip6, ip6_pay_len - 8,
259 &inner_protocol, &inner_l4_offset, &inner_frag_offset)) {
260 *error = MAP_ERROR_MALFORMED;
264 inner_l4 = u8_ptr_add(inner_ip6, inner_l4_offset);
265 inner_ip4 = (ip4_header_t *) u8_ptr_add(inner_l4, - sizeof(*inner_ip4));
266 if (inner_frag_offset) {
267 ip6_frag_hdr_t *inner_frag = (ip6_frag_hdr_t *) u8_ptr_add(inner_ip6, inner_frag_offset);
268 inner_frag_id = frag_id_6to4(inner_frag->identification);
273 //Do the translation of the inner packet
274 if (inner_protocol == IP_PROTOCOL_TCP) {
275 inner_L4_checksum = (u16 *) u8_ptr_add(inner_l4, 16);
276 } else if (inner_protocol == IP_PROTOCOL_UDP) {
277 inner_L4_checksum = (u16 *) u8_ptr_add(inner_l4, 6);
278 } else if (inner_protocol == IP_PROTOCOL_ICMP6) {
279 icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4;
280 csum = inner_icmp->checksum;
281 csum = ip_csum_sub_even(csum, *((u16 *)inner_icmp));
282 //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded
283 inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ?
284 ICMP4_echo_request : ICMP4_echo_reply;
285 csum = ip_csum_add_even(csum, *((u16 *)inner_icmp));
286 inner_icmp->checksum = ip_csum_fold(csum);
287 inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later
288 inner_L4_checksum = &inner_icmp->checksum;
290 *error = MAP_ERROR_BAD_PROTOCOL;
294 csum = *inner_L4_checksum;
295 csum = ip_csum_sub_even(csum, inner_ip6->src_address.as_u64[0]);
296 csum = ip_csum_sub_even(csum, inner_ip6->src_address.as_u64[1]);
297 csum = ip_csum_sub_even(csum, inner_ip6->dst_address.as_u64[0]);
298 csum = ip_csum_sub_even(csum, inner_ip6->dst_address.as_u64[1]);
300 //Sanity check of the outer destination address
301 if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] &&
302 ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1]) {
303 *error = MAP_ERROR_SEC_CHECK;
307 //Security check of inner packet
308 inner_ip4_dadr = map_get_ip4(&inner_ip6->dst_address);
309 if (inner_ip6->dst_address.as_u64[0] != map_get_pfx_net(d, inner_ip4_dadr, sender_port) ||
310 inner_ip6->dst_address.as_u64[1] != map_get_sfx_net(d, inner_ip4_dadr, sender_port)) {
311 *error = MAP_ERROR_SEC_CHECK;
315 inner_ip4->dst_address.as_u32 = inner_ip4_dadr;
316 inner_ip4->src_address.as_u32 = ip6_map_t_embedded_address(d, &inner_ip6->src_address);
317 inner_ip4->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
318 inner_ip4->tos = ip6_translate_tos(inner_ip6);
319 inner_ip4->length = u16_net_add(inner_ip6->payload_length, sizeof(*ip4) + sizeof(*ip6) -
321 inner_ip4->fragment_id = inner_frag_id;
322 inner_ip4->flags_and_fragment_offset = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
323 inner_ip4->ttl = inner_ip6->hop_limit;
324 inner_ip4->protocol = inner_protocol;
325 inner_ip4->checksum = ip4_header_checksum(inner_ip4);
327 if (inner_ip4->protocol == IP_PROTOCOL_ICMP) {
328 //Remove remainings of the pseudo-header in the csum
329 csum = ip_csum_sub_even(csum, clib_host_to_net_u16(IP_PROTOCOL_ICMP6));
330 csum = ip_csum_sub_even(csum, inner_ip4->length - sizeof(*inner_ip4));
332 //Update to new pseudo-header
333 csum = ip_csum_add_even(csum, inner_ip4->src_address.as_u32);
334 csum = ip_csum_add_even(csum, inner_ip4->dst_address.as_u32);
336 *inner_L4_checksum = ip_csum_fold(csum);
338 //Move up icmp header
339 ip4 = (ip4_header_t *) u8_ptr_add(inner_l4, - 2 * sizeof(*ip4) - 8);
340 memcpy(u8_ptr_add(inner_l4, - sizeof(*ip4) - 8), icmp, 8);
341 icmp = (icmp46_header_t *) u8_ptr_add(inner_l4, - sizeof(*ip4) - 8);
343 //Only one header to translate
344 ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
346 vlib_buffer_advance(p, (u32) (((u8 *)ip4) - ((u8 *)ip6)));
348 ip4->dst_address.as_u32 = ip6_map_t_embedded_address(d, &ip6->dst_address);
349 ip4->src_address.as_u32 = ip4_sadr;
350 ip4->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
351 ip4->tos = ip6_translate_tos(ip6);
352 ip4->fragment_id = 0;
353 ip4->flags_and_fragment_offset = 0;
354 ip4->ttl = ip6->hop_limit;
355 ip4->protocol = IP_PROTOCOL_ICMP;
356 //TODO fix the length depending on offset length
357 ip4->length = u16_net_add(ip6->payload_length,
358 (inner_ip6 == NULL)?sizeof(*ip4):(2*sizeof(*ip4) - sizeof(*ip6)));
359 ip4->checksum = ip4_header_checksum(ip4);
361 //TODO: We could do an easy diff-checksum for echo requests/replies
362 //Recompute ICMP checksum
364 csum = ip_incremental_checksum(0, icmp, clib_net_to_host_u16(ip4->length) - sizeof(*ip4));
365 icmp->checksum = ~ip_csum_fold (csum);
// Graph node: translate ICMPv6 packets belonging to a MAP-T domain
// into ICMPv4 and hand them to ip4-lookup (or ip4-frag when the result
// exceeds the domain MTU).
// NOTE(review): the return-type line, the frame parameter and several
// declaration/brace lines are not visible in this chunk.
ip6_map_t_icmp (vlib_main_t *vm,
                vlib_node_runtime_t *node,
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 cpu_index = os_get_cpu_number();

  while (n_left_from > 0) {
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from > 0 && n_left_to_next > 0) {
      ip6_mapt_icmp_next_t next0;
      pi0 = to_next[0] = from[0];
      error0 = MAP_ERROR_NONE;
      next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

      p0 = vlib_get_buffer(vm, pi0);
      // RX counter length is taken before translation rewrites the header.
      len0 = clib_net_to_host_u16(((ip6_header_t *)vlib_buffer_get_current(p0))->payload_length);
      d0 = pool_elt_at_index(map_main.domains, vnet_buffer(p0)->map_t.map_domain_index);
      _ip6_map_t_icmp(d0, p0, &error0);

      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        //Send to fragmentation node if necessary
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;

      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
        next0 = IP6_MAPT_ICMP_NEXT_DROP;

      p0->error = error_node->errors[error0];
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                       to_next, n_left_to_next, pi0,
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
432 ip6_map_t_fragmented (vlib_main_t *vm,
433 vlib_node_runtime_t *node,
436 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
437 from = vlib_frame_vector_args(frame);
438 n_left_from = frame->n_vectors;
439 next_index = node->cached_next_index;
441 while (n_left_from > 0) {
442 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
444 #ifdef IP6_MAP_T_DUAL_LOOP
445 while(n_left_from >= 4 && n_left_to_next >= 2) {
447 vlib_buffer_t *p0, *p1;
448 ip6_header_t *ip60, *ip61;
449 ip6_frag_hdr_t *frag0, *frag1;
450 ip4_header_t *ip40, *ip41;
451 u16 frag_id0, frag_offset0,
452 frag_id1, frag_offset1;
453 u8 frag_more0, frag_more1;
456 pi0 = to_next[0] = from[0];
457 pi1 = to_next[1] = from[1];
463 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
464 next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
465 p0 = vlib_get_buffer(vm, pi0);
466 p1 = vlib_get_buffer(vm, pi1);
467 ip60 = vlib_buffer_get_current(p0);
468 ip61 = vlib_buffer_get_current(p1);
469 frag0 = (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
470 frag1 = (ip6_frag_hdr_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
471 ip40 = (ip4_header_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
472 ip41 = (ip4_header_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
473 vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
474 vlib_buffer_advance(p1, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
476 frag_id0 = frag_id_6to4(frag0->identification);
477 frag_id1 = frag_id_6to4(frag1->identification);
478 frag_more0 = ip6_frag_hdr_more(frag0);
479 frag_more1 = ip6_frag_hdr_more(frag1);
480 frag_offset0 = ip6_frag_hdr_offset(frag0);
481 frag_offset1 = ip6_frag_hdr_offset(frag1);
483 ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
484 ip41->dst_address.as_u32 = vnet_buffer(p1)->map_t.v6.daddr;
485 ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
486 ip41->src_address.as_u32 = vnet_buffer(p1)->map_t.v6.saddr;
487 ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
488 ip41->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
489 ip40->tos = ip6_translate_tos(ip60);
490 ip41->tos = ip6_translate_tos(ip61);
491 ip40->length = u16_net_add(ip60->payload_length,
492 sizeof(*ip40) - vnet_buffer(p0)->map_t.v6.l4_offset + sizeof(*ip60));
493 ip41->length = u16_net_add(ip61->payload_length,
494 sizeof(*ip40) - vnet_buffer(p1)->map_t.v6.l4_offset + sizeof(*ip60));
495 ip40->fragment_id = frag_id0;
496 ip41->fragment_id = frag_id1;
497 ip40->flags_and_fragment_offset =
498 clib_host_to_net_u16(frag_offset0 | (frag_more0?IP4_HEADER_FLAG_MORE_FRAGMENTS:0));
499 ip41->flags_and_fragment_offset =
500 clib_host_to_net_u16(frag_offset1 | (frag_more1?IP4_HEADER_FLAG_MORE_FRAGMENTS:0));
501 ip40->ttl = ip60->hop_limit;
502 ip41->ttl = ip61->hop_limit;
503 ip40->protocol = (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)?
504 IP_PROTOCOL_ICMP:vnet_buffer(p0)->map_t.v6.l4_protocol;
505 ip41->protocol = (vnet_buffer(p1)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)?
506 IP_PROTOCOL_ICMP:vnet_buffer(p1)->map_t.v6.l4_protocol;
507 ip40->checksum = ip4_header_checksum(ip40);
508 ip41->checksum = ip4_header_checksum(ip41);
510 if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
511 vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
512 vnet_buffer(p0)->ip_frag.header_offset = 0;
513 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
514 next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
517 if(vnet_buffer(p1)->map_t.mtu < p1->current_length) {
518 vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
519 vnet_buffer(p1)->ip_frag.header_offset = 0;
520 vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
521 next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
524 vlib_validate_buffer_enqueue_x2(vm, node, next_index,
525 to_next, n_left_to_next, pi0, pi1,
530 while (n_left_from > 0 && n_left_to_next > 0) {
534 ip6_frag_hdr_t *frag0;
541 pi0 = to_next[0] = from[0];
547 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
548 p0 = vlib_get_buffer(vm, pi0);
549 ip60 = vlib_buffer_get_current(p0);
550 frag0 = (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
551 ip40 = (ip4_header_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
552 vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
554 frag_id0 = frag_id_6to4(frag0->identification);
555 frag_more0 = ip6_frag_hdr_more(frag0);
556 frag_offset0 = ip6_frag_hdr_offset(frag0);
558 ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
559 ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
560 ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
561 ip40->tos = ip6_translate_tos(ip60);
562 ip40->length = u16_net_add(ip60->payload_length,
563 sizeof(*ip40) - vnet_buffer(p0)->map_t.v6.l4_offset + sizeof(*ip60));
564 ip40->fragment_id = frag_id0;
565 ip40->flags_and_fragment_offset =
566 clib_host_to_net_u16(frag_offset0 | (frag_more0?IP4_HEADER_FLAG_MORE_FRAGMENTS:0));
567 ip40->ttl = ip60->hop_limit;
568 ip40->protocol = (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)?
569 IP_PROTOCOL_ICMP:vnet_buffer(p0)->map_t.v6.l4_protocol;
570 ip40->checksum = ip4_header_checksum(ip40);
572 if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
573 //Send to fragmentation node if necessary
574 vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
575 vnet_buffer(p0)->ip_frag.header_offset = 0;
576 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
577 next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
580 vlib_validate_buffer_enqueue_x1(vm, node, next_index,
581 to_next, n_left_to_next, pi0,
584 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
586 return frame->n_vectors;
590 ip6_map_t_tcp_udp (vlib_main_t *vm,
591 vlib_node_runtime_t *node,
594 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
595 from = vlib_frame_vector_args(frame);
596 n_left_from = frame->n_vectors;
597 next_index = node->cached_next_index;
598 while (n_left_from > 0) {
599 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
601 #ifdef IP6_MAP_T_DUAL_LOOP
602 while(n_left_from >= 4 && n_left_to_next >= 2) {
604 vlib_buffer_t *p0, *p1;
605 ip6_header_t *ip60, *ip61;
606 ip_csum_t csum0, csum1;
607 ip4_header_t *ip40, *ip41;
608 u16 fragment_id0, flags0, *checksum0,
609 fragment_id1, flags1, *checksum1;
610 ip6_mapt_tcp_udp_next_t next0, next1;
612 pi0 = to_next[0] = from[0];
613 pi1 = to_next[1] = from[1];
618 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
619 next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
621 p0 = vlib_get_buffer(vm, pi0);
622 p1 = vlib_get_buffer(vm, pi1);
623 ip60 = vlib_buffer_get_current(p0);
624 ip61 = vlib_buffer_get_current(p1);
625 ip40 = (ip4_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
626 ip41 = (ip4_header_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
627 vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
628 vlib_buffer_advance(p1, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
629 checksum0 = (u16 *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.checksum_offset);
630 checksum1 = (u16 *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.checksum_offset);
632 csum0 = ip_csum_sub_even(*checksum0, ip60->src_address.as_u64[0]);
633 csum1 = ip_csum_sub_even(*checksum1, ip61->src_address.as_u64[0]);
634 csum0 = ip_csum_sub_even(csum0, ip60->src_address.as_u64[1]);
635 csum1 = ip_csum_sub_even(csum1, ip61->src_address.as_u64[1]);
636 csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[0]);
637 csum1 = ip_csum_sub_even(csum0, ip61->dst_address.as_u64[0]);
638 csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[1]);
639 csum1 = ip_csum_sub_even(csum1, ip61->dst_address.as_u64[1]);
640 csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.daddr);
641 csum1 = ip_csum_add_even(csum1, vnet_buffer(p1)->map_t.v6.daddr);
642 csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.saddr);
643 csum1 = ip_csum_add_even(csum1, vnet_buffer(p1)->map_t.v6.saddr);
644 *checksum0 = ip_csum_fold(csum0);
645 *checksum1 = ip_csum_fold(csum1);
647 if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset)) {
648 ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
649 fragment_id0 = frag_id_6to4(hdr->identification);
650 flags0 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
656 if (PREDICT_FALSE(vnet_buffer(p1)->map_t.v6.frag_offset)) {
657 ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
658 fragment_id1 = frag_id_6to4(hdr->identification);
659 flags1 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
665 ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
666 ip41->dst_address.as_u32 = vnet_buffer(p1)->map_t.v6.daddr;
667 ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
668 ip41->src_address.as_u32 = vnet_buffer(p1)->map_t.v6.saddr;
669 ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
670 ip41->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
671 ip40->tos = ip6_translate_tos(ip60);
672 ip41->tos = ip6_translate_tos(ip61);
673 ip40->length = u16_net_add(ip60->payload_length,
674 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset);
675 ip41->length = u16_net_add(ip61->payload_length,
676 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p1)->map_t.v6.l4_offset);
677 ip40->fragment_id = fragment_id0;
678 ip41->fragment_id = fragment_id1;
679 ip40->flags_and_fragment_offset = flags0;
680 ip41->flags_and_fragment_offset = flags1;
681 ip40->ttl = ip60->hop_limit;
682 ip41->ttl = ip61->hop_limit;
683 ip40->protocol = vnet_buffer(p0)->map_t.v6.l4_protocol;
684 ip41->protocol = vnet_buffer(p1)->map_t.v6.l4_protocol;
685 ip40->checksum = ip4_header_checksum(ip40);
686 ip41->checksum = ip4_header_checksum(ip41);
688 if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
689 vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
690 vnet_buffer(p0)->ip_frag.header_offset = 0;
691 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
692 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
695 if(vnet_buffer(p1)->map_t.mtu < p1->current_length) {
696 vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
697 vnet_buffer(p1)->ip_frag.header_offset = 0;
698 vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
699 next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
702 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
703 n_left_to_next, pi0, pi1, next0, next1);
707 while (n_left_from > 0 && n_left_to_next > 0) {
716 ip6_mapt_tcp_udp_next_t next0;
718 pi0 = to_next[0] = from[0];
723 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
725 p0 = vlib_get_buffer(vm, pi0);
726 ip60 = vlib_buffer_get_current(p0);
727 ip40 = (ip4_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
728 vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
729 checksum0 = (u16 *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.checksum_offset);
731 //TODO: This can probably be optimized
732 csum0 = ip_csum_sub_even(*checksum0, ip60->src_address.as_u64[0]);
733 csum0 = ip_csum_sub_even(csum0, ip60->src_address.as_u64[1]);
734 csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[0]);
735 csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[1]);
736 csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.daddr);
737 csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.saddr);
738 *checksum0 = ip_csum_fold(csum0);
740 if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset)) {
741 //Only the first fragment
742 ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
743 fragment_id0 = frag_id_6to4(hdr->identification);
744 flags0 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
750 ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
751 ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
752 ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
753 ip40->tos = ip6_translate_tos(ip60);
754 ip40->length = u16_net_add(ip60->payload_length,
755 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset);
756 ip40->fragment_id = fragment_id0;
757 ip40->flags_and_fragment_offset = flags0;
758 ip40->ttl = ip60->hop_limit;
759 ip40->protocol = vnet_buffer(p0)->map_t.v6.l4_protocol;
760 ip40->checksum = ip4_header_checksum(ip40);
762 if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
763 //Send to fragmentation node if necessary
764 vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
765 vnet_buffer(p0)->ip_frag.header_offset = 0;
766 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
767 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
770 vlib_validate_buffer_enqueue_x1(vm, node, next_index,
771 to_next, n_left_to_next, pi0,
774 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
776 return frame->n_vectors;
779 static_always_inline void
780 ip6_map_t_classify(vlib_buffer_t *p0, ip6_header_t *ip60,
781 map_domain_t *d0, i32 *src_port0,
782 u8 *error0, ip6_mapt_next_t *next0,
783 u32 l4_len0, ip6_frag_hdr_t *frag0)
785 if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
786 ip6_frag_hdr_offset(frag0))) {
787 *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
788 if(d0->ea_bits_len == 0 && d0->rules) {
791 *src_port0 = ip6_map_fragment_get(ip60, frag0, d0);
792 *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
794 } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) {
795 *error0 = l4_len0 < sizeof(tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
796 vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 16;
797 *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
798 *src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
799 } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) {
800 *error0 = l4_len0 < sizeof(udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
801 vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 6;
802 *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
803 *src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
804 } else if (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) {
805 *error0 = l4_len0 < sizeof(icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
806 *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
807 if(d0->ea_bits_len == 0 && d0->rules) {
809 } else if (((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_reply ||
810 ((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_request) {
811 *src_port0 = (i32) *((u16 *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset + 6));
814 //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
815 *error0 = MAP_ERROR_BAD_PROTOCOL;
820 ip6_map_t (vlib_main_t *vm,
821 vlib_node_runtime_t *node,
824 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
825 vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_t_node.index);
826 vlib_combined_counter_main_t *cm = map_main.domain_counters;
827 u32 cpu_index = os_get_cpu_number();
829 from = vlib_frame_vector_args(frame);
830 n_left_from = frame->n_vectors;
831 next_index = node->cached_next_index;
832 while (n_left_from > 0) {
833 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
835 #ifdef IP6_MAP_T_DUAL_LOOP
836 while (n_left_from >= 4 && n_left_to_next >=2) {
838 vlib_buffer_t *p0, *p1;
839 ip6_header_t *ip60, *ip61;
841 ip6_mapt_next_t next0, next1;
842 u32 l4_len0, l4_len1;
843 i32 src_port0, src_port1;
844 map_domain_t *d0, *d1;
845 ip6_frag_hdr_t *frag0, *frag1;
847 next0 = next1 = 0; //Because compiler whines
849 pi0 = to_next[0] = from[0];
850 pi1 = to_next[1] = from[1];
856 error0 = MAP_ERROR_NONE;
857 error1 = MAP_ERROR_NONE;
859 p0 = vlib_get_buffer(vm, pi0);
860 p1 = vlib_get_buffer(vm, pi1);
861 ip60 = vlib_buffer_get_current(p0);
862 ip61 = vlib_buffer_get_current(p1);
864 saddr0 = map_get_ip4(&ip60->src_address);
865 saddr1 = map_get_ip4(&ip61->src_address);
866 d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
867 (ip4_address_t *)&saddr0,
868 &vnet_buffer(p0)->map_t.map_domain_index, &error0);
869 d1 = ip6_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX],
870 (ip4_address_t *)&saddr1,
871 &vnet_buffer(p1)->map_t.map_domain_index, &error1);
873 vnet_buffer(p0)->map_t.v6.saddr = saddr0;
874 vnet_buffer(p1)->map_t.v6.saddr = saddr1;
875 vnet_buffer(p0)->map_t.v6.daddr = ip6_map_t_embedded_address(d0, &ip60->dst_address);
876 vnet_buffer(p1)->map_t.v6.daddr = ip6_map_t_embedded_address(d1, &ip61->dst_address);
877 vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
878 vnet_buffer(p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
880 if (PREDICT_FALSE(ip6_parse(ip60, p0->current_length,
881 &(vnet_buffer(p0)->map_t.v6.l4_protocol),
882 &(vnet_buffer(p0)->map_t.v6.l4_offset),
883 &(vnet_buffer(p0)->map_t.v6.frag_offset)))) {
884 error0 = MAP_ERROR_MALFORMED;
885 next0 = IP6_MAPT_NEXT_DROP;
888 if (PREDICT_FALSE(ip6_parse(ip61, p1->current_length,
889 &(vnet_buffer(p1)->map_t.v6.l4_protocol),
890 &(vnet_buffer(p1)->map_t.v6.l4_offset),
891 &(vnet_buffer(p1)->map_t.v6.frag_offset)))) {
892 error1 = MAP_ERROR_MALFORMED;
893 next1 = IP6_MAPT_NEXT_DROP;
896 src_port0 = src_port1 = -1;
897 l4_len0 = (u32)clib_net_to_host_u16(ip60->payload_length) +
898 sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset;
899 l4_len1 = (u32)clib_net_to_host_u16(ip61->payload_length) +
900 sizeof(*ip60) - vnet_buffer(p1)->map_t.v6.l4_offset;
901 frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
902 frag1 = (ip6_frag_hdr_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
904 ip6_map_t_classify(p0, ip60, d0, &src_port0, &error0, &next0, l4_len0, frag0);
905 ip6_map_t_classify(p1, ip61, d1, &src_port1, &error1, &next1, l4_len1, frag1);
907 if (PREDICT_FALSE((src_port0 != -1) && (
908 ip60->src_address.as_u64[0] != map_get_pfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0) ||
909 ip60->src_address.as_u64[1] != map_get_sfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0)))) {
910 error0 = MAP_ERROR_SEC_CHECK;
913 if (PREDICT_FALSE((src_port1 != -1) && (
914 ip61->src_address.as_u64[0] != map_get_pfx_net(d1, vnet_buffer(p1)->map_t.v6.saddr, src_port1) ||
915 ip61->src_address.as_u64[1] != map_get_sfx_net(d1, vnet_buffer(p1)->map_t.v6.saddr, src_port1)))) {
916 error1 = MAP_ERROR_SEC_CHECK;
919 if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
920 !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
921 u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset))) &&
922 (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) && (error0 == MAP_ERROR_NONE)) {
923 ip6_map_fragment_cache(ip60,
924 (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset),
928 if (PREDICT_FALSE(vnet_buffer(p1)->map_t.v6.frag_offset &&
929 !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
930 u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset))) &&
931 (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules) && (error1 == MAP_ERROR_NONE)) {
932 ip6_map_fragment_cache(ip61,
933 (ip6_frag_hdr_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset),
937 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) {
938 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
939 vnet_buffer(p0)->map_t.map_domain_index, 1,
940 clib_net_to_host_u16(ip60->payload_length));
943 if (PREDICT_TRUE(error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP)) {
944 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
945 vnet_buffer(p1)->map_t.map_domain_index, 1,
946 clib_net_to_host_u16(ip61->payload_length));
949 next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
950 next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
951 p0->error = error_node->errors[error0];
952 p1->error = error_node->errors[error1];
953 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
957 while (n_left_from > 0 && n_left_to_next > 0) {
965 ip6_frag_hdr_t *frag0;
966 ip6_mapt_next_t next0 = 0;
969 pi0 = to_next[0] = from[0];
974 error0 = MAP_ERROR_NONE;
976 p0 = vlib_get_buffer(vm, pi0);
977 ip60 = vlib_buffer_get_current(p0);
978 //Save saddr in a different variable to not overwrite ip.adj_index
979 saddr = map_get_ip4(&ip60->src_address);
980 d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
981 (ip4_address_t *)&saddr,
982 &vnet_buffer(p0)->map_t.map_domain_index, &error0);
984 //FIXME: What if d0 is null
985 vnet_buffer(p0)->map_t.v6.saddr = saddr;
986 vnet_buffer(p0)->map_t.v6.daddr = ip6_map_t_embedded_address(d0, &ip60->dst_address);
987 vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
989 if (PREDICT_FALSE(ip6_parse(ip60, p0->current_length,
990 &(vnet_buffer(p0)->map_t.v6.l4_protocol),
991 &(vnet_buffer(p0)->map_t.v6.l4_offset),
992 &(vnet_buffer(p0)->map_t.v6.frag_offset)))) {
993 error0 = MAP_ERROR_MALFORMED;
994 next0 = IP6_MAPT_NEXT_DROP;
998 l4_len0 = (u32)clib_net_to_host_u16(ip60->payload_length) +
999 sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset;
1000 frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
1003 if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
1004 ip6_frag_hdr_offset(frag0))) {
1005 src_port0 = ip6_map_fragment_get(ip60, frag0, d0);
1006 error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
1007 next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
1008 } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) {
1009 error0 = l4_len0 < sizeof(tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
1010 vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 16;
1011 next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1012 src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
1013 } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) {
1014 error0 = l4_len0 < sizeof(udp_header_t) ? MAP_ERROR_MALFORMED : error0;
1015 vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 6;
1016 next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1017 src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
1018 } else if (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) {
1019 error0 = l4_len0 < sizeof(icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
1020 next0 = IP6_MAPT_NEXT_MAPT_ICMP;
1021 if (((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_reply ||
1022 ((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->code == ICMP6_echo_request)
1023 src_port0 = (i32) *((u16 *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset + 6));
1025 //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1026 error0 = MAP_ERROR_BAD_PROTOCOL;
1030 if (PREDICT_FALSE((src_port0 != -1) && (
1031 ip60->src_address.as_u64[0] != map_get_pfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0) ||
1032 ip60->src_address.as_u64[1] != map_get_sfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0)))) {
1033 //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
1034 error0 = MAP_ERROR_SEC_CHECK;
1037 //Fragmented first packet needs to be cached for following packets
1038 if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
1039 !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
1040 u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset))) &&
1041 (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) && (error0 == MAP_ERROR_NONE)) {
1042 ip6_map_fragment_cache(ip60,
1043 (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset),
1047 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) {
1048 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
1049 vnet_buffer(p0)->map_t.map_domain_index, 1,
1050 clib_net_to_host_u16(ip60->payload_length));
1053 next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1054 p0->error = error_node->errors[error0];
1055 vlib_validate_buffer_enqueue_x1(vm, node, next_index,
1056 to_next, n_left_to_next, pi0,
1059 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
1061 return frame->n_vectors;
1064 static char *map_t_error_strings[] = {
1065 #define _(sym,string) string,
1070 VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
1071 .function = ip6_map_t_fragmented,
1072 .name = "ip6-map-t-fragmented",
1073 .vector_size = sizeof (u32),
1074 .format_trace = format_map_trace,
1075 .type = VLIB_NODE_TYPE_INTERNAL,
1077 .n_errors = MAP_N_ERROR,
1078 .error_strings = map_t_error_strings,
1080 .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
1082 [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
1083 [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1084 [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
1088 VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
1089 .function = ip6_map_t_icmp,
1090 .name = "ip6-map-t-icmp",
1091 .vector_size = sizeof (u32),
1092 .format_trace = format_map_trace,
1093 .type = VLIB_NODE_TYPE_INTERNAL,
1095 .n_errors = MAP_N_ERROR,
1096 .error_strings = map_t_error_strings,
1098 .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
1100 [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1101 [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1102 [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
1106 VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
1107 .function = ip6_map_t_tcp_udp,
1108 .name = "ip6-map-t-tcp-udp",
1109 .vector_size = sizeof (u32),
1110 .format_trace = format_map_trace,
1111 .type = VLIB_NODE_TYPE_INTERNAL,
1113 .n_errors = MAP_N_ERROR,
1114 .error_strings = map_t_error_strings,
1116 .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1118 [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1119 [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1120 [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1124 VLIB_REGISTER_NODE(ip6_map_t_node) = {
1125 .function = ip6_map_t,
1126 .name = "ip6-map-t",
1127 .vector_size = sizeof(u32),
1128 .format_trace = format_map_trace,
1129 .type = VLIB_NODE_TYPE_INTERNAL,
1131 .n_errors = MAP_N_ERROR,
1132 .error_strings = map_t_error_strings,
1134 .n_next_nodes = IP6_MAPT_N_NEXT,
1136 [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1137 [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1138 [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1139 [IP6_MAPT_NEXT_DROP] = "error-drop",