u16 ethertype = 0;
u16 l2hdr_sz = 0;
- ASSERT (is_ip4 ^ is_ip6);
+ ASSERT (!(is_ip4 && is_ip6));
if (is_l2)
{
vnet_calc_checksums_inline
(vm, b[0],
b[0]->flags & VNET_BUFFER_F_IS_IP4,
- b[0]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
+ b[0]->flags & VNET_BUFFER_F_IS_IP6);
if (b[1]->flags & vnet_buffer_offload_flags)
vnet_calc_checksums_inline
(vm, b[1],
b[1]->flags & VNET_BUFFER_F_IS_IP4,
- b[1]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
+ b[1]->flags & VNET_BUFFER_F_IS_IP6);
if (b[2]->flags & vnet_buffer_offload_flags)
vnet_calc_checksums_inline
(vm, b[2],
b[2]->flags & VNET_BUFFER_F_IS_IP4,
- b[2]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
+ b[2]->flags & VNET_BUFFER_F_IS_IP6);
if (b[3]->flags & vnet_buffer_offload_flags)
vnet_calc_checksums_inline
(vm, b[3],
b[3]->flags & VNET_BUFFER_F_IS_IP4,
- b[3]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
+ b[3]->flags & VNET_BUFFER_F_IS_IP6);
}
}
b += 4;
vnet_calc_checksums_inline
(vm, b[0],
b[0]->flags & VNET_BUFFER_F_IS_IP4,
- b[0]->flags & VNET_BUFFER_F_IS_IP6, 1 /* with gso */ );
+ b[0]->flags & VNET_BUFFER_F_IS_IP6);
}
b += 1;
}
static_always_inline void
vnet_calc_checksums_inline (vlib_main_t * vm, vlib_buffer_t * b,
- int is_ip4, int is_ip6, int with_gso)
+ int is_ip4, int is_ip6)
{
ip4_header_t *ip4;
ip6_header_t *ip6;
tcp_header_t *th;
udp_header_t *uh;
- if (with_gso)
- {
- generic_header_offset_t gho = { 0 };
- vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4,
- is_ip6);
-
- ASSERT (gho.gho_flags ^ (GHO_F_IP4 | GHO_F_IP6));
-
- vnet_get_inner_header (b, &gho);
-
- ip4 = (ip4_header_t *)
- (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
- ip6 = (ip6_header_t *)
- (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
- th = (tcp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
- uh = (udp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
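+ /* is_ip4 and is_ip6 are mutually exclusive */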
+ ASSERT (!(is_ip4 && is_ip6));
- if (gho.gho_flags & GHO_F_IP4)
- {
- vnet_calc_ip4_checksums (vm, b, ip4, th, uh);
- }
- else if (gho.gho_flags & GHO_F_IP6)
- {
- vnet_calc_ip6_checksums (vm, b, ip6, th, uh);
- }
+ ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
+ ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
+ th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
+ uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
- vnet_get_outer_header (b, &gho);
+ if (is_ip4)
+ {
+ vnet_calc_ip4_checksums (vm, b, ip4, th, uh);
}
- else
+ else if (is_ip6)
{
- ASSERT (!(is_ip4 && is_ip6));
-
- ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
- ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
- th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
- uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
-
- if (is_ip4)
- {
- vnet_calc_ip4_checksums (vm, b, ip4, th, uh);
- }
- if (is_ip6)
- {
- vnet_calc_ip6_checksums (vm, b, ip6, th, uh);
- }
+ vnet_calc_ip6_checksums (vm, b, ip6, th, uh);
}
+
b->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
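For context, the per-protocol helpers called above are outside this hunk. A minimal sketch of what vnet_calc_ip4_checksums is assumed to do (built on the generic ip4_header_checksum and ip4_tcp_udp_compute_checksum helpers; the real helper may differ) is:

static_always_inline void
example_calc_ip4_checksums (vlib_main_t * vm, vlib_buffer_t * b,
			    ip4_header_t * ip4, tcp_header_t * th,
			    udp_header_t * uh)
{
  /* refresh the IPv4 header checksum when IP checksum offload is requested */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
    ip4->checksum = ip4_header_checksum (ip4);

  /* compute the L4 checksum over the pseudo-header and payload */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
  else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
    uh->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
}

The IPv6 variant would use ip6_tcp_udp_icmp_compute_checksum instead and skip the IP header checksum, since IPv6 carries none.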
next[0] = next_index;
if (is_midchain)
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
- 0 /* is_ip6 */ ,
- 0 /* with gso */ );
+ 0 /* is_ip6 */ );
}
else
{
next[1] = next_index;
if (is_midchain)
vnet_calc_checksums_inline (vm, b[1], 1 /* is_ip4 */ ,
- 0 /* is_ip6 */ ,
- 0 /* with gso */ );
+ 0 /* is_ip6 */ );
}
else
{
if (is_midchain)
{
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
- 0 /* is_ip6 */ ,
- 0 /* with gso */ );
+ 0 /* is_ip6 */ );
/* Guess we are only writing on ipv4 header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t));
{
/* this acts on the packet that is about to be encapped */
vnet_calc_checksums_inline (vm, b[0], 1 /* is_ip4 */ ,
- 0 /* is_ip6 */ ,
- 0 /* with gso */ );
+ 0 /* is_ip6 */ );
/* Guess we are only writing on ipv4 header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip4_header_t));
/* before we paint on the next header, update the L4
* checksums if required, since there's no offload on a tunnel */
vnet_calc_checksums_inline (vm, p0, 0 /* is_ip4 */ ,
- 1 /* is_ip6 */ ,
- 0 /* with gso */ );
+ 1 /* is_ip6 */ );
vnet_calc_checksums_inline (vm, p1, 0 /* is_ip4 */ ,
- 1 /* is_ip6 */ ,
- 0 /* with gso */ );
+ 1 /* is_ip6 */ );
/* Guess we are only writing on ipv6 header. */
vnet_rewrite_two_headers (adj0[0], adj1[0],
if (is_midchain)
{
vnet_calc_checksums_inline (vm, p0, 0 /* is_ip4 */ ,
- 1 /* is_ip6 */ ,
- 0 /* with gso */ );
+ 1 /* is_ip6 */ );
/* Guess we are only writing on ip6 header. */
vnet_rewrite_one_header (adj0[0], ip0, sizeof (ip6_header_t));
}
static_always_inline void
-fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
- u32 packet_data_size)
+fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
+ int gso_enabled, u32 gso_size)
{
-
for (int i = 0; i < n_buffers; i++)
{
vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
}
+
if (l4_proto == IP_PROTOCOL_TCP)
{
- b0->flags |= (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_GSO);
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
- l4_hdr_sz = tcp_header_bytes (tcp);
tcp->checksum = 0;
- vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
- vnet_buffer2 (b0)->gso_size = packet_data_size;
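+ /* when GSO is enabled, also record the segment size and L4 header
+  * length needed later to split the TCP payload into segments */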
+ if (gso_enabled)
+ {
+ b0->flags |= VNET_BUFFER_F_GSO;
+ l4_hdr_sz = tcp_header_bytes (tcp);
+ vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
+ vnet_buffer2 (b0)->gso_size = gso_size;
+ }
}
else if (l4_proto == IP_PROTOCOL_UDP)
{
udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
- vnet_buffer2 (b0)->gso_l4_hdr_sz = sizeof (*udp);
udp->checksum = 0;
}
}
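The top of fill_buffer_offload_flags, where l4_proto and the l3/l4 header offsets used above are derived, is not part of this hunk. A hedged sketch of that kind of parsing (hypothetical, not the actual pg code) would be:

  ethernet_header_t *eh = vlib_buffer_get_current (b0);
  u16 ethertype = clib_net_to_host_u16 (eh->type);
  u16 l2hdr_sz = sizeof (ethernet_header_t);
  u8 l4_proto = 0;

  /* offsets are stored relative to b0->data, matching how
   * vnet_calc_checksums_inline dereferences them */
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data + l2hdr_sz;

  if (ethertype == ETHERNET_TYPE_IP4)
    {
      ip4_header_t *ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
      vnet_buffer (b0)->l4_hdr_offset =
	vnet_buffer (b0)->l3_hdr_offset + ip4_header_bytes (ip4);
      l4_proto = ip4->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP4;
    }
  else if (ethertype == ETHERNET_TYPE_IP6)
    {
      ip6_header_t *ip6 =
	(ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
      vnet_buffer (b0)->l4_hdr_offset =
	vnet_buffer (b0)->l3_hdr_offset + sizeof (ip6_header_t);
      l4_proto = ip6->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP6;
    }
  /* followed by the HDR_OFFSET_VALID flags shown in the hunk above */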
vnet_buffer (b)->feature_arc_index = feature_arc_index;
}
- if (pi->gso_enabled)
- fill_gso_buffer_flags (vm, to_next, n_this_frame, pi->gso_size);
+ if (pi->gso_enabled ||
+ (s->buffer_flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
+ VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
+ VNET_BUFFER_F_OFFLOAD_IP_CKSUM)))
+ {
+ fill_buffer_offload_flags (vm, to_next, n_this_frame,
+ pi->gso_enabled, pi->gso_size);
+ }
n_trace = vlib_get_trace_count (vm, node);
if (n_trace > 0)
self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
- self.assert_udp_checksum_valid(rx)
+ self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
self.assertEqual(rx[VXLAN].vni, 10)
inner = rx[VXLAN].payload
self.assertEqual(rx[IPv6].plen - 8 - 8, len(inner))
self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
self.assertEqual(rx[IPv6].src, self.pg0.local_ip6)
self.assertEqual(rx[IPv6].dst, self.pg0.remote_ip6)
- self.assert_udp_checksum_valid(rx)
+ self.assert_udp_checksum_valid(rx, ignore_zero_checksum=False)
self.assertEqual(rx[VXLAN].vni, 10)
inner = rx[VXLAN].payload
self.assertEqual(rx[IPv6].plen - 8 - 8, len(inner))
# Verify UDP destination port is VXLAN 4789, source UDP port could be
# arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
- # TODO: checksum check
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
-from scapy.layers.inet6 import IPv6, UDP
+from scapy.packet import Raw
+from scapy.compat import raw
+from scapy.layers.inet import IP
+from scapy.layers.inet6 import IPv6, UDP
from scapy.layers.vxlan import VXLAN
import util
# Verify UDP destination port is VXLAN 4789, source UDP port could be
# arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
- # TODO: checksum check
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt, ignore_zero_checksum=False)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
self.logger.info(self.vapi.cli("show vxlan tunnel"))
+ def test_encap_fragmented_packet(self):
+ """ Encapsulation test: send fragmented frames from pg1,
+ verify receipt of encapsulated frames on pg0
+ """
+
+ frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
+ IP(src='4.3.2.1', dst='1.2.3.4') /
+ UDP(sport=20000, dport=10000) /
+ Raw(b'\xa5' * 1000))
+
+ frags = util.fragment_rfc791(frame, 400)
+
+ self.pg1.add_stream(frags)
+
+ self.pg0.enable_capture()
+
+ self.pg_start()
+
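+ # the fragmented frame should arrive as three separately encapsulated
+ # packets on pg0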
+ out = self.pg0.get_capture(3)
+
+ payload = []
+ for pkt in out:
+ payload.append(self.decapsulate(pkt))
+ self.check_encapsulation(pkt, self.single_tunnel_vni)
+
+ reassembled = util.reassemble4(payload)
+
+ self.assertEqual(Ether(raw(frame))[IP], reassembled[IP])
+
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
# Verify UDP destination port is VXLAN GBP 48879, source UDP port could
# be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
- # TODO: checksum check
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt)
# Verify VNI
# pkt.show()
self.assertEqual(pkt[VXLAN].vni, vni)
# Verify UDP destination port is VXLAN-GPE 4790, source UDP port
# could be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
+ # Verify UDP checksum
+ self.assert_udp_checksum_valid(pkt)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)