X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fdevices%2Faf_packet%2Fdevice.c;h=013d9f71733802756046949af1292f9e1486f867;hb=5a7aa51f0;hp=b6b99a0465cfe9918e1c118391461d7b70a276aa;hpb=ffc6bdcd38b8209050671d3d86f943c37887a7b7;p=vpp.git

diff --git a/src/vnet/devices/af_packet/device.c b/src/vnet/devices/af_packet/device.c
index b6b99a0465c..013d9f71733 100644
--- a/src/vnet/devices/af_packet/device.c
+++ b/src/vnet/devices/af_packet/device.c
@@ -27,8 +27,14 @@
 #include <vnet/ip/ip.h>
 #include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip_psh_cksum.h>
+#include <vnet/tcp/tcp_packet.h>
+#include <vnet/udp/udp_packet.h>
 
 #include <vnet/devices/devices.h>
 #include <vnet/devices/af_packet/af_packet.h>
+#include <vnet/devices/virtio/virtio_std.h>
 
 #define foreach_af_packet_tx_func_error               \
 _(FRAME_NOT_READY, "tx frame not ready")              \
@@ -50,6 +56,15 @@ static char *af_packet_tx_func_error_strings[] = {
 #undef _
 };
 
+typedef struct
+{
+  u32 buffer_index;
+  u32 hw_if_index;
+  u16 queue_id;
+  tpacket3_hdr_t tph;
+  vnet_virtio_net_hdr_t vnet_hdr;
+  vlib_buffer_t buffer;
+} af_packet_tx_trace_t;
 
 #ifndef CLIB_MARCH_VARIANT
 u8 *
@@ -73,113 +88,329 @@ format_af_packet_device (u8 * s, va_list * args)
   af_packet_main_t *apm = &af_packet_main;
   af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, dev_instance);
 
-  clib_spinlock_lock_if_init (&apif->lockp);
-  u32 block_size = apif->tx_req->tp_block_size;
-  u32 frame_size = apif->tx_req->tp_frame_size;
-  u32 frame_num = apif->tx_req->tp_frame_nr;
-  int block = 0;
-  u8 *block_start = apif->tx_ring + block * block_size;
-  u32 tx_frame = apif->next_tx_frame;
-  struct tpacket2_hdr *tph;
-
-  s = format (s, "Linux PACKET socket interface\n");
-  s = format (s, "%Ublock:%d frame:%d\n", format_white_space, indent,
-              block_size, frame_size);
-  s = format (s, "%Unext frame:%d\n", format_white_space, indent,
-              apif->next_tx_frame);
-
-  int n_send_req = 0, n_avail = 0, n_sending = 0, n_tot = 0, n_wrong = 0;
-  do
+  af_packet_queue_t *rx_queue = 0;
+  af_packet_queue_t *tx_queue = 0;
+
+  s = format (s, "Linux PACKET socket interface");
+
+  vec_foreach (rx_queue, apif->rx_queues)
     {
-      tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
-      tx_frame = (tx_frame + 1) % frame_num;
-      if (tph->tp_status == 0)
-        n_avail++;
-      else if (tph->tp_status & TP_STATUS_SEND_REQUEST)
-        n_send_req++;
-      else if (tph->tp_status & TP_STATUS_SENDING)
-        n_sending++;
-      else
-        n_wrong++;
-      n_tot++;
+      u32 rx_block_size = rx_queue->rx_req->tp_block_size;
+      u32 rx_frame_size = rx_queue->rx_req->tp_frame_size;
+      u32 rx_frame_nr = rx_queue->rx_req->tp_frame_nr;
+      u32 rx_block_nr = rx_queue->rx_req->tp_block_nr;
+
+      s = format (s, "\n%URX Queue %u:", format_white_space, indent,
+                  rx_queue->queue_id);
+      s = format (s, "\n%Ublock size:%d nr:%d frame size:%d nr:%d",
+                  format_white_space, indent + 2, rx_block_size, rx_block_nr,
+                  rx_frame_size, rx_frame_nr);
+      s = format (s, " next block:%d", rx_queue->next_rx_block);
+      if (rx_queue->is_rx_pending)
+        {
+          s = format (
+            s, "\n%UPending Request: num-rx-pkts:%d next-frame-offset:%d",
+            format_white_space, indent + 2, rx_queue->num_rx_pkts,
+            rx_queue->rx_frame_offset);
+        }
     }
-  while (tx_frame != apif->next_tx_frame);
 
-  s = format (s, "%Uavailable:%d request:%d sending:%d wrong:%d total:%d\n",
-              format_white_space, indent, n_avail, n_send_req, n_sending,
-              n_wrong, n_tot);
-
-  clib_spinlock_unlock_if_init (&apif->lockp);
+  vec_foreach (tx_queue, apif->tx_queues)
+    {
+      clib_spinlock_lock (&tx_queue->lockp);
+      u32 tx_block_sz = tx_queue->tx_req->tp_block_size;
+      u32 tx_frame_sz = tx_queue->tx_req->tp_frame_size;
+      u32 tx_frame_nr = tx_queue->tx_req->tp_frame_nr;
+      u32 tx_block_nr = tx_queue->tx_req->tp_block_nr;
+      int block = 0;
+      int n_send_req = 0, n_avail = 0, n_sending = 0, n_tot = 0, n_wrong = 0;
+      u8 *tx_block_start = tx_queue->tx_ring[block];
+      u32 tx_frame = tx_queue->next_tx_frame;
+      tpacket3_hdr_t *tph;
+
+      s = format (s, "\n%UTX Queue %u:", format_white_space, indent,
+                  tx_queue->queue_id);
+      s = format (s, "\n%Ublock size:%d nr:%d frame size:%d nr:%d",
+                  format_white_space, indent + 2, tx_block_sz, tx_block_nr,
+                  tx_frame_sz, tx_frame_nr);
+      s = format (s, " next frame:%d", tx_queue->next_tx_frame);
+
+      do
+        {
+          tph = (tpacket3_hdr_t *) (tx_block_start + tx_frame * tx_frame_sz);
+          tx_frame = (tx_frame + 1) % tx_frame_nr;
+          if (tph->tp_status == 0)
+            n_avail++;
+          else if (tph->tp_status & TP_STATUS_SEND_REQUEST)
+            n_send_req++;
+          else if (tph->tp_status & TP_STATUS_SENDING)
+            n_sending++;
+          else
+            n_wrong++;
+          n_tot++;
+        }
+      while (tx_frame != tx_queue->next_tx_frame);
+      s =
+        format (s, "\n%Uavailable:%d request:%d sending:%d wrong:%d total:%d",
+                format_white_space, indent + 2, n_avail, n_send_req, n_sending,
+                n_wrong, n_tot);
+      clib_spinlock_unlock (&tx_queue->lockp);
+    }
   return s;
 }
 
 static u8 *
-format_af_packet_tx_trace (u8 * s, va_list * args)
+format_af_packet_tx_trace (u8 *s, va_list *va)
 {
-  s = format (s, "Unimplemented...");
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+  af_packet_tx_trace_t *t = va_arg (*va, af_packet_tx_trace_t *);
+  u32 indent = format_get_indent (s);
+
+  s = format (s, "af_packet: hw_if_index %u tx-queue %u", t->hw_if_index,
+              t->queue_id);
+
+  s =
+    format (s,
+            "\n%Utpacket3_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
+            "\n%Usec 0x%x nsec 0x%x vlan %U"
+#ifdef TP_STATUS_VLAN_TPID_VALID
+            " vlan_tpid %u"
+#endif
+            ,
+            format_white_space, indent + 2, format_white_space, indent + 4,
+            t->tph.tp_status, t->tph.tp_len, t->tph.tp_snaplen, t->tph.tp_mac,
+            t->tph.tp_net, format_white_space, indent + 4, t->tph.tp_sec,
+            t->tph.tp_nsec, format_ethernet_vlan_tci, t->tph.hv1.tp_vlan_tci
+#ifdef TP_STATUS_VLAN_TPID_VALID
+            ,
+            t->tph.hv1.tp_vlan_tpid
+#endif
+            );
+
+  s = format (s,
+              "\n%Uvnet-hdr:\n%Uflags 0x%02x gso_type 0x%02x hdr_len %u"
+              "\n%Ugso_size %u csum_start %u csum_offset %u",
+              format_white_space, indent + 2, format_white_space, indent + 4,
+              t->vnet_hdr.flags, t->vnet_hdr.gso_type, t->vnet_hdr.hdr_len,
+              format_white_space, indent + 4, t->vnet_hdr.gso_size,
+              t->vnet_hdr.csum_start, t->vnet_hdr.csum_offset);
+
+  s = format (s, "\n%Ubuffer 0x%x:\n%U%U", format_white_space, indent + 2,
+              t->buffer_index, format_white_space, indent + 4,
+              format_vnet_buffer_no_chain, &t->buffer);
+  s = format (s, "\n%U%U", format_white_space, indent + 2,
+              format_ethernet_header_with_length, t->buffer.pre_data,
+              sizeof (t->buffer.pre_data));
   return s;
 }
 
+static void
+af_packet_tx_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
+                    vlib_buffer_t *b0, u32 bi, tpacket3_hdr_t *tph,
+                    vnet_virtio_net_hdr_t *vnet_hdr, u32 hw_if_index,
+                    u16 queue_id)
+{
+  af_packet_tx_trace_t *t;
+  t = vlib_add_trace (vm, node, b0, sizeof (t[0]));
+  t->hw_if_index = hw_if_index;
+  t->queue_id = queue_id;
+  t->buffer_index = bi;
+
+  clib_memcpy_fast (&t->tph, tph, sizeof (*tph));
+  clib_memcpy_fast (&t->vnet_hdr, vnet_hdr, sizeof (*vnet_hdr));
+  clib_memcpy_fast (&t->buffer, b0, sizeof (*b0) - sizeof (b0->pre_data));
+  clib_memcpy_fast (t->buffer.pre_data, vlib_buffer_get_current (b0),
+                    sizeof (t->buffer.pre_data));
+}
+
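(Aside, not part of the patch: the fill_gso_offload()/fill_cksum_offload() helpers that follow program the kernel through a virtio-net header prepended to each frame, which is how AF_PACKET sockets with PACKET_VNET_HDR accept offload requests. For reference, a sketch of the layout that vnet_virtio_net_hdr_t mirrors, after the virtio spec / linux/virtio_net.h; the struct name below is made up for illustration:)

    #include <stdint.h>

    /* Reference layout only -- VPP's vnet_virtio_net_hdr_t matches this. */
    struct virtio_net_hdr_sketch
    {
      uint8_t flags;        /* VIRTIO_NET_HDR_F_NEEDS_CSUM: kernel finishes the csum */
      uint8_t gso_type;     /* VIRTIO_NET_HDR_GSO_{NONE,TCPV4,TCPV6} */
      uint16_t hdr_len;     /* bytes of L2+L3+L4 headers repeated in every segment */
      uint16_t gso_size;    /* payload bytes per segment (the TCP MSS) */
      uint16_t csum_start;  /* checksum coverage begins here ... */
      uint16_t csum_offset; /* ... result lands at csum_start + csum_offset */
    };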
+static_always_inline void
+fill_gso_offload (vlib_buffer_t *b0, vnet_virtio_net_hdr_t *vnet_hdr)
+{
+  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;
+  if (b0->flags & VNET_BUFFER_F_IS_IP4)
+    {
+      ip4_header_t *ip4;
+      vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+      vnet_hdr->gso_size = vnet_buffer2 (b0)->gso_size;
+      vnet_hdr->hdr_len =
+        vnet_buffer (b0)->l4_hdr_offset + vnet_buffer2 (b0)->gso_l4_hdr_sz;
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = vnet_buffer (b0)->l4_hdr_offset; // 0x22;
+      vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+      ip4 = (ip4_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l3_hdr_offset);
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
+        ip4->checksum = ip4_header_checksum (ip4);
+    }
+  else if (b0->flags & VNET_BUFFER_F_IS_IP6)
+    {
+      vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+      vnet_hdr->gso_size = vnet_buffer2 (b0)->gso_size;
+      vnet_hdr->hdr_len =
+        vnet_buffer (b0)->l4_hdr_offset + vnet_buffer2 (b0)->gso_l4_hdr_sz;
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = vnet_buffer (b0)->l4_hdr_offset; // 0x36;
+      vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+    }
+}
+
+static_always_inline void
+fill_cksum_offload (vlib_buffer_t *b0, vnet_virtio_net_hdr_t *vnet_hdr)
+{
+  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;
+  if (b0->flags & VNET_BUFFER_F_IS_IP4)
+    {
+      ip4_header_t *ip4;
+      ip4 = (ip4_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l3_hdr_offset);
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
+        ip4->checksum = ip4_header_checksum (ip4);
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = 0x22;
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
+        {
+          tcp_header_t *tcp =
+            (tcp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          tcp->checksum = ip4_pseudo_header_cksum (ip4);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+        }
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
+        {
+          udp_header_t *udp =
+            (udp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          udp->checksum = ip4_pseudo_header_cksum (ip4);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
+        }
+    }
+  else if (b0->flags & VNET_BUFFER_F_IS_IP6)
+    {
+      ip6_header_t *ip6;
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = 0x36;
+      ip6 = (ip6_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l3_hdr_offset);
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
+        {
+          tcp_header_t *tcp =
+            (tcp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          tcp->checksum = ip6_pseudo_header_cksum (ip6);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+        }
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
+        {
+          udp_header_t *udp =
+            (udp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          udp->checksum = ip6_pseudo_header_cksum (ip6);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
+        }
+    }
+}
+
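(Aside, not part of the patch: the 0x22/0x36 values used for csum_start above are simply the L4 offset of an untagged Ethernet frame -- 14 bytes of Ethernet plus a 20-byte IPv4 header, or plus a 40-byte IPv6 header. A self-contained check, assuming standard glibc headers:)

    #include <netinet/tcp.h>
    #include <stddef.h>
    #include <stdio.h>

    int
    main (void)
    {
      /* 14 + 20 = 0x22 (IPv4), 14 + 40 = 0x36 (IPv6); the checksum result
       * is written 16 bytes into the TCP header, at its 'check' field. */
      printf ("v4 csum_start=0x%x v6 csum_start=0x%x csum_offset=%zu\n",
              14 + 20, 14 + 40, offsetof (struct tcphdr, check));
      return 0;
    }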
 VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
                                                   vlib_node_runtime_t * node,
                                                   vlib_frame_t * frame)
 {
   af_packet_main_t *apm = &af_packet_main;
+  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
   u32 *buffers = vlib_frame_vector_args (frame);
   u32 n_left = frame->n_vectors;
   u32 n_sent = 0;
   vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
   af_packet_if_t *apif =
     pool_elt_at_index (apm->interfaces, rd->dev_instance);
-  clib_spinlock_lock_if_init (&apif->lockp);
-  int block = 0;
-  u32 block_size = apif->tx_req->tp_block_size;
-  u32 frame_size = apif->tx_req->tp_frame_size;
-  u32 frame_num = apif->tx_req->tp_frame_nr;
-  u8 *block_start = apif->tx_ring + block * block_size;
-  u32 tx_frame = apif->next_tx_frame;
-  struct tpacket2_hdr *tph;
+  u16 queue_id = tf->queue_id;
+  af_packet_queue_t *tx_queue = vec_elt_at_index (apif->tx_queues, queue_id);
+  u32 block = 0, frame_size = 0, frame_num = 0, tx_frame = 0;
+  u8 *block_start = 0;
+  tpacket3_hdr_t *tph = 0;
   u32 frame_not_ready = 0;
+  u8 is_cksum_gso_enabled = (apif->is_cksum_gso_enabled == 1) ? 1 : 0;
+
+  if (tf->shared_queue)
+    clib_spinlock_lock (&tx_queue->lockp);
+
+  frame_size = tx_queue->tx_req->tp_frame_size;
+  frame_num = tx_queue->tx_req->tp_frame_nr;
+  block_start = tx_queue->tx_ring[block];
+  tx_frame = tx_queue->next_tx_frame;
 
   while (n_left)
     {
       u32 len;
+      vnet_virtio_net_hdr_t *vnet_hdr = 0;
       u32 offset = 0;
-      vlib_buffer_t *b0;
+      vlib_buffer_t *b0 = 0, *b0_first = 0;
+      u32 bi, bi_first;
+
+      bi = bi_first = buffers[0];
       n_left--;
-      u32 bi = buffers[0];
       buffers++;
 
-    nextframe:
-      tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
+      tph = (tpacket3_hdr_t *) (block_start + tx_frame * frame_size);
       if (PREDICT_FALSE (tph->tp_status &
                          (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
         {
-          tx_frame = (tx_frame + 1) % frame_num;
           frame_not_ready++;
-          /* check if we've exhausted the ring */
-          if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
-            break;
-          goto nextframe;
+          goto next;
         }
 
-      do
+      b0_first = b0 = vlib_get_buffer (vm, bi);
+
+      if (PREDICT_TRUE (is_cksum_gso_enabled))
+        {
+          vnet_hdr =
+            (vnet_virtio_net_hdr_t *) ((u8 *) tph +
+                                       TPACKET_ALIGN (sizeof (tpacket3_hdr_t)));
+
+          clib_memset_u8 (vnet_hdr, 0, sizeof (vnet_virtio_net_hdr_t));
+          offset = sizeof (vnet_virtio_net_hdr_t);
+
+          if (b0->flags & VNET_BUFFER_F_GSO)
+            fill_gso_offload (b0, vnet_hdr);
+          else if (b0->flags & VNET_BUFFER_F_OFFLOAD)
+            fill_cksum_offload (b0, vnet_hdr);
+        }
+
+      len = b0->current_length;
+      clib_memcpy_fast ((u8 *) tph + TPACKET_ALIGN (sizeof (tpacket3_hdr_t)) +
+                          offset,
+                        vlib_buffer_get_current (b0), len);
+      offset += len;
+
+      while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
         {
-          b0 = vlib_get_buffer (vm, bi);
+          b0 = vlib_get_buffer (vm, b0->next_buffer);
           len = b0->current_length;
           clib_memcpy_fast ((u8 *) tph +
-                            TPACKET_ALIGN (sizeof (struct tpacket2_hdr)) +
-                            offset, vlib_buffer_get_current (b0), len);
+                            TPACKET_ALIGN (sizeof (tpacket3_hdr_t)) + offset,
+                            vlib_buffer_get_current (b0), len);
           offset += len;
         }
-      while ((bi =
-              (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));
 
       tph->tp_len = tph->tp_snaplen = offset;
       tph->tp_status = TP_STATUS_SEND_REQUEST;
       n_sent++;
+
+      if (PREDICT_FALSE (b0_first->flags & VLIB_BUFFER_IS_TRACED))
+        {
+          if (PREDICT_TRUE (is_cksum_gso_enabled))
+            af_packet_tx_trace (vm, node, b0_first, bi_first, tph, vnet_hdr,
+                                apif->hw_if_index, queue_id);
+          else
+            {
+              vnet_virtio_net_hdr_t vnet_hdr2 = {};
+              af_packet_tx_trace (vm, node, b0_first, bi_first, tph,
+                                  &vnet_hdr2, apif->hw_if_index, queue_id);
+            }
+        }
       tx_frame = (tx_frame + 1) % frame_num;
 
+    next:
       /* check if we've exhausted the ring */
       if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
         break;
@@ -187,24 +418,27 @@ VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
 
   CLIB_MEMORY_BARRIER ();
 
-  apif->next_tx_frame = tx_frame;
-
   if (PREDICT_TRUE (n_sent))
-    if (PREDICT_FALSE (sendto (apif->fd, NULL, 0, MSG_DONTWAIT, NULL, 0) ==
-                       -1))
-      {
-        /* Uh-oh, drop & move on, but count whether it was fatal or not.
-         * Note that we have no reliable way to properly determine the
-         * disposition of the packets we just enqueued for delivery.
-         */
-        vlib_error_count (vm, node->node_index,
-                          unix_error_is_fatal (errno) ?
-                          AF_PACKET_TX_ERROR_TXRING_FATAL :
-                          AF_PACKET_TX_ERROR_TXRING_EAGAIN,
-                          n_sent);
-      }
-
-  clib_spinlock_unlock_if_init (&apif->lockp);
+    {
+      tx_queue->next_tx_frame = tx_frame;
+
+      if (PREDICT_FALSE (
+            sendto (tx_queue->fd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1))
+        {
+          /* Uh-oh, drop & move on, but count whether it was fatal or not.
+           * Note that we have no reliable way to properly determine the
+           * disposition of the packets we just enqueued for delivery.
+           */
+          vlib_error_count (vm, node->node_index,
+                            unix_error_is_fatal (errno) ?
+                              AF_PACKET_TX_ERROR_TXRING_FATAL :
+                              AF_PACKET_TX_ERROR_TXRING_EAGAIN,
+                            n_sent);
+        }
+    }
+
+  if (tf->shared_queue)
+    clib_spinlock_unlock (&tx_queue->lockp);
 
   if (PREDICT_FALSE (frame_not_ready))
     vlib_error_count (vm, node->node_index,
@@ -329,9 +563,18 @@ static clib_error_t *af_packet_set_mac_address_function
   af_packet_main_t *apm = &af_packet_main;
   af_packet_if_t *apif =
     pool_elt_at_index (apm->interfaces, hi->dev_instance);
-  int rv, fd = socket (AF_UNIX, SOCK_DGRAM, 0);
+  int rv, fd;
   struct ifreq ifr;
 
+  if (apif->mode == AF_PACKET_IF_MODE_IP)
+    {
+      vlib_log_warn (apm->log_class, "af_packet_%s interface is in IP mode",
+                     apif->host_if_name);
+      return clib_error_return (0,
+                                " MAC update failed, interface is in IP mode");
+    }
+
+  fd = socket (AF_UNIX, SOCK_DGRAM, 0);
   if (0 > fd)
     {
       vlib_log_warn (apm->log_class, "af_packet_%s could not open socket",
@@ -373,7 +616,6 @@ error:
   return 0;                    /* no error */
 }
 
-/* *INDENT-OFF* */
 VNET_DEVICE_CLASS (af_packet_device_class) = {
   .name = "af-packet",
   .format_device_name = format_af_packet_device_name,
@@ -387,7 +629,6 @@ VNET_DEVICE_CLASS (af_packet_device_class) = {
   .subif_add_del_function = af_packet_subif_add_del_function,
   .mac_addr_change_function = af_packet_set_mac_address_function,
 };
-/* *INDENT-ON* */
 
 /*
  * fd.io coding-style-patch-verification: ON
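(Aside, not part of the patch: the zero-length sendto() in the TX function is the standard PACKET_TX_RING kick -- userspace marks ring slots TP_STATUS_SEND_REQUEST and the call asks the kernel to transmit everything so marked; the kernel hands a slot back by resetting tp_status to TP_STATUS_AVAILABLE. A minimal sketch of that protocol for one slot, independent of VPP; it assumes fd was set up with PACKET_VERSION = TPACKET_V3 and a mmap()ed PACKET_TX_RING, frame points at a ring slot, and error handling is elided:)

    #include <linux/if_packet.h>
    #include <string.h>
    #include <sys/socket.h>

    static int
    tx_ring_send_one (int fd, void *frame, const void *pkt, unsigned int len)
    {
      struct tpacket3_hdr *tph = frame;

      /* The slot must be owned by user space before we may touch it. */
      if (tph->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING))
        return -1;

      tph->tp_next_offset = 0; /* must be zero for TPACKET_V3 TX */
      memcpy ((char *) tph + TPACKET_ALIGN (sizeof (*tph)), pkt, len);
      tph->tp_len = tph->tp_snaplen = len;

      __sync_synchronize (); /* publish the payload before the status flip */
      tph->tp_status = TP_STATUS_SEND_REQUEST;

      /* A zero-length send flushes every frame marked SEND_REQUEST. */
      return sendto (fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
    }

(VPP batches this: it marks a whole vector of frames, issues one kick, and, as in the error block above, counts a failed kick as fatal or EAGAIN by errno, since the disposition of already-queued frames cannot be known.)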