X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fixge%2Fixge.c;h=c27042df8b20049816f4f9a569c53c6ce880fb5a;hb=cfc7a107e6cb8be6e7c53a08e23a146c431c8e90;hp=4eebc457eaaa2dd9a4a1c21fa6a86b293ec0e90c;hpb=374e2c5fc30a5bfabfd2eb6c2d3ca5797402af16;p=vpp.git

diff --git a/src/plugins/ixge/ixge.c b/src/plugins/ixge/ixge.c
index 4eebc457eaa..c27042df8b2 100644
--- a/src/plugins/ixge/ixge.c
+++ b/src/plugins/ixge/ixge.c
@@ -20,7 +20,7 @@
  * Please use supported DPDK driver instead.
  */
 
-#if __x86_64__
+#if __x86_64__ || __i386__ || __aarch64__
 #include <vppinfra/vector.h>
 
 #ifndef CLIB_HAVE_VEC128
@@ -359,7 +359,7 @@ ixge_sfp_phy_init (ixge_device_t * xd)
       vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
 
       if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
-        xd->sfp_eeprom.id = SFP_ID_unknown;
+        xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
       else
         {
           /* FIXME 5 => SR/LR eeprom ID. */
@@ -448,7 +448,7 @@ format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
     va_arg (*va, ixge_rx_from_hw_descriptor_t *);
   u32 s0 = d->status[0], s2 = d->status[2];
   u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
 
   s = format (s, "%s-owned",
               (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
@@ -517,7 +517,7 @@ format_ixge_tx_descriptor (u8 * s, va_list * va)
 {
   ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
   u32 s0 = d->status0, s1 = d->status1;
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
   u32 v;
 
   s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
@@ -582,7 +582,7 @@ format_ixge_rx_dma_trace (u8 * s, va_list * va)
   ixge_main_t *xm = &ixge_main;
   ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
   format_function_t *f;
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
 
   {
     vnet_sw_interface_t *sw =
@@ -601,7 +601,7 @@ format_ixge_rx_dma_trace (u8 * s, va_list * va)
 
   s = format (s, "\n%Ubuffer 0x%x: %U",
               format_white_space, indent,
-              t->buffer_index, format_vlib_buffer, &t->buffer);
+              t->buffer_index, format_vnet_buffer, &t->buffer);
 
   s = format (s, "\n%U", format_white_space, indent);
@@ -656,11 +656,11 @@ ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
 
   f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
-        ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
+        ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
 
   f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
-         ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
+         ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
 
   *error0 = e0;
   *next0 = n0;
@@ -715,17 +715,17 @@ ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
 
   f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
-        ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
+        ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
   f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
                 | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
-        ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
+        ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
 
   f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
-         ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
+         ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
   f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
                  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
-         ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
+         ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
 
   *flags0 = f0;
   *flags1 = f1;
@@ -869,7 +869,7 @@ format_ixge_tx_dma_trace (u8 * s, va_list * va)
   ixge_main_t *xm = &ixge_main;
   ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
   format_function_t *f;
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
 
   {
     vnet_sw_interface_t *sw =
@@ -885,7 +885,7 @@ format_ixge_tx_dma_trace (u8 * s, va_list * va)
 
   s = format (s, "\n%Ubuffer 0x%x: %U",
               format_white_space, indent,
-              t->buffer_index, format_vlib_buffer, &t->buffer);
+              t->buffer_index, format_vnet_buffer, &t->buffer);
 
   s = format (s, "\n%U", format_white_space, indent);
@@ -1097,10 +1097,8 @@ ixge_tx_no_wrap (ixge_main_t * xm,
       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));
 
-      d[0].buffer_address =
-        vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
-      d[1].buffer_address =
-        vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
+      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
+      d[1].buffer_address = vlib_buffer_get_pa (vm, b1);
 
       d[0].n_bytes_this_buffer = len0;
       d[1].n_bytes_this_buffer = len1;
@@ -1153,9 +1151,7 @@ ixge_tx_no_wrap (ixge_main_t * xm,
 
       ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
 
-      d[0].buffer_address =
-        vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
-
+      d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
       d[0].n_bytes_this_buffer = len0;
 
       d[0].status0 =
@@ -1395,9 +1391,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
           vec_resize (xm->rx_buffers_to_add, n_to_alloc);
           _vec_len (xm->rx_buffers_to_add) = l;
 
-          n_allocated = vlib_buffer_alloc_from_free_list
-            (vm, xm->rx_buffers_to_add + l, n_to_alloc,
-             xm->vlib_buffer_free_list_index);
+          n_allocated =
+            vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
           _vec_len (xm->rx_buffers_to_add) += n_allocated;
 
           /* Handle transient allocation failure */
@@ -1426,6 +1421,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
       while (n_descriptors_left >= 4 && n_left_to_next >= 2)
         {
           vlib_buffer_t *b0, *b1;
+          vlib_buffer_t *f0, *f1;
           u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
           u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
           u8 is_eop0, error0, next0;
@@ -1462,14 +1458,12 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
           to_rx += 2;
           to_add -= 2;
 
-          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
-                  vlib_buffer_is_known (vm, bi0));
-          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
-                  vlib_buffer_is_known (vm, bi1));
-          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
-                  vlib_buffer_is_known (vm, fi0));
-          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
-                  vlib_buffer_is_known (vm, fi1));
+#if 0
+          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
+          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi1));
+          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
+          ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi1));
+#endif
 
           b0 = vlib_get_buffer (vm, bi0);
           b1 = vlib_get_buffer (vm, bi1);
@@ -1513,10 +1507,10 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
           n_packets += is_eop0 + is_eop1;
 
           /* Give new buffers to hardware. */
-          d0.rx_to_hw.tail_address =
-            vlib_get_buffer_data_physical_address (vm, fi0);
-          d1.rx_to_hw.tail_address =
-            vlib_get_buffer_data_physical_address (vm, fi1);
+          f0 = vlib_get_buffer (vm, fi0);
+          f1 = vlib_get_buffer (vm, fi1);
+          d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
+          d1.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f1);
           d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
           d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
           d[0].as_u32x4 = d0.as_u32x4;
@@ -1658,6 +1652,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
       while (n_descriptors_left > 0 && n_left_to_next > 0)
         {
          vlib_buffer_t *b0;
+         vlib_buffer_t *f0;
          u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
          u8 is_eop0, error0, next0;
          ixge_descriptor_t d0;
@@ -1678,10 +1673,10 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
          to_rx += 1;
          to_add -= 1;
 
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
-                 vlib_buffer_is_known (vm, bi0));
-         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
-                 vlib_buffer_is_known (vm, fi0));
+#if 0
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0));
+         ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0));
+#endif
 
          b0 = vlib_get_buffer (vm, bi0);
@@ -1711,8 +1706,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
          n_packets += is_eop0;
 
          /* Give new buffer to hardware. */
-         d0.rx_to_hw.tail_address =
-           vlib_get_buffer_data_physical_address (vm, fi0);
+         f0 = vlib_get_buffer (vm, fi0);
+         d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
          d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
          d[0].as_u32x4 = d0.as_u32x4;
@@ -1887,7 +1882,7 @@ done:
       vlib_increment_combined_counter (vnet_main.
                                        interface_main.combined_sw_if_counters
                                        + VNET_INTERFACE_COUNTER_RX,
-                                       0 /* cpu_index */ ,
+                                       0 /* thread_index */ ,
                                        xd->vlib_sw_if_index, n_packets,
                                        dq->rx.n_bytes);
@@ -2243,18 +2238,18 @@ VLIB_REGISTER_NODE (ixge_input_node, static) = {
   },
 };
 
-VLIB_NODE_FUNCTION_MULTIARCH_CLONE (ixge_input)
-CLIB_MULTIARCH_SELECT_FN (ixge_input)
 /* *INDENT-ON* */
 
 static u8 *
 format_ixge_device_name (u8 * s, va_list * args)
 {
+  vlib_main_t *vm = vlib_get_main ();
   u32 i = va_arg (*args, u32);
   ixge_main_t *xm = &ixge_main;
   ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
-  return format (s, "TenGigabitEthernet%U",
-                 format_vlib_pci_handle, &xd->pci_device.bus_address);
+  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
+  return format (s, "TenGigabitEthernet%x/%x/%x/%x",
+                 addr->domain, addr->bus, addr->slot, addr->function);
 }
 
 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
@@ -2341,10 +2336,11 @@ format_ixge_device (u8 * s, va_list * args)
 {
   u32 dev_instance = va_arg (*args, u32);
   CLIB_UNUSED (int verbose) = va_arg (*args, int);
+  vlib_main_t *vm = vlib_get_main ();
   ixge_main_t *xm = &ixge_main;
   ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
   ixge_phy_t *phy = xd->phys + xd->phy_index;
-  uword indent = format_get_indent (s);
+  u32 indent = format_get_indent (s);
 
   ixge_update_counters (xd);
   xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;
@@ -2355,14 +2351,18 @@ format_ixge_device (u8 * s, va_list * args)
 
   {
-    s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
-                format_vlib_pci_link_speed, &xd->pci_device);
+    vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
+    vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);
+
+    if (d)
+      s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
+                  format_vlib_pci_link_speed, d);
   }
 
   s = format (s, "\n%U", format_white_space, indent + 2);
   if (phy->mdio_address != ~0)
     s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
-  else if (xd->sfp_eeprom.id == SFP_ID_sfp)
+  else if (xd->sfp_eeprom.id == SFP_ID_SFP)
     s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
   else
     s = format (s, "PHY not found");
@@ -2454,7 +2454,6 @@ VNET_DEVICE_CLASS (ixge_device_class) = {
   .clear_counters = ixge_clear_hw_interface_counters,
   .admin_up_down_function = ixge_interface_admin_up_down,
   .rx_redirect_to_node = ixge_set_interface_next_node,
-  .flatten_output_chains = 1,
 };
 /* *INDENT-ON* */
@@ -2478,13 +2477,6 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
   if (!xm->n_bytes_in_rx_buffer)
     xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
   xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
-  if (!xm->vlib_buffer_free_list_index)
-    {
-      xm->vlib_buffer_free_list_index =
-        vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer,
-                                             "ixge rx");
-      ASSERT (xm->vlib_buffer_free_list_index != 0);
-    }
 
   if (!xm->n_descriptors[rt])
     xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
@@ -2494,32 +2486,28 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
     round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
   dq->head_index = dq->tail_index = 0;
 
-  dq->descriptors = vlib_physmem_alloc_aligned (vm, &error,
-                                                dq->n_descriptors *
+  dq->descriptors = vlib_physmem_alloc_aligned (vm, dq->n_descriptors *
                                                 sizeof (dq->descriptors[0]),
                                                 128 /* per chip spec */ );
-  if (error)
-    return error;
+  if (!dq->descriptors)
+    return vlib_physmem_last_error (vm);
 
-  memset (dq->descriptors, 0,
-          dq->n_descriptors * sizeof (dq->descriptors[0]));
+  clib_memset (dq->descriptors, 0,
+               dq->n_descriptors * sizeof (dq->descriptors[0]));
   vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
 
   if (rt == VLIB_RX)
     {
       u32 n_alloc, i;
 
-      n_alloc = vlib_buffer_alloc_from_free_list
-        (vm, dq->descriptor_buffer_indices,
-         vec_len (dq->descriptor_buffer_indices),
-         xm->vlib_buffer_free_list_index);
+      n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
+                                   vec_len (dq->descriptor_buffer_indices));
       ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
       for (i = 0; i < n_alloc; i++)
         {
-          vlib_buffer_t *b =
-            vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
          dq->descriptors[i].rx_to_hw.tail_address =
-           vlib_physmem_virtual_to_physical (vm, b->data);
+           vlib_buffer_get_pa
+           (vm, vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]));
        }
     }
   else
@@ -2527,7 +2515,9 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
       u32 i;
 
       dq->tx.head_index_write_back =
-        vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);
+        vlib_physmem_alloc (vm, CLIB_CACHE_LINE_BYTES);
+      if (!dq->tx.head_index_write_back)
+        return vlib_physmem_last_error (vm);
 
       for (i = 0; i < dq->n_descriptors; i++)
         dq->descriptors[i].tx = xm->tx_descriptor_template;
@@ -2539,7 +2529,7 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
     ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
     u64 a;
 
-    a = vlib_physmem_virtual_to_physical (vm, dq->descriptors);
+    a = vlib_physmem_get_pa (vm, dq->descriptors);
     dr->descriptor_address[0] = a & 0xFFFFFFFF;
     dr->descriptor_address[1] = a >> (u64) 32;
     dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
@@ -2564,8 +2554,7 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
         /* Make sure its initialized before hardware can get to it. */
         dq->tx.head_index_write_back[0] = dq->head_index;
 
-        a =
-          vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
+        a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
         dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
         dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
       }
@@ -2727,7 +2716,7 @@ ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
           vec_foreach (xd, xm->devices)
           {
             ixge_update_counters (xd);
-            memset (xd->counters, 0, sizeof (xd->counters));
+            clib_memset (xd->counters, 0, sizeof (xd->counters));
           }
 
           timeout = 30.0;
@@ -2799,13 +2788,12 @@
 clib_error_t *
 ixge_init (vlib_main_t * vm)
 {
   ixge_main_t *xm = &ixge_main;
-  clib_error_t *error;
 
   xm->vlib_main = vm;
-  memset (&xm->tx_descriptor_template, 0,
-          sizeof (xm->tx_descriptor_template));
-  memset (&xm->tx_descriptor_template_mask, 0,
-          sizeof (xm->tx_descriptor_template_mask));
+  clib_memset (&xm->tx_descriptor_template, 0,
+               sizeof (xm->tx_descriptor_template));
+  clib_memset (&xm->tx_descriptor_template_mask, 0,
+               sizeof (xm->tx_descriptor_template_mask));
   xm->tx_descriptor_template.status0 =
     (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
      IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
@@ -2818,20 +2806,21 @@ ixge_init (vlib_main_t * vm)
      | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
   xm->tx_descriptor_template_mask.status1 &=
     ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
-
-  error = vlib_call_init_function (vm, pci_bus_init);
-
-  return error;
+  return 0;
 }
 
-VLIB_INIT_FUNCTION (ixge_init);
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (ixge_init) =
+{
+  .runs_before = VLIB_INITS("pci_bus_init"),
+};
+/* *INDENT-ON* */
 
 static void
-ixge_pci_intr_handler (vlib_pci_device_t * dev)
+ixge_pci_intr_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h)
 {
-  ixge_main_t *xm = &ixge_main;
-  vlib_main_t *vm = xm->vlib_main;
+  uword private_data = vlib_pci_get_private_data (vm, h);
 
   vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
@@ -2839,23 +2828,21 @@
     {
       vlib_node_runtime_t *rt =
         vlib_node_get_runtime (vm, ixge_input_node.index);
-      rt->runtime_data[0] |= 1 << dev->private_data;
+      rt->runtime_data[0] |= 1 << private_data;
     }
 }
 
 static clib_error_t *
-ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
+ixge_pci_init (vlib_main_t * vm, vlib_pci_dev_handle_t h)
 {
   ixge_main_t *xm = &ixge_main;
-  clib_error_t *error;
+  clib_error_t *error = 0;
   void *r;
   ixge_device_t *xd;
+  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
+  vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);
 
-  /* Device found: make sure we have dma memory. */
-  if (unix_physmem_is_fake (vm))
-    return clib_error_return (0, "no physical memory available");
-
-  error = vlib_pci_map_resource (dev, 0, &r);
+  error = vlib_pci_map_region (vm, h, 0, &r);
   if (error)
     return error;
@@ -2863,16 +2850,17 @@
 
   if (vec_len (xm->devices) == 1)
     {
-      ixge_input_node.function = ixge_input_multiarch_select ();
+      ixge_input_node.function = ixge_input;
     }
 
-  xd->pci_device = dev[0];
-  xd->device_id = xd->pci_device.config0.header.device_id;
+  xd->pci_dev_handle = h;
+  xd->device_id = d->device_id;
   xd->regs = r;
   xd->device_index = xd - xm->devices;
-  xd->pci_function = dev->bus_address.function;
+  xd->pci_function = addr->function;
   xd->per_interface_next_index = ~0;
+  vlib_pci_set_private_data (vm, h, xd->device_index);
 
   /* Chip found so enable node. */
   {
@@ -2881,7 +2869,7 @@
                           ? VLIB_NODE_STATE_POLLING
                           : VLIB_NODE_STATE_INTERRUPT));
 
-    dev->private_data = xd->device_index;
+    //dev->private_data = xd->device_index;
   }
 
   if (vec_len (xm->devices) == 1)
@@ -2890,12 +2878,12 @@
       xm->process_node_index = ixge_process_node.index;
     }
 
-  error = vlib_pci_bus_master_enable (dev);
+  error = vlib_pci_bus_master_enable (vm, h);
 
   if (error)
     return error;
 
-  return vlib_pci_intr_enable (dev);
+  return vlib_pci_intr_enable (vm, h);
 }
 
 /* *INDENT-OFF* */
@@ -2929,15 +2917,17 @@ ixge_set_next_node (ixge_rx_next_t next, char *name)
       break;
     }
 }
-#endif
 
 /* *INDENT-OFF* */
 VLIB_PLUGIN_REGISTER () = {
   .version = VPP_BUILD_VER,
   .default_disabled = 1,
+  .description = "Intel 82599 Family Native Driver (experimental)",
 };
+#endif
+
 /* *INDENT-ON* */
 
 /*
  * fd.io coding-style-patch-verification: ON
 *