#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
+#include <vnet/interface/tx_queue_funcs.h>
#include <avf/avf.h>
#define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
#define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
-/* *INDENT-OFF* */
VLIB_REGISTER_LOG_CLASS (avf_log) = {
.class_name = "avf",
};
-/* *INDENT-ON* */
+
+VLIB_REGISTER_LOG_CLASS (avf_stats_log) = {
+ .class_name = "avf",
+ .subclass_name = "stats",
+};
avf_main_t avf_main;
void avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier);
if (ad->flags & AVF_DEVICE_F_ELOG)
clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
- CLIB_MEMORY_BARRIER ();
ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
avf_reg_flush (ad);
d++;
}
- ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
return 0;
}
u8 bpi = vlib_buffer_pool_get_default_for_numa (vm,
ad->numa_node);
- if (qid >= ad->num_queue_pairs)
- {
- qid = qid % ad->num_queue_pairs;
- txq = vec_elt_at_index (ad->txqs, qid);
- if (txq->lock == 0)
- clib_spinlock_init (&txq->lock);
- ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
- return 0;
- }
-
vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
txq = vec_elt_at_index (ad->txqs, qid);
txq->size = txq_size;
txq->next = 0;
+ clib_spinlock_init (&txq->lock);
/* Prepare a placeholder buffer(s) to maintain a 1-1 relationship between
* bufs and descs when a context descriptor is added in descs. Worst case
vec_validate_aligned (txq->tmp_descs, txq->size, CLIB_CACHE_LINE_BYTES);
vec_validate_aligned (txq->tmp_bufs, txq->size, CLIB_CACHE_LINE_BYTES);
- ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
return 0;
}
clib_error_t *
avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
{
+ /* from DPDK i40e... */
+ static uint32_t rss_key_default[] = { 0x6b793944, 0x23504cb5, 0x5bea75b6,
+ 0x309f4f12, 0x3dc0a2b8, 0x024ddcdf,
+ 0x339b8ca0, 0x4c4af64a, 0x34fac605,
+ 0x55d85839, 0x3a58997d, 0x2ec938e1,
+ 0x66031581 };
int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
- int i;
u8 msg[msg_len];
virtchnl_rss_key_t *rk;
+ if (sizeof (rss_key_default) != ad->rss_key_size)
+ return clib_error_create ("unsupported RSS key size (expected %d, got %d)",
+ sizeof (rss_key_default), ad->rss_key_size);
+
clib_memset (msg, 0, msg_len);
rk = (virtchnl_rss_key_t *) msg;
rk->vsi_id = ad->vsi_id;
rk->key_len = ad->rss_key_size;
- u32 seed = random_default_seed ();
- for (i = 0; i < ad->rss_key_size; i++)
- rk->key[i] = (u8) random_u32 (&seed);
+ memcpy_s (rk->key, rk->key_len, rss_key_default, sizeof (rss_key_default));
avf_log_debug (ad, "config_rss_key: vsi_id %u rss_key_size %u key 0x%U",
rk->vsi_id, rk->key_len, format_hex_bytes_no_wrap, rk->key,
virtchnl_eth_stats_t * es)
{
virtchnl_queue_select_t qs = { 0 };
+ clib_error_t *err;
qs.vsi_id = ad->vsi_id;
- avf_log_debug (ad, "get_stats: vsi_id %u", ad->vsi_id);
+ err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS, &qs,
+ sizeof (virtchnl_queue_select_t), es,
+ sizeof (virtchnl_eth_stats_t));
+
+ avf_stats_log_debug (ad, "get_stats: vsi_id %u\n %U", ad->vsi_id,
+ format_avf_eth_stats, es);
- return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
- &qs, sizeof (virtchnl_queue_select_t),
- es, sizeof (virtchnl_eth_stats_t));
+ return err;
}
clib_error_t *
virtchnl_version_info_t ver = { 0 };
virtchnl_vf_resource_t res = { 0 };
clib_error_t *error;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
int i, wb_on_itr;
+ u16 rxq_num, txq_num;
avf_adminq_init (vm, ad);
- if ((error = avf_request_queues (vm, ad, clib_max (tm->n_vlib_mains,
- args->rxq_num))))
+ rxq_num = args->rxq_num ? args->rxq_num : 1;
+ txq_num = args->txq_num ? args->txq_num : vlib_get_n_threads ();
+
+ if ((error = avf_request_queues (vm, ad, clib_max (txq_num, rxq_num))))
{
/* we failed to get more queues, but still we want to proceed */
clib_error_free (error);
ad->vsi_id = res.vsi_res[0].vsi_id;
ad->cap_flags = res.vf_cap_flags;
ad->num_queue_pairs = res.num_queue_pairs;
+ ad->n_rx_queues = clib_min (rxq_num, res.num_queue_pairs);
+ ad->n_tx_queues = clib_min (txq_num, res.num_queue_pairs);
ad->max_vectors = res.max_vectors;
ad->max_mtu = res.max_mtu;
ad->rss_key_size = res.rss_key_size;
ad->rss_lut_size = res.rss_lut_size;
+ ad->n_rx_irqs = ad->max_vectors > ad->n_rx_queues ? ad->n_rx_queues : 1;
+
+ if (ad->max_vectors > ad->n_rx_queues)
+ ad->flags |= AVF_DEVICE_F_RX_INT;
+
wb_on_itr = (ad->cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) != 0;
clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
+ if (args->rxq_num != 0 && ad->n_rx_queues != args->rxq_num)
+ return clib_error_return (0,
+ "Number of requested RX queues (%u) is "
"higher than number of available queues (%u)",
+ args->rxq_num, ad->num_queue_pairs);
+
+ if (args->txq_num != 0 && ad->n_tx_queues != args->txq_num)
+ return clib_error_return (0,
+ "Number of requested TX queues (%u) is "
"higher than number of available queues (%u)",
+ args->txq_num, ad->num_queue_pairs);
+
/*
* Disable VLAN stripping
*/
outer = vc.offloads.stripping_support.outer & mask;
inner = vc.offloads.stripping_support.inner & mask;
+ /* Check for ability to modify the VLAN setting */
+ outer =
+ vc.offloads.stripping_support.outer & VIRTCHNL_VLAN_TOGGLE ? outer : 0;
+ inner =
+ vc.offloads.stripping_support.inner & VIRTCHNL_VLAN_TOGGLE ? inner : 0;
+
if ((outer || inner) &&
(error = avf_op_disable_vlan_stripping_v2 (vm, ad, outer, inner)))
return error;
/*
* Init Queues
*/
- if (args->rxq_num == 0)
- {
- args->rxq_num = 1;
- }
- else if (args->rxq_num > ad->num_queue_pairs)
- {
- args->rxq_num = ad->num_queue_pairs;
- avf_log_warn (ad, "Requested more rx queues than queue pairs available."
- "Using %u rx queues.", args->rxq_num);
- }
-
- for (i = 0; i < args->rxq_num; i++)
+ for (i = 0; i < ad->n_rx_queues; i++)
if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
return error;
- for (i = 0; i < tm->n_vlib_mains; i++)
+ for (i = 0; i < ad->n_tx_queues; i++)
if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
return error;
- if (ad->max_vectors > ad->n_rx_queues)
- {
- ad->flags |= AVF_DEVICE_F_RX_INT;
- ad->n_rx_irqs = args->rxq_num;
- }
- else
- ad->n_rx_irqs = 1;
-
if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
(error = avf_op_config_rss_lut (vm, ad)))
return error;
if (is_irq == 0)
avf_op_get_stats (vm, ad, &ad->eth_stats);
- /* *INDENT-OFF* */
vec_foreach (e, ad->events)
{
avf_log_debug (ad, "event: %s (%u) sev %d",
flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
VNET_HW_INTERFACE_FLAG_LINK_UP);
vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
- vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index,
- mbps * 1000);
+ vnet_hw_interface_set_link_speed (
+ vnm, ad->hw_if_index,
+ (mbps == UINT32_MAX) ? UINT32_MAX : mbps * 1000);
ad->link_speed = mbps;
}
else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
}
}
}
- /* *INDENT-ON* */
vec_reset_length (ad->events);
return;
clib_error_t *
avf_op_program_flow (vlib_main_t *vm, avf_device_t *ad, int is_create,
- u8 *rule, u32 rule_len, u8 *program_status,
- u32 status_len)
+ enum virthnl_adv_ops vc_op, u8 *rule, u32 rule_len,
+ u8 *program_status, u32 status_len)
{
+ virtchnl_ops_t op;
+
avf_log_debug (ad, "avf_op_program_flow: vsi_id %u is_create %u", ad->vsi_id,
is_create);
- return avf_send_to_pf (vm, ad,
- is_create ? VIRTCHNL_OP_ADD_FDIR_FILTER :
- VIRTCHNL_OP_DEL_FDIR_FILTER,
- rule, rule_len, program_status, status_len);
+ switch (vc_op)
+ {
+ case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
+ case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
+ op =
+ is_create ? VIRTCHNL_OP_ADD_FDIR_FILTER : VIRTCHNL_OP_DEL_FDIR_FILTER;
+ break;
+ case VIRTCHNL_ADV_OP_ADD_RSS_CFG:
+ case VIRTCHNL_ADV_OP_DEL_RSS_CFG:
+ op = is_create ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ break;
+ default:
+      return clib_error_return (0, "invalid virtchnl opcode");
+ }
+
+ return avf_send_to_pf (vm, ad, op, rule, rule_len, program_status,
+ status_len);
}
static void
else if (req->type == AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE)
req->error = avf_op_config_promisc_mode (vm, ad, req->is_enable);
else if (req->type == AVF_PROCESS_REQ_PROGRAM_FLOW)
- req->error =
- avf_op_program_flow (vm, ad, req->is_add, req->rule, req->rule_len,
- req->program_status, req->status_len);
+ req->error = avf_op_program_flow (vm, ad, req->is_add, req->vc_op,
+ req->rule, req->rule_len,
+ req->program_status, req->status_len);
else
clib_panic ("BUG: unknown avf proceess request type");
/* create local list of device pointers as device pool may grow
* during suspend */
vec_reset_length (dev_pointers);
- /* *INDENT-OFF* */
pool_foreach_index (i, am->devices)
{
vec_add1 (dev_pointers, avf_get_device (i));
{
avf_process_one_device (vm, dev_pointers[i], irq);
};
- /* *INDENT-ON* */
last_run_duration = vlib_time_now (vm) - last_periodic_time;
}
return 0;
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (avf_process_node) = {
.function = avf_process,
.type = VLIB_NODE_TYPE_PROCESS,
.name = "avf-process",
};
-/* *INDENT-ON* */
static void
avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
if (ad->flags & AVF_DEVICE_F_ELOG)
{
- /* *INDENT-OFF* */
ELOG_TYPE_DECLARE (el) =
{
.format = "avf[%d] irq 0: icr0 0x%x",
.format_args = "i4i4",
};
- /* *INDENT-ON* */
struct
{
u32 dev_instance;
if (ad->flags & AVF_DEVICE_F_ELOG)
{
- /* *INDENT-OFF* */
ELOG_TYPE_DECLARE (el) =
{
.format = "avf[%d] irq %d: received",
.format_args = "i4i2",
};
- /* *INDENT-ON* */
struct
{
u32 dev_instance;
vlib_physmem_free (vm, ad->atq_bufs);
vlib_physmem_free (vm, ad->arq_bufs);
- /* *INDENT-OFF* */
vec_foreach_index (i, ad->rxqs)
{
avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
rxq->n_enqueued);
vec_free (rxq->bufs);
}
- /* *INDENT-ON* */
vec_free (ad->rxqs);
- /* *INDENT-OFF* */
vec_foreach_index (i, ad->txqs)
{
avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
clib_ring_free (txq->rs_slots);
vec_free (txq->tmp_bufs);
vec_free (txq->tmp_descs);
+ clib_spinlock_free (&txq->lock);
}
- /* *INDENT-ON* */
vec_free (ad->txqs);
vec_free (ad->name);
avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
{
vnet_main_t *vnm = vnet_get_main ();
+ vnet_eth_interface_registration_t eir = {};
avf_main_t *am = &avf_main;
avf_device_t *ad, **adp;
vlib_pci_dev_handle_t h;
if (avf_validate_queue_size (args) != 0)
return;
- /* *INDENT-OFF* */
pool_foreach (adp, am->devices) {
if ((*adp)->pci_addr.as_u32 == args->addr.as_u32)
{
return;
}
}
- /* *INDENT-ON* */
pool_get (am->devices, adp);
adp[0] = ad = clib_mem_alloc_aligned (sizeof (avf_device_t),
ad->name = vec_dup (args->name);
if (args->enable_elog)
- ad->flags |= AVF_DEVICE_F_ELOG;
+ {
+ ad->flags |= AVF_DEVICE_F_ELOG;
+ avf_elog_init ();
+ }
if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
&h)))
goto error;
/* create interface */
- error = ethernet_register_interface (vnm, avf_device_class.index,
- ad->dev_instance, ad->hwaddr,
- &ad->hw_if_index, avf_flag_change);
-
- if (error)
- goto error;
+ eir.dev_class_index = avf_device_class.index;
+ eir.dev_instance = ad->dev_instance;
+ eir.address = ad->hwaddr;
+ eir.cb.flag_change = avf_flag_change;
+ ad->hw_if_index = vnet_eth_register_interface (vnm, &eir);
- /* Indicate ability to support L3 DMAC filtering and
- * initialize interface to L3 non-promisc mode */
- vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ad->hw_if_index);
- hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_MAC_FILTER |
- VNET_HW_INTERFACE_CAP_SUPPORTS_L4_TX_CKSUM |
- VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO;
ethernet_set_flags (vnm, ad->hw_if_index,
ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
- vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
- hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
vnet_hw_if_set_input_node (vnm, ad->hw_if_index, avf_input_node.index);
+ /* set hw interface caps */
+ vnet_hw_if_set_caps (vnm, ad->hw_if_index,
+ VNET_HW_IF_CAP_INT_MODE | VNET_HW_IF_CAP_MAC_FILTER |
+ VNET_HW_IF_CAP_TX_CKSUM | VNET_HW_IF_CAP_TCP_GSO);
+
for (i = 0; i < ad->n_rx_queues; i++)
{
u32 qi, fi;
}
ad->rxqs[i].queue_index = qi;
}
+
+ for (i = 0; i < ad->n_tx_queues; i++)
+ {
+ u32 qi = vnet_hw_if_register_tx_queue (vnm, ad->hw_if_index, i);
+ ad->txqs[i].queue_index = qi;
+ }
+
+ for (i = 0; i < vlib_get_n_threads (); i++)
+ {
+ u32 qi = ad->txqs[i % ad->n_tx_queues].queue_index;
+ vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
+ }
+
vnet_hw_if_update_runtime_data (vnm, ad->hw_if_index);
if (pool_elts (am->devices) == 1)
}
clib_error_t *
-avf_program_flow (u32 dev_instance, int is_add, u8 *rule, u32 rule_len,
- u8 *program_status, u32 status_len)
+avf_program_flow (u32 dev_instance, int is_add, enum virthnl_adv_ops vc_op,
+ u8 *rule, u32 rule_len, u8 *program_status, u32 status_len)
{
vlib_main_t *vm = vlib_get_main ();
avf_process_req_t req;
req.dev_instance = dev_instance;
req.type = AVF_PROCESS_REQ_PROGRAM_FLOW;
req.is_add = is_add;
+ req.vc_op = vc_op;
req.rule = rule;
req.rule_len = rule_len;
req.program_status = program_status;
return avf_process_request (vm, &req);
}
-/* *INDENT-OFF* */
VNET_DEVICE_CLASS (avf_device_class, ) = {
.name = "Adaptive Virtual Function (AVF) interface",
.clear_counters = avf_clear_hw_interface_counters,
.tx_function_error_strings = avf_tx_func_error_strings,
.flow_ops_function = avf_flow_ops_fn,
};
-/* *INDENT-ON* */
clib_error_t *
avf_init (vlib_main_t * vm)
return 0;
}
-/* *INDENT-OFF* */
-VLIB_INIT_FUNCTION (avf_init) =
-{
- .runs_after = VLIB_INITS ("pci_bus_init"),
-};
-/* *INDENT-OFF* */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
+VLIB_INIT_FUNCTION (avf_init);