int socket_id;
/* Per adapter EAL service */
uint32_t service_id;
+ /* Adapter started flag, accessed under rx_lock */
+ uint8_t rxa_started;
} __rte_cache_aligned;
/* Per eth device */
nb_rx_queues = dev_info->dev->data->nb_rx_queues;
if (dev_info->rx_queue == NULL)
continue;
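+ /* Skip devices using an internal event port; the service function does not poll their queues */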
+ if (dev_info->internal_event_port)
+ continue;
for (q = 0; q < nb_rx_queues; q++) {
struct eth_rx_queue_info *queue_info =
&dev_info->rx_queue[q];
static inline void
fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
- uint8_t dev_id,
+ uint16_t eth_dev_id,
uint16_t rx_queue_id,
struct rte_mbuf **mbufs,
uint16_t num)
{
uint32_t i;
struct eth_device_info *eth_device_info =
- &rx_adapter->eth_devices[dev_id];
+ &rx_adapter->eth_devices[eth_dev_id];
struct eth_rx_queue_info *eth_rx_queue_info =
&eth_device_info->rx_queue[rx_queue_id];
* the hypervisor's switching layer where adjustments can be made to deal with
* it.
*/
-static inline uint32_t
+static inline void
eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
{
uint32_t num_queue;
*/
if (buf->count >= BATCH_SIZE)
flush_event_buffer(rx_adapter);
- if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))
- break;
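+ /* Not enough room for a full burst; save the WRR position so the next service run resumes at this queue */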
+ if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {
+ rx_adapter->wrr_pos = wrr_pos;
+ return;
+ }
stats->rx_poll_count++;
n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);
if (nb_rx > max_nb_rx) {
rx_adapter->wrr_pos =
(wrr_pos + 1) % rx_adapter->wrr_len;
- return nb_rx;
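+ /* Break instead of returning so the buffer flush at the end of the function still runs */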
+ break;
}
}
wrr_pos = 0;
}
- return nb_rx;
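+ /* eth_rx_poll() no longer returns a count; flush a full batch before exiting */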
+ if (buf->count >= BATCH_SIZE)
+ flush_event_buffer(rx_adapter);
}
static int
event_eth_rx_adapter_service_func(void *args)
{
struct rte_event_eth_rx_adapter *rx_adapter = args;
- struct rte_eth_event_enqueue_buffer *buf;
- buf = &rx_adapter->event_enqueue_buffer;
if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
return 0;
- if (eth_rx_poll(rx_adapter) == 0 && buf->count)
- flush_event_buffer(rx_adapter);
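+ /* Do nothing if the adapter has not been started; rxa_started is read under rx_lock */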
+ if (!rx_adapter->rxa_started) {
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ return 0;
+ }
+ eth_rx_poll(rx_adapter);
rte_spinlock_unlock(&rx_adapter->rx_lock);
return 0;
}
&rte_eth_devices[i]);
}
- if (use_service)
+ if (use_service) {
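+ /* Update the started flag under rx_lock to synchronize with the service function */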
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ rx_adapter->rxa_started = start;
rte_service_runstate_set(rx_adapter->service_id, start);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ }
return 0;
}
rx_adapter->conf_arg = conf_arg;
strcpy(rx_adapter->mem_name, mem_name);
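+ /* Size the array by RTE_MAX_ETHPORTS so a port attached after adapter creation can be handled */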
rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
- rte_eth_dev_count() *
+ RTE_MAX_ETHPORTS *
sizeof(struct eth_device_info), 0,
socket_id);
rte_convert_rss_key((const uint32_t *)default_rss_key,
return -ENOMEM;
}
rte_spinlock_init(&rx_adapter->rx_lock);
- for (i = 0; i < rte_eth_dev_count(); i++)
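+ /* Set up a dev pointer for every possible port, not only those currently attached */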
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++)
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
event_eth_rx_adapter[id] = rx_adapter;
&rte_eth_devices[eth_dev_id],
rx_queue_id, queue_conf);
if (ret == 0) {
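+ /* Device provides an internal event port; its queues bypass the service function */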
+ dev_info->internal_event_port = 1;
update_queue_info(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
rx_queue_id,
}
} else {
rte_spinlock_lock(&rx_adapter->rx_lock);
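+ /* No internal event port; queues on this device are polled by the adapter's service function */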
+ dev_info->internal_event_port = 0;
ret = init_service(rx_adapter, id);
if (ret == 0)
ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,