*/
#define RSS_RETA_CONF_ARRAY_SIZE (ETH_RSS_RETA_SIZE_512/RTE_RETA_GROUP_SIZE)
+/*
+ * Grow the array pointed to by 'loc' to hold 'n' elements.
+ * On failure logs an error and makes the enclosing function
+ * return -ENOMEM; 'loc' is left untouched so the caller's
+ * cleanup path can still free the original buffer.
+ * Multi-statement macro wrapped in do { } while (0) so it is
+ * safe inside unbraced if/else bodies.
+ */
+#define NETBE_REALLOC(loc, n) do { \
+	void *_ptr = rte_realloc((loc), sizeof(*(loc)) * (n), \
+		RTE_CACHE_LINE_SIZE); \
+	if (_ptr == NULL) { \
+		RTE_LOG(ERR, USER1, \
+			"%s: failed to reallocate memory\n", \
+			__func__); \
+		return -ENOMEM; \
+	} \
+	(loc) = _ptr; \
+} while (0)
+
static volatile int force_quit;
static struct netbe_cfg becfg;
lc = find_initilized_lcore(cfg, prt->lcore[j]);
if (lc == NULL) {
+ NETBE_REALLOC(cfg->cpu, cfg->cpu_num + 1);
lc = &cfg->cpu[cfg->cpu_num];
lc->id = prt->lcore[j];
cfg->cpu_num++;
}
+
+ NETBE_REALLOC(lc->prtq, lc->prtq_num + 1);
lc->prtq[lc->prtq_num].rxqid = j;
lc->prtq[lc->prtq_num].txqid = j;
lc->prtq[lc->prtq_num].port = *prt;
-
lc->prtq_num++;
}
}
uint32_t i, n, sid, j;
struct netbe_port *prt;
- n = RTE_MIN(RTE_DIM(cfg->prt), (uint32_t)argc);
+ n = (uint32_t)argc;
rc = 0;
for (i = 0; i != n; i++) {
+ NETBE_REALLOC(cfg->prt, cfg->prt_num + 1);
rc = parse_netbe_arg(cfg->prt + i, argv[i]);
if (rc != 0) {
RTE_LOG(ERR, USER1,
__func__, argv[i], rc);
return rc;
}
+ cfg->prt_num++;
}
- cfg->prt_num = i;
/* calculate number of queues per lcore. */
rc = calculate_nb_prtq(cfg);
rte_ip_frag_table_destroy(lc->ftbl);
rte_lpm_free(lc->lpm4);
rte_lpm6_free(lc->lpm6);
+ rte_free(lc->prtq[prtqid].port.lcore);
+ lc->prtq[prtqid].port.nb_lcore = 0;
+ rte_free(lc->prtq);
+ lc->prtq_num = 0;
return rc;
}
}
rte_ip_frag_table_destroy(cfg->cpu[i].ftbl);
rte_lpm_free(cfg->cpu[i].lpm4);
rte_lpm6_free(cfg->cpu[i].lpm6);
+
+ rte_free(cfg->cpu[i].prtq);
+ cfg->cpu[i].prtq_num = 0;
}
- memset(cfg->cpu, 0, sizeof(cfg->cpu));
+ rte_free(cfg->cpu);
cfg->cpu_num = 0;
+ for (i = 0; i != cfg->prt_num; i++) {
+ rte_free(cfg->prt[i].lcore);
+ cfg->prt[i].nb_lcore = 0;
+ }
+ rte_free(cfg->prt);
+ cfg->prt_num = 0;
}
static int
struct netbe_port {
uint32_t id;
uint32_t nb_lcore;
- uint32_t lcore[RTE_MAX_LCORE];
+ uint32_t *lcore;
uint32_t mtu;
uint32_t rx_offload;
uint32_t tx_offload;
uint32_t prtq_num;
uint32_t dst4_num;
uint32_t dst6_num;
- struct netbe_dev prtq[RTE_MAX_ETHPORTS * RTE_MAX_LCORE];
+ struct netbe_dev *prtq;
struct tle_udp_dest dst4[LCORE_MAX_DST];
struct tle_udp_dest dst6[LCORE_MAX_DST];
struct rte_ip_frag_death_row death_row;
uint32_t promisc;
uint32_t prt_num;
uint32_t cpu_num;
- struct netbe_port prt[RTE_MAX_ETHPORTS];
- struct netbe_lcore cpu[RTE_MAX_LCORE];
+ struct netbe_port *prt;
+ struct netbe_lcore *cpu;
};
/*
parse_netbe_arg(struct netbe_port *prt, const char *arg)
{
int32_t rc;
- uint32_t i, j;
+ uint32_t i, j, nc;
static const char *keys_man[] = {
"port",
return rc;
prt->id = val[0].u64;
+
+ for (i = 0, nc = 0; i < RTE_MAX_LCORE; i++)
+ nc += CPU_ISSET(i, &val[1].cpuset);
+ prt->lcore = rte_zmalloc(NULL, nc * sizeof(prt->lcore[0]),
+ RTE_CACHE_LINE_SIZE);
+ prt->nb_lcore = nc;
+
for (i = 0, j = 0; i < RTE_MAX_LCORE; i++)
if (CPU_ISSET(i, &val[1].cpuset))
prt->lcore[j++] = i;
- prt->nb_lcore = j;
+
prt->mtu = val[2].u64;
prt->rx_offload = val[3].u64;
prt->tx_offload = val[4].u64;