Code Review
/
deb_dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
Imported Upstream version 16.11
[deb_dpdk.git]
/
lib
/
librte_kni
/
rte_kni.c
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index ea9baf4..a80cefd 100644 (file)
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -210,14 +210,18 @@ rte_kni_init(unsigned int max_kni_ifaces)
if (max_kni_ifaces == 0) {
RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
max_kni_ifaces);
if (max_kni_ifaces == 0) {
RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
max_kni_ifaces);
- rte_panic("Unable to initialize KNI\n");
+ RTE_LOG(ERR, KNI, "Unable to initialize KNI\n");
+ return;
}
/* Check FD and open */
if (kni_fd < 0) {
kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
}
/* Check FD and open */
if (kni_fd < 0) {
kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
- if (kni_fd < 0)
- rte_panic("Can not open /dev/%s\n", KNI_DEVICE);
+ if (kni_fd < 0) {
+ RTE_LOG(ERR, KNI,
+ "Can not open /dev/%s\n", KNI_DEVICE);
+ return;
+ }
}
/* Allocate slot objects */
}
/* Allocate slot objects */
@@ -307,8 +311,8 @@ rte_kni_init(unsigned int max_kni_ifaces)
return;
kni_fail:
return;
kni_fail:
-	rte_panic("Unable to allocate memory for max_kni_ifaces:%d. Increase the amount of hugepages memory\n",
-			max_kni_ifaces);
+	RTE_LOG(ERR, KNI, "Unable to allocate memory for max_kni_ifaces:%d."
+		"Increase the amount of hugepages memory\n", max_kni_ifaces);
}
}
@@ -321,7 +325,6 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
struct rte_kni_device_info dev_info;
struct rte_kni *ctx;
char intf_name[RTE_KNI_NAMESIZE];
struct rte_kni_device_info dev_info;
struct rte_kni *ctx;
char intf_name[RTE_KNI_NAMESIZE];
- char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
struct rte_kni_memzone_slot *slot = NULL;
const struct rte_memzone *mz;
struct rte_kni_memzone_slot *slot = NULL;
@@ -413,14 +416,6 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
dev_info.sync_va = mz->addr;
dev_info.sync_phys = mz->phys_addr;
dev_info.sync_va = mz->addr;
dev_info.sync_phys = mz->phys_addr;
-
- /* MBUF mempool */
- snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_OBJ_NAME,
- pktmbuf_pool->name);
- mz = rte_memzone_lookup(mz_name);
- KNI_MEM_CHECK(mz == NULL);
- dev_info.mbuf_va = mz->addr;
- dev_info.mbuf_phys = mz->phys_addr;
ctx->pktmbuf_pool = pktmbuf_pool;
ctx->group_id = conf->group_id;
ctx->slot_id = slot->id;
ctx->pktmbuf_pool = pktmbuf_pool;
ctx->group_id = conf->group_id;
ctx->slot_id = slot->id;
@@ -456,6 +451,20 @@ kni_free_fifo(struct rte_kni_fifo *fifo)
} while (ret);
}
} while (ret);
}
+static void
+kni_free_fifo_phy(struct rte_kni_fifo *fifo)
+{
+ void *mbuf_phys;
+ int ret;
+
+ do {
+ ret = kni_fifo_get(fifo, &mbuf_phys, 1);
+ /*
+ * TODO: free mbufs
+ */
+ } while (ret);
+}
+
int
rte_kni_release(struct rte_kni *kni)
{
int
rte_kni_release(struct rte_kni *kni)
{
@@ -473,8 +482,8 @@ rte_kni_release(struct rte_kni *kni)
/* mbufs in all fifo should be released, except request/response */
kni_free_fifo(kni->tx_q);
/* mbufs in all fifo should be released, except request/response */
kni_free_fifo(kni->tx_q);
-	kni_free_fifo(kni->rx_q);
-	kni_free_fifo(kni->alloc_q);
+	kni_free_fifo_phy(kni->rx_q);
+	kni_free_fifo_phy(kni->alloc_q);
kni_free_fifo(kni->free_q);
slot_id = kni->slot_id;
kni_free_fifo(kni->free_q);
slot_id = kni->slot_id;
@@ -484,8 +493,9 @@ rte_kni_release(struct rte_kni *kni)
/* Release memzone */
if (slot_id > kni_memzone_pool.max_ifaces) {
/* Release memzone */
if (slot_id > kni_memzone_pool.max_ifaces) {
-		rte_panic("KNI pool: corrupted slot ID: %d, max: %d\n",
+		RTE_LOG(ERR, KNI, "KNI pool: corrupted slot ID: %d, max: %d\n",
 			slot_id, kni_memzone_pool.max_ifaces);
+		return -1;
}
kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
}
kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);
@@ -507,7 +517,8 @@ rte_kni_handle_request(struct rte_kni *kni)
return 0; /* It is OK of can not getting the request mbuf */
if (req != kni->sync_addr) {
return 0; /* It is OK of can not getting the request mbuf */
if (req != kni->sync_addr) {
- rte_panic("Wrong req pointer %p\n", req);
+ RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req);
+ return -1;
}
/* Analyze the request and call the relevant actions for it */
}
/* Analyze the request and call the relevant actions for it */
@@ -538,10 +549,25 @@ rte_kni_handle_request(struct rte_kni *kni)
return 0;
}
return 0;
}
+static void *
+va2pa(struct rte_mbuf *m)
+{
+ return (void *)((unsigned long)m -
+ ((unsigned long)m->buf_addr -
+ (unsigned long)m->buf_physaddr));
+}
+
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
- unsigned ret = kni_fifo_put(kni->rx_q, (void **)mbufs, num);
+ void *phy_mbufs[num];
+ unsigned int ret;
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ phy_mbufs[i] = va2pa(mbufs[i]);
+
+ ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);
/* Get mbufs from free_q and then free them */
kni_free_mbufs(kni);
/* Get mbufs from free_q and then free them */
kni_free_mbufs(kni);
@@ -579,6 +605,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
{
int i, ret;
struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
{
int i, ret;
struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
+ void *phys[MAX_MBUF_BURST_NUM];
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
offsetof(struct rte_kni_mbuf, pool));
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
offsetof(struct rte_kni_mbuf, pool));
@@ -608,13 +635,14 @@ kni_allocate_mbufs(struct rte_kni *kni)
RTE_LOG(ERR, KNI, "Out of memory\n");
break;
}
RTE_LOG(ERR, KNI, "Out of memory\n");
break;
}
+ phys[i] = va2pa(pkts[i]);
}
/* No pkt mbuf alocated */
if (i <= 0)
return;
}
/* No pkt mbuf alocated */
if (i <= 0)
return;
-	ret = kni_fifo_put(kni->alloc_q, (void **)pkts, i);
+	ret = kni_fifo_put(kni->alloc_q, phys, i);
/* Check if any mbufs not put into alloc_q, and then free them */
if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
/* Check if any mbufs not put into alloc_q, and then free them */
if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {