/*
 * Copyright (c) 2018 Ant Financial Services Group.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include <netinet/in.h>
#include <netinet/ip6.h>
/* Maps a set of HW-recognized ptype flags to the RX callback that handles them. */
struct ptype2cb {
	uint32_t mask;
	const char *name;
	rte_rx_callback_fn fn;
};
/* Bit flags summarizing which ptypes the HW can report (see get_ptypes()). */
enum {
	ETHER_ARP_PTYPE = 0x1,
	IPV4_PTYPE = 0x2,
	IPV4_EXT_PTYPE = 0x4,
	IPV6_PTYPE = 0x8,
	IPV6_EXT_PTYPE = 0x10,
	TCP_PTYPE = 0x20,
	UDP_PTYPE = 0x40,
	ICMP_PTYPE = 0x80,
};
static inline uint64_t
_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2)
{
	return il2 | il3 << 7 | il4 << 16 | tso << 24 | ol3 << 40 | ol2 << 49;
}
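/*
 * The shift counts above mirror the bit-field layout of rte_mbuf::tx_offload
 * (l2_len:7, l3_len:9, l4_len:8, tso_segsz:16, outer_l3_len:9, outer_l2_len:7),
 * so the packed value can be stored into m->tx_offload with a single write
 * instead of assigning each bit-field separately.
 */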
static inline int
fill_pkt_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t l3, uint32_t l4)
{
	if (l2 + l3 + l4 > m->pkt_len)
		return -EINVAL;

	m->tx_offload = _mbuf_tx_offload(l2, l3, l4, 0, 0, 0);
	return 0;
}
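/*
 * For example, a plain TCP/IPv4 packet with no VLAN tag and no IP or TCP
 * options would be recorded as fill_pkt_hdr_len(m, 14, 20, 20): a 14-byte
 * Ethernet header, a 20-byte IPv4 header and a 20-byte TCP header.
 */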
static inline int
is_ipv4_frag(const struct ipv4_hdr *iph)
{
	const uint16_t mask = rte_cpu_to_be_16(~IPV4_HDR_DF_FLAG);

	return ((mask & iph->fragment_offset) != 0);
}
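/*
 * fragment_offset carries the 13-bit fragment offset plus the MF and DF
 * flags. Masking out only DF therefore leaves a non-zero value for any
 * fragment: either MF is set (first/middle fragment) or the offset itself is
 * non-zero (last fragment).
 */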
static inline uint32_t
get_tcp_header_size(struct rte_mbuf *m, uint32_t l2_len, uint32_t l3_len)
{
	const struct tcp_hdr *tcp;

	tcp = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *, l2_len + l3_len);
	/* the upper nibble of data_off is the TCP header length in 32-bit words. */
	return (tcp->data_off >> 4) * 4;
}
static inline int32_t
adjust_ipv4_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv4_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->total_length) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	} else if (plen > m->pkt_len) {
		/* the IP header claims more data than the mbuf holds. */
		return -1;
	}

	return 0;
}
static inline int32_t
adjust_ipv6_pktlen(struct rte_mbuf *m, uint32_t l2_len)
{
	uint32_t plen, trim;
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2_len);
	plen = rte_be_to_cpu_16(iph->payload_len) + sizeof(*iph) + l2_len;
	if (plen < m->pkt_len) {
		trim = m->pkt_len - plen;
		rte_pktmbuf_trim(m, trim);
	} else if (plen > m->pkt_len) {
		/* the IP header claims more data than the mbuf holds. */
		return -1;
	}

	return 0;
}
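/*
 * The usual reason for trimming is Ethernet minimum-frame padding: a 40-byte
 * TCP/IPv4 ACK arrives as a 60-byte frame, so pkt_len is 60 while
 * total_length (40) plus the 14-byte L2 header is only 54, and the 6 bytes
 * of padding are removed here so they are not mistaken for TCP payload.
 */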
static inline uint32_t
get_ipv4_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t proto, uint32_t frag)
{
	const struct ipv4_hdr *iph;
	int32_t dlen, len;

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2);
	len = (iph->version_ihl & IPV4_HDR_IHL_MASK) * IPV4_IHL_MULTIPLIER;

	if (frag != 0 && is_ipv4_frag(iph)) {
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_FRAG;
	}

	if (len > dlen || (proto <= IPPROTO_MAX && iph->next_proto_id != proto))
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}
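/*
 * IHL counts the IPv4 header in 32-bit words, so an option-less header (IHL
 * of 5) yields 20 bytes and the maximum (IHL of 15) yields 60 bytes. Passing
 * proto = IPPROTO_MAX + 1 disables the next-protocol check, which is how the
 * callers below probe the header length before knowing the L4 type.
 */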
static inline uint32_t
get_ipv6x_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t *fproto)
{
	const struct ipv6_hdr *ip6h;
	const struct ip6_ext *ipx;
	uint32_t nproto;
	int32_t dlen, len, ofs;

	ip6h = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, l2);
	nproto = ip6h->proto;
	len = sizeof(struct ipv6_hdr);

	dlen = rte_pktmbuf_data_len(m);
	dlen -= l2;

	ofs = l2 + len;
	ipx = rte_pktmbuf_mtod_offset(m, const struct ip6_ext *, ofs);

	while (ofs > 0 && len < dlen) {
		switch (nproto) {
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			/* ip6e_len counts 8-byte units, excluding the first 8 bytes. */
			ofs = (ipx->ip6e_len + 1) << 3;
			break;
		case IPPROTO_AH:
			/* the AH length field counts 4-byte units, minus 2. */
			ofs = (ipx->ip6e_len + 2) << 2;
			break;
		case IPPROTO_FRAGMENT:
			/*
			 * tso_segsz is not used by RX, so use it as temporary
			 * buffer to store the fragment offset.
			 */
			m->tso_segsz = l2 + len;
			ofs = sizeof(struct ip6_frag);
			m->packet_type &= ~RTE_PTYPE_L4_MASK;
			m->packet_type |= RTE_PTYPE_L4_FRAG;
			break;
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_ICMPV6:
			/* reached an upper-layer header: record it and stop. */
			*fproto = nproto;
			ofs = 0;
			break;
		default:
			ofs = 0;
			break;
		}

		if (ofs > 0) {
			nproto = ipx->ip6e_nxt;
			len += ofs;
			ipx += ofs / sizeof(*ipx);
		}
	}

	/* unrecognized or invalid packet. */
	if (*fproto == 0 || len > dlen)
		m->packet_type = RTE_PTYPE_UNKNOWN;

	return len;
}
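/*
 * Example walk: for IPv6(40B) + Hop-by-Hop(8B) + Fragment(8B) + TCP, the
 * loop above ends with len = 56 and *fproto = IPPROTO_TCP, marks the mbuf
 * as RTE_PTYPE_L4_FRAG, and stashes the fragment-header offset (l2 + 48) in
 * tso_segsz for process_ipv6_frag() to pick up later.
 */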
static inline uint32_t
get_ipv6_hdr_len(struct rte_mbuf *m, uint32_t l2, uint32_t fproto)
{
	const struct ipv6_hdr *iph;

	iph = rte_pktmbuf_mtod_offset(m, const struct ipv6_hdr *, l2);

	if (iph->proto == fproto)
		return sizeof(struct ipv6_hdr);

	return get_ipv6x_hdr_len(m, l2, &fproto);
}
static inline struct rte_mbuf *
process_ipv4_frag(struct rte_mbuf *m, struct glue_ctx *ctx,
	uint32_t l2_len, uint32_t l3_len)
{
	struct ipv4_hdr *iph;

	m->l2_len = l2_len;
	m->l3_len = l3_len;
	/* fixme: ip checksum should be checked here.
	 * After reassembly, the ip checksum would be invalid.
	 */
	m = rte_ipv4_frag_reassemble_packet(ctx->frag_tbl,
		&ctx->frag_dr, m, rte_rdtsc(),
		rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len));
	rte_ip_frag_free_death_row(&ctx->frag_dr, 3);
	if (m == NULL)
		return NULL;

	iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
	switch (iph->next_proto_id) {
	case IPPROTO_TCP:
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_TCP;
		break;
	case IPPROTO_UDP:
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_UDP;
		break;
	}

	return m;
}
static inline struct rte_mbuf *
process_ipv6_frag(struct rte_mbuf *m, struct glue_ctx *ctx,
	uint32_t l2_len, uint32_t l3_len)
{
	struct ipv6_hdr *ip6h;

	m->l2_len = l2_len;
	m->l3_len = l3_len;
	m = rte_ipv6_frag_reassemble_packet(ctx->frag_tbl,
		&ctx->frag_dr, m, rte_rdtsc(),
		rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, l2_len),
		rte_pktmbuf_mtod_offset(m, struct ipv6_extension_fragment *,
			m->tso_segsz));
	rte_ip_frag_free_death_row(&ctx->frag_dr, 3);
	if (m == NULL)
		return NULL;

	ip6h = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);
	switch (ip6h->proto) {
	case IPPROTO_TCP:
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_TCP;
		break;
	case IPPROTO_UDP:
		m->packet_type &= ~RTE_PTYPE_L4_MASK;
		m->packet_type |= RTE_PTYPE_L4_UDP;
		break;
	}

	return m;
}
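/*
 * Both reassembly helpers above rely on librte_ip_frag semantics: the
 * rte_ipv*_frag_reassemble_packet() calls return NULL while fragments are
 * still missing (the mbuf is absorbed into ctx->frag_tbl) and only hand back
 * a chained mbuf once the datagram is complete, after which the L4 ptype is
 * refreshed from the reassembled header. rte_ip_frag_free_death_row() then
 * releases any mbufs the fragment table evicted while this one was added.
 */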
static inline struct rte_mbuf *
fill_ptypes_and_hdr_len(struct glue_ctx *ctx, struct rte_mbuf *m)
{
	uint32_t dlen, l2_len, l3_len, l4_len, proto;
	const struct ether_hdr *eth;
	uint32_t ptypes;
	uint16_t etp;
	int32_t error;

	dlen = rte_pktmbuf_data_len(m);
	l2_len = sizeof(*eth);

	eth = rte_pktmbuf_mtod(m, const struct ether_hdr *);
	etp = eth->ether_type;
	while (etp == rte_be_to_cpu_16(ETHER_TYPE_VLAN)) {
		etp = rte_pktmbuf_mtod_offset(m, struct vlan_hdr *,
			l2_len)->eth_proto;
		l2_len += sizeof(struct vlan_hdr);
	}

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_ARP))
		return arp_recv(ctx, m, l2_len);

	if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv4)) {
		const struct ipv4_hdr *hdr;

		hdr = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *, l2_len);
		error = adjust_ipv4_pktlen(m, l2_len);
		l3_len = get_ipv4_hdr_len(m, l2_len, IPPROTO_MAX + 1, 1);

		if ((m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_FRAG) {
			m = process_ipv4_frag(m, ctx, l2_len, l3_len);
			if (m == NULL)
				return NULL;
			hdr = rte_pktmbuf_mtod_offset(m, const struct ipv4_hdr *,
				m->l2_len);
			l3_len = get_ipv4_hdr_len(m, m->l2_len, IPPROTO_MAX + 1, 0);
		}

		switch (hdr->next_proto_id) {
		case IPPROTO_ICMP:
			return icmp_recv(ctx, m, l2_len, l3_len);
		case IPPROTO_TCP:
			ptypes = RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER;
			l4_len = get_tcp_header_size(m, l2_len, l3_len);
			break;
		case IPPROTO_UDP:
			ptypes = RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER;
			l4_len = sizeof(struct udp_hdr);
			break;
		default:
			GLUE_LOG(ERR, "drop ipv4 pkt of unknown L4: (%d)",
				hdr->next_proto_id);
			rte_pktmbuf_free(m);
			return NULL;
		}
	} else if (etp == rte_be_to_cpu_16(ETHER_TYPE_IPv6) &&
			dlen >= l2_len + sizeof(struct ipv6_hdr) + sizeof(struct udp_hdr)) {
		error = adjust_ipv6_pktlen(m, l2_len);
		proto = 0;
		l3_len = get_ipv6x_hdr_len(m, l2_len, &proto);

		if ((m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_FRAG) {
			m = process_ipv6_frag(m, ctx, l2_len, l3_len);
			if (m == NULL)
				return NULL;
			l3_len = get_ipv6x_hdr_len(m, m->l2_len, &proto);
		}

		switch (proto) {
		case IPPROTO_TCP:
			ptypes = RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER;
			l4_len = get_tcp_header_size(m, l2_len, l3_len);
			break;
		case IPPROTO_UDP:
			ptypes = RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
				RTE_PTYPE_L2_ETHER;
			l4_len = sizeof(struct udp_hdr);
			break;
		case IPPROTO_ICMPV6:
			return icmp6_recv(ctx, m, l2_len, l3_len);
		default:
			GLUE_DEBUG("drop ipv6 pkt of unknown L4: (%x)", proto);
			rte_pktmbuf_free(m);
			return NULL;
		}
	} else {
		GLUE_DEBUG("Drop unknown L3 packet: %x", etp);
		rte_pktmbuf_free(m);
		return NULL;
	}

	m->packet_type = ptypes;
	error = fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);
	if (error) {
		rte_pktmbuf_free(m);
		return NULL;
	}

	return m;
}
/* exclude NULLs from the final list of packets. */
static inline uint32_t
compress_pkt_list(struct rte_mbuf *pkt[], uint32_t nb_pkt, uint32_t nb_zero)
{
	uint32_t i, j, k, l;

	for (j = nb_pkt; nb_zero != 0 && j-- != 0; ) {

		/* found a hole. */
		if (pkt[j] == NULL) {

			/* find how big it is. */
			for (i = j; i-- != 0 && pkt[i] == NULL; )
				;
			/* fill the hole. */
			for (k = j + 1, l = i + 1; k != nb_pkt; k++, l++)
				pkt[l] = pkt[k];

			nb_pkt -= j - i;
			nb_zero -= j - i;
			j = i + 1;
		}
	}

	return nb_pkt;
}
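/*
 * Example: after ARP/ICMP packets have been consumed and their slots set to
 * NULL, a burst of {tcp0, NULL, NULL, tcp1, NULL} with nb_zero = 3 is
 * compacted in place to {tcp0, tcp1} and the function returns 2, which then
 * becomes the burst size seen by the application.
 */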
static inline struct rte_mbuf *
common_fill_hdr_len(struct rte_mbuf *m, uint32_t tp, struct glue_ctx *ctx)
{
	uint32_t l4_len, l3_len, l2_len = sizeof(struct ether_hdr);
	int32_t error;

	switch (tp) {
	/* possibly fragmented packets. */
	case (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER):
	case (RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER):
		l3_len = get_ipv4_hdr_len(m, l2_len, IPPROTO_MAX + 1, 1);
		if ((m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_FRAG) {
			m = process_ipv4_frag(m, ctx, l2_len, l3_len);
			if (m == NULL)
				return NULL;
			tp = m->packet_type & (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK |
				RTE_PTYPE_L4_MASK);
		}
		break;
	case (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER):
	case (RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER):
		l3_len = get_ipv6_hdr_len(m, l2_len, IPPROTO_MAX + 1);
		if ((m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_FRAG) {
			m = process_ipv6_frag(m, ctx, l2_len, l3_len);
			if (m == NULL)
				return NULL;
			tp = m->packet_type & (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK |
				RTE_PTYPE_L4_MASK);
		}
		break;
	}

	switch (tp) {
	/* non fragmented tcp packets. */
	case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L2_ETHER):
		l3_len = sizeof(struct ipv4_hdr);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		error = adjust_ipv4_pktlen(m, l2_len);
		break;
	case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L2_ETHER):
		l3_len = sizeof(struct ipv6_hdr);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		error = adjust_ipv6_pktlen(m, l2_len);
		break;
	case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV4_EXT |
			RTE_PTYPE_L2_ETHER):
		l3_len = get_ipv4_hdr_len(m, l2_len,
			IPPROTO_TCP, 0);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		error = adjust_ipv4_pktlen(m, l2_len);
		break;
	case (RTE_PTYPE_L4_TCP | RTE_PTYPE_L3_IPV6_EXT |
			RTE_PTYPE_L2_ETHER):
		l3_len = get_ipv6_hdr_len(m, l2_len, IPPROTO_TCP);
		l4_len = get_tcp_header_size(m, l2_len, l3_len);
		error = adjust_ipv6_pktlen(m, l2_len);
		break;

	/* non fragmented udp packets. */
	case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L2_ETHER):
		l3_len = sizeof(struct ipv4_hdr);
		l4_len = sizeof(struct udp_hdr);
		error = adjust_ipv4_pktlen(m, l2_len);
		break;
	case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L2_ETHER):
		l3_len = sizeof(struct ipv6_hdr);
		l4_len = sizeof(struct udp_hdr);
		error = adjust_ipv6_pktlen(m, l2_len);
		break;
	case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV4_EXT |
			RTE_PTYPE_L2_ETHER):
		l3_len = get_ipv4_hdr_len(m, l2_len,
			IPPROTO_UDP, 0);
		l4_len = sizeof(struct udp_hdr);
		error = adjust_ipv4_pktlen(m, l2_len);
		break;
	case (RTE_PTYPE_L4_UDP | RTE_PTYPE_L3_IPV6_EXT |
			RTE_PTYPE_L2_ETHER):
		l3_len = get_ipv6_hdr_len(m, l2_len, IPPROTO_UDP);
		l4_len = sizeof(struct udp_hdr);
		error = adjust_ipv6_pktlen(m, l2_len);
		break;
	default:
		GLUE_LOG(ERR, "drop unknown pkt");
		rte_pktmbuf_free(m);
		return NULL;
	}

	if (error) {
		rte_pktmbuf_free(m);
		return NULL;
	}
	error = fill_pkt_hdr_len(m, l2_len, l3_len, l4_len);

	return m;
}
/*
 * HW can recognize L2 (ARP) and L3 with/without extensions and L4 (e.g. i40e).
 */
static uint16_t
type0_rx_callback(uint16_t port, uint16_t queue, struct rte_mbuf *pkt[],
	uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp, l2_len, l3_len;
	struct glue_ctx *ctx;
	uint16_t nb_zero = 0;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);

	ctx = user_param;

	for (j = 0; j != nb_pkts; j++) {
		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		case (RTE_PTYPE_L2_ETHER_ARP):
			arp_recv(ctx, pkt[j], sizeof(struct ether_hdr));
			pkt[j] = NULL;
			nb_zero++;
			break;
		case (RTE_PTYPE_L4_ICMP | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER):
		case (RTE_PTYPE_L4_ICMP | RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER):
			l2_len = sizeof(struct ether_hdr);
			l3_len = get_ipv4_hdr_len(pkt[j], l2_len, IPPROTO_ICMP, 0);
			icmp_recv(ctx, pkt[j], l2_len, l3_len);
			pkt[j] = NULL;
			nb_zero++;
			break;
		case (RTE_PTYPE_L4_ICMP | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER):
		case (RTE_PTYPE_L4_ICMP | RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER):
			l2_len = sizeof(struct ether_hdr);
			l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_ICMPV6);
			icmp6_recv(ctx, pkt[j], l2_len, l3_len);
			pkt[j] = NULL;
			nb_zero++;
			break;
		default:
			if (common_fill_hdr_len(pkt[j], tp, ctx) == NULL) {
				pkt[j] = NULL;
				nb_zero++;
			}
			break;
		}
	}

	return compress_pkt_list(pkt, nb_pkts, nb_zero);
}
/*
 * HW can recognize L2/L3/L4 and fragments, but cannot recognize ARP
 * (e.g. ixgbe).
 */
static uint16_t
type1_rx_callback(uint16_t port, uint16_t queue, struct rte_mbuf *pkt[],
	uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint32_t j, tp, l2_len, l3_len;
	struct glue_ctx *ctx;
	uint16_t nb_zero = 0;
	const struct ether_hdr *eth;
	const struct ipv4_hdr *ip4;
	const struct ipv6_hdr *ip6;
	uint16_t etp;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);

	ctx = user_param;

	for (j = 0; j != nb_pkts; j++) {
		tp = pkt[j]->packet_type & (RTE_PTYPE_L4_MASK |
			RTE_PTYPE_L3_MASK | RTE_PTYPE_L2_MASK);

		switch (tp) {
		case RTE_PTYPE_L2_ETHER:
			eth = rte_pktmbuf_mtod(pkt[j], const struct ether_hdr *);
			etp = eth->ether_type;
			if (etp == rte_be_to_cpu_16(ETHER_TYPE_ARP))
				arp_recv(ctx, pkt[j], sizeof(*eth));
			else
				rte_pktmbuf_free(pkt[j]);
			pkt[j] = NULL;
			nb_zero++;
			break;
		case (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER):
		case (RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER):
			ip4 = rte_pktmbuf_mtod_offset(pkt[j],
				const struct ipv4_hdr *,
				sizeof(struct ether_hdr));
			if (ip4->next_proto_id == IPPROTO_ICMP) {
				l2_len = sizeof(struct ether_hdr);
				l3_len = get_ipv4_hdr_len(pkt[j], l2_len, IPPROTO_ICMP, 0);
				icmp_recv(ctx, pkt[j], l2_len, l3_len);
			} else
				rte_pktmbuf_free(pkt[j]);
			pkt[j] = NULL;
			nb_zero++;
			break;
		case (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER):
		case (RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L2_ETHER):
			ip6 = rte_pktmbuf_mtod_offset(pkt[j],
				const struct ipv6_hdr *,
				sizeof(struct ether_hdr));
			if (ip6->proto == IPPROTO_ICMPV6) {
				l2_len = sizeof(struct ether_hdr);
				l3_len = get_ipv6_hdr_len(pkt[j], l2_len, IPPROTO_ICMPV6);
				icmp6_recv(ctx, pkt[j], l2_len, l3_len);
			} else
				rte_pktmbuf_free(pkt[j]);
			pkt[j] = NULL;
			nb_zero++;
			break;
		default:
			if (common_fill_hdr_len(pkt[j], tp, ctx) == NULL) {
				pkt[j] = NULL;
				nb_zero++;
			}
			break;
		}
	}

	return compress_pkt_list(pkt, nb_pkts, nb_zero);
}
/*
 * generic, assumes HW doesn't recognize any packet type.
 */
static uint16_t
typen_rx_callback(uint16_t port, uint16_t queue, struct rte_mbuf *pkt[],
	uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	uint32_t j;
	uint16_t nb_zero = 0;
	struct glue_ctx *ctx;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);

	ctx = user_param;

	for (j = 0; j != nb_pkts; j++) {
		/* fix me: now we avoid checking ip checksum */
		pkt[j]->ol_flags &= (~PKT_RX_IP_CKSUM_BAD);
		pkt[j]->packet_type = 0;
		pkt[j] = fill_ptypes_and_hdr_len(ctx, pkt[j]);
		nb_zero += (pkt[j] == NULL);
	}

	return compress_pkt_list(pkt, nb_pkts, nb_zero);
}
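/*
 * typen_rx_callback is the pure-software fallback: it clears any stale
 * packet_type and the PKT_RX_IP_CKSUM_BAD flag, then re-derives ptypes and
 * header lengths entirely in fill_ptypes_and_hdr_len(), so it works with any
 * NIC at the cost of parsing every header on the CPU.
 */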
static uint32_t
get_ptypes(uint16_t port_id)
{
	uint32_t smask;
	int32_t i, rc;
	const uint32_t pmask =
		RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;

	smask = 0;
	rc = rte_eth_dev_get_supported_ptypes(port_id, pmask, NULL, 0);
	if (rc < 0) {
		GLUE_LOG(ERR,
			"%s(port=%u) failed to get supported ptypes;\n",
			__func__, port_id);
		return smask;
	}

	uint32_t ptype[rc];
	rc = rte_eth_dev_get_supported_ptypes(port_id, pmask, ptype, rc);

	for (i = 0; i != rc; i++) {
		switch (ptype[i]) {
		case RTE_PTYPE_L2_ETHER_ARP:
			smask |= ETHER_ARP_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV4:
		case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
			smask |= IPV4_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV4_EXT:
			smask |= IPV4_EXT_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV6:
		case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
			smask |= IPV6_PTYPE;
			break;
		case RTE_PTYPE_L3_IPV6_EXT:
			smask |= IPV6_EXT_PTYPE;
			break;
		case RTE_PTYPE_L4_TCP:
			smask |= TCP_PTYPE;
			break;
		case RTE_PTYPE_L4_UDP:
			smask |= UDP_PTYPE;
			break;
		case RTE_PTYPE_L4_ICMP:
			smask |= ICMP_PTYPE;
			break;
		}
	}

	return smask;
}
/* In rx callbacks, we need to check and make sure the following are done,
 * either by hw or by sw:
 * 1. filter out arp packets, and handle arp packets properly
 *    - for an arp request packet, reply if it is requesting myself.
 * 2. fill l2, l3, l4 header length
 *
 * 3. GSO/GRO setup (TODO)
 */
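/*
 * These callbacks are installed with rte_eth_add_rx_callback() below, so they
 * run inside rte_eth_rx_burst() right after the PMD fills the burst: the
 * value they return (the compacted count from compress_pkt_list()) is what
 * the application ultimately sees as the number of received packets.
 */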
int
setup_rx_cb(uint16_t port_id, uint16_t qid)
{
	int32_t rc = -EINVAL;
	uint32_t i, n, smask;
	const void *cb;
	struct glue_ctx *ctx;
	const struct ptype2cb *ptype2cb;

	static const struct ptype2cb tcp_arp_ptype2cb[] = {
		{ /* i40e */
			.mask = ETHER_ARP_PTYPE |
				ICMP_PTYPE |
				IPV4_PTYPE | IPV4_EXT_PTYPE |
				IPV6_PTYPE | IPV6_EXT_PTYPE |
				TCP_PTYPE | UDP_PTYPE,
			.name = "HW l2-arp/l3x/l4-tcp ptype",
			.fn = type0_rx_callback,
		},
		{ /* ixgbe does not support ARP ptype */
			.mask = IPV4_PTYPE | IPV4_EXT_PTYPE |
				IPV6_PTYPE | IPV6_EXT_PTYPE |
				TCP_PTYPE | UDP_PTYPE,
			.name = "HW l3x/l4-tcp ptype",
			.fn = type1_rx_callback,
		},
		{ /* no HW ptype support at all */
			.mask = 0,
			.name = "HW does not support any ptype",
			.fn = typen_rx_callback,
		},
	};

	ctx = glue_ctx_lookup(port_id, qid);
	if (ctx == NULL) {
		GLUE_LOG(ERR, "no ctx found for port(%d) and queue (%d)",
			port_id, qid);
		return -EINVAL;
	}

	smask = get_ptypes(port_id);

	ptype2cb = tcp_arp_ptype2cb;
	n = RTE_DIM(tcp_arp_ptype2cb);

	for (i = 0; i != n; i++) {
		if ((smask & ptype2cb[i].mask) == ptype2cb[i].mask) {
			cb = rte_eth_add_rx_callback(port_id, qid,
				ptype2cb[i].fn, ctx);
			GLUE_LOG(ERR, "%s(port=%u), setup RX callback \"%s\";",
				__func__, port_id, ptype2cb[i].name);
			return ((cb == NULL) ? rc : 0);
		}
	}

	GLUE_LOG(ERR, "%s(port=%u) failed to find an appropriate callback",
		__func__, port_id);
	return -ENOENT;
}