1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Chelsio Communications.
9 #include "cxgbe_filter.h"
14 * Initialize Hash Filters
16 int init_hash_filter(struct adapter *adap)
18 unsigned int n_user_filters;
19 unsigned int user_filter_perc;
21 u32 params[7], val[7];
23 #define FW_PARAM_DEV(param) \
24 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
25 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
27 #define FW_PARAM_PFVF(param) \
28 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
29 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
30 V_FW_PARAMS_PARAM_Y(0) | \
31 V_FW_PARAMS_PARAM_Z(0))
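/*
 * These macros build 32-bit firmware parameter selectors for
 * t4_query_params(): the mnemonic (device-wide vs. per-PF/VF) goes in
 * the MNEM field and the parameter index in PARAM_X. For example,
 * FW_PARAM_DEV(NTID) below asks the firmware for the total number of
 * TIDs supported, which is then stored in adap->tids.ntids.
 */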
33 params[0] = FW_PARAM_DEV(NTID);
34 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
38 adap->tids.ntids = val[0];
39 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
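/*
 * Carve out the share of the filter region available for user-created
 * filters; with user_filter_perc set to 100, all of nftids remains
 * usable as user filter entries.
 */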
41 user_filter_perc = 100;
42 n_user_filters = mult_frac(adap->tids.nftids,
46 adap->tids.nftids = n_user_filters;
47 adap->params.hash_filter = 1;
52 * Validate if the requested filter specification can be set by checking
53 * if the requested features have been enabled
55 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
60 * Check for unconfigured fields being used.
62 fconf = adapter->params.tp.vlan_pri_map;
65 (fs->val._field || fs->mask._field)
66 #define U(_mask, _field) \
67 (!(fconf & (_mask)) && S(_field))
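/*
 * S(field) is true when the request wants to match on that field
 * (value or mask set); U(mask, field) flags a request on a field whose
 * compressed-tuple bit is not enabled in TP's vlan_pri_map, i.e. a
 * field the hardware has not been configured to match on.
 */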
69 if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
70 U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx))
77 * If the user is requesting that the filter action loop
78 * matching packets back out of one of our ports, make sure
79 * that the egress port is in range.
81 if (fs->action == FILTER_SWITCH &&
82 fs->eport >= adapter->params.nports)
86 * Don't allow various trivially obvious bogus out-of-range
89 if (fs->val.iport >= adapter->params.nports)
92 if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
95 if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
102 * Get the queue to which the traffic must be steered.
104 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
105 struct ch_filter_specification *fs)
107 struct port_info *pi = ethdev2pinfo(dev);
108 struct adapter *adapter = pi->adapter;
112 * If the user has requested steering matching Ingress Packets
113 * to a specific Queue Set, we need to make sure it's in range
114 * for the port and map that into the Absolute Queue ID of the
115 * Queue Set's Response Queue.
121 * If the iq id is greater than the number of qsets,
122 * then assume it is an absolute qid.
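 * e.g., with 8 Rx queue sets on the port, fs->iq values 0-7 map to the
 * absolute IDs of those queues' response queues, while larger values
 * are passed through unchanged as absolute queue IDs.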
124 if (fs->iq < pi->n_rx_qsets)
125 iq = adapter->sge.ethrxq[pi->first_qset +
134 /* Return an error number if the indicated filter isn't writable ... */
135 int writable_filter(struct filter_entry *f)
146 * Send CPL_SET_TCB_FIELD message
148 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
149 u16 word, u64 mask, u64 val, int no_reply)
151 struct rte_mbuf *mbuf;
152 struct cpl_set_tcb_field *req;
153 struct sge_ctrl_txq *ctrlq;
155 ctrlq = &adapter->sge.ctrlq[0];
156 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
159 mbuf->data_len = sizeof(*req);
160 mbuf->pkt_len = mbuf->data_len;
162 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
163 memset(req, 0, sizeof(*req));
164 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
165 req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
166 V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
167 V_NO_REPLY(no_reply));
168 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
169 req->mask = cpu_to_be64(mask);
170 req->val = cpu_to_be64(val);
172 t4_mgmt_tx(ctrlq, mbuf);
176 * Set one of the t_flags bits in the TCB.
178 static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
179 unsigned int bit_pos, unsigned int val, int no_reply)
181 set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
182 (unsigned long long)val << bit_pos, no_reply);
186 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
188 static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
189 struct cpl_set_tcb_field *req,
191 u64 mask, u64 val, u8 cookie,
194 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
195 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
197 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
198 V_ULP_TXPKT_DEST(0));
199 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
200 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
201 sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
202 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
203 req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
205 req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
206 req->mask = cpu_to_be64(mask);
207 req->val = cpu_to_be64(val);
208 sc = (struct ulptx_idata *)(req + 1);
209 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
210 sc->len = cpu_to_be32(0);
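/*
 * The trailing ULP_TX_SC_NOOP sub-command is padding: it keeps the
 * immediate data aligned to a 16-byte boundary so the next CPL in a
 * composite work request starts correctly (the caller accounts for it
 * as the "aligner" in cxgbe_del_hash_filter()).
 */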
214 * Check if the entry is already filled.
216 bool is_filter_set(struct tid_info *t, int fidx, int family)
221 /* IPv6 requires four slots and IPv4 requires only one slot.
222 * Ensure there are enough slots available.
224 max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
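/* e.g., an IPv6 filter at fidx 8 occupies slots 8..11, so all four bits are checked below. */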
226 t4_os_lock(&t->ftid_lock);
227 for (i = fidx; i <= max; i++) {
228 if (rte_bitmap_get(t->ftid_bmap, i)) {
233 t4_os_unlock(&t->ftid_lock);
238 * Allocate an available free entry
240 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
242 struct tid_info *t = &adap->tids;
244 int size = t->nftids;
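/*
 * An IPv6 entry needs a free region of four consecutive ftids, while an
 * IPv4 entry only needs a single free bit; -1 is returned when no
 * suitable slot is available.
 */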
246 t4_os_lock(&t->ftid_lock);
247 if (family == FILTER_TYPE_IPV6)
248 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
250 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
251 t4_os_unlock(&t->ftid_lock);
253 return pos < size ? pos : -1;
257 * Construct hash filter ntuple.
259 static u64 hash_filter_ntuple(const struct filter_entry *f)
261 struct adapter *adap = ethdev2adap(f->dev);
262 struct tp_params *tp = &adap->params.tp;
264 u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
266 if (tp->port_shift >= 0)
267 ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
269 if (tp->protocol_shift >= 0) {
270 if (!f->fs.val.proto)
271 ntuple |= (u64)tcp_proto << tp->protocol_shift;
273 ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
276 if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
277 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
278 if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
279 ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
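/*
 * The tuple built above is only usable for a hash (exact-match) filter
 * if it lines up with the hash-filter mask configured in TP.
 */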
281 if (ntuple != tp->hash_filter_mask)
288 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
290 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
293 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
294 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
296 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
297 V_ULP_TXPKT_DEST(0));
298 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
299 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
300 sc->len = cpu_to_be32(sizeof(*abort_req) -
301 sizeof(struct work_request_hdr));
302 OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
303 abort_req->rsvd0 = cpu_to_be32(0);
304 abort_req->rsvd1 = 0;
305 abort_req->cmd = CPL_ABORT_NO_RST;
306 sc = (struct ulptx_idata *)(abort_req + 1);
307 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
308 sc->len = cpu_to_be32(0);
312 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
314 static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
317 struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
318 struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
320 txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
321 V_ULP_TXPKT_DEST(0));
322 txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
323 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
324 sc->len = cpu_to_be32(sizeof(*abort_rpl) -
325 sizeof(struct work_request_hdr));
326 OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
327 abort_rpl->rsvd0 = cpu_to_be32(0);
328 abort_rpl->rsvd1 = 0;
329 abort_rpl->cmd = CPL_ABORT_NO_RST;
330 sc = (struct ulptx_idata *)(abort_rpl + 1);
331 sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
332 sc->len = cpu_to_be32(0);
336 * Delete the specified hash filter.
338 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
339 unsigned int filter_id,
340 struct filter_ctx *ctx)
342 struct adapter *adapter = ethdev2adap(dev);
343 struct tid_info *t = &adapter->tids;
344 struct filter_entry *f;
345 struct sge_ctrl_txq *ctrlq;
346 unsigned int port_id = ethdev2pinfo(dev)->port_id;
349 if (filter_id > adapter->tids.ntids)
352 f = lookup_tid(t, filter_id);
354 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
355 __func__, filter_id);
359 ret = writable_filter(f);
365 struct rte_mbuf *mbuf;
366 struct work_request_hdr *wr;
367 struct ulptx_idata *aligner;
368 struct cpl_set_tcb_field *req;
369 struct cpl_abort_req *abort_req;
370 struct cpl_abort_rpl *abort_rpl;
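/*
 * A hash filter is torn down with one composite ULP_TX work request:
 * a SET_TCB_FIELD that repoints the TID's RSS queue at the firmware
 * event queue, followed by an ABORT_REQ/ABORT_RPL pair that releases
 * the TID in hardware.
 */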
375 wrlen = cxgbe_roundup(sizeof(*wr) +
376 (sizeof(*req) + sizeof(*aligner)) +
377 sizeof(*abort_req) + sizeof(*abort_rpl),
380 ctrlq = &adapter->sge.ctrlq[port_id];
381 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
383 dev_err(adapter, "%s: could not allocate mbuf\n",
388 mbuf->data_len = wrlen;
389 mbuf->pkt_len = mbuf->data_len;
391 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
392 INIT_ULPTX_WR(req, wrlen, 0, 0);
393 wr = (struct work_request_hdr *)req;
395 req = (struct cpl_set_tcb_field *)wr;
396 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
397 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
398 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
400 aligner = (struct ulptx_idata *)(req + 1);
401 abort_req = (struct cpl_abort_req *)(aligner + 1);
402 mk_abort_req_ulp(abort_req, f->tid);
403 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
404 mk_abort_rpl_ulp(abort_rpl, f->tid);
405 t4_mgmt_tx(ctrlq, mbuf);
414 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
416 static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
417 unsigned int qid_filterid, struct adapter *adap)
419 struct cpl_t6_act_open_req6 *req = NULL;
420 u64 local_lo, local_hi, peer_lo, peer_hi;
421 u32 *lip = (u32 *)f->fs.val.lip;
422 u32 *fip = (u32 *)f->fs.val.fip;
424 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
426 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
431 dev_err(adap, "%s: unsupported chip type!\n", __func__);
435 local_hi = ((u64)lip[1]) << 32 | lip[0];
436 local_lo = ((u64)lip[3]) << 32 | lip[2];
437 peer_hi = ((u64)fip[1]) << 32 | fip[0];
438 peer_lo = ((u64)fip[3]) << 32 | fip[2];
440 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
442 req->local_port = cpu_to_be16(f->fs.val.lport);
443 req->peer_port = cpu_to_be16(f->fs.val.fport);
444 req->local_ip_hi = local_hi;
445 req->local_ip_lo = local_lo;
446 req->peer_ip_hi = peer_hi;
447 req->peer_ip_lo = peer_lo;
448 req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
449 f->fs.newvlan == VLAN_REWRITE) |
450 V_DELACK(f->fs.hitcnts) |
451 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
452 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
454 V_TX_CHAN(f->fs.eport) |
455 V_ULP_MODE(ULP_MODE_NONE) |
456 F_TCAM_BYPASS | F_NON_OFFLOAD);
457 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
458 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
459 V_RSS_QUEUE(f->fs.iq) |
462 V_SACK_EN(f->fs.swapmac) |
463 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
464 (f->fs.dirsteer << 1)) |
465 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
469 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
471 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
472 unsigned int qid_filterid, struct adapter *adap)
474 struct cpl_t6_act_open_req *req = NULL;
476 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
478 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
483 dev_err(adap, "%s: unsupported chip type!\n", __func__);
487 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
489 req->local_port = cpu_to_be16(f->fs.val.lport);
490 req->peer_port = cpu_to_be16(f->fs.val.fport);
491 req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
492 f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
493 req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
494 f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
495 req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
496 f->fs.newvlan == VLAN_REWRITE) |
497 V_DELACK(f->fs.hitcnts) |
498 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
499 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
501 V_TX_CHAN(f->fs.eport) |
502 V_ULP_MODE(ULP_MODE_NONE) |
503 F_TCAM_BYPASS | F_NON_OFFLOAD);
504 req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
505 req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
506 V_RSS_QUEUE(f->fs.iq) |
509 V_SACK_EN(f->fs.swapmac) |
510 V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
511 (f->fs.dirsteer << 1)) |
512 V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
516 * Set the specified hash filter.
518 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
519 struct ch_filter_specification *fs,
520 struct filter_ctx *ctx)
522 struct port_info *pi = ethdev2pinfo(dev);
523 struct adapter *adapter = pi->adapter;
524 struct tid_info *t = &adapter->tids;
525 struct filter_entry *f;
526 struct rte_mbuf *mbuf;
527 struct sge_ctrl_txq *ctrlq;
532 ret = validate_filter(adapter, fs);
536 iq = get_filter_steerq(dev, fs);
538 ctrlq = &adapter->sge.ctrlq[pi->port_id];
540 f = t4_os_alloc(sizeof(*f));
550 * If the new filter requires loopback Destination MAC and/or VLAN
551 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
554 if (f->fs.newvlan == VLAN_INSERT ||
555 f->fs.newvlan == VLAN_REWRITE) {
556 /* allocate L2T entry for new filter */
557 f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
558 f->fs.eport, f->fs.dmac);
565 atid = cxgbe_alloc_atid(t, f);
570 /* IPv6 hash filter */
571 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
575 size = sizeof(struct cpl_t6_act_open_req6);
576 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
582 mbuf->data_len = size;
583 mbuf->pkt_len = mbuf->data_len;
585 mk_act_open_req6(f, mbuf,
586 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
589 /* IPv4 hash filter */
590 size = sizeof(struct cpl_t6_act_open_req);
591 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
597 mbuf->data_len = size;
598 mbuf->pkt_len = mbuf->data_len;
600 mk_act_open_req(f, mbuf,
601 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
606 t4_mgmt_tx(ctrlq, mbuf);
610 cxgbe_clip_release(f->dev, f->clipt);
612 cxgbe_free_atid(t, atid);
620 * Clear a filter and release any of its resources that we own. This also
621 * clears the filter's "pending" status.
623 void clear_filter(struct filter_entry *f)
626 cxgbe_clip_release(f->dev, f->clipt);
629 * The zeroing of the filter rule below clears the filter valid,
630 * pending, locked flags etc. so it's all we need for
633 memset(f, 0, sizeof(*f));
637 * t4_mk_filtdelwr - create a delete filter WR
638 * @adap: adapter context
639 * @ftid: the filter ID
640 * @wr: the filter work request to populate
641 * @qid: ingress queue to receive the delete notification
643 * Creates a filter work request to delete the supplied filter. If @qid is
644 * negative the delete notification is suppressed.
646 static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
647 struct fw_filter2_wr *wr, int qid)
649 memset(wr, 0, sizeof(*wr));
650 if (adap->params.filter2_wr_support)
651 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
653 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
654 wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
655 wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
656 V_FW_FILTER_WR_NOREPLY(qid < 0));
657 wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
659 wr->rx_chan_rx_rpl_iq =
660 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
664 * Create a FW work request to delete the filter at the specified index
666 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
668 struct adapter *adapter = ethdev2adap(dev);
669 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
670 struct rte_mbuf *mbuf;
671 struct fw_filter2_wr *fwr;
672 struct sge_ctrl_txq *ctrlq;
673 unsigned int port_id = ethdev2pinfo(dev)->port_id;
675 ctrlq = &adapter->sge.ctrlq[port_id];
676 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
680 mbuf->data_len = sizeof(*fwr);
681 mbuf->pkt_len = mbuf->data_len;
683 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
684 t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
687 * Mark the filter as "pending" and ship off the Filter Work Request.
688 * When we get the Work Request Reply we'll clear the pending status.
691 t4_mgmt_tx(ctrlq, mbuf);
695 int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
697 struct adapter *adapter = ethdev2adap(dev);
698 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
699 struct rte_mbuf *mbuf;
700 struct fw_filter2_wr *fwr;
701 struct sge_ctrl_txq *ctrlq;
702 unsigned int port_id = ethdev2pinfo(dev)->port_id;
706 * If the new filter requires loopback Destination MAC and/or VLAN
707 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
711 /* allocate L2T entry for new filter */
712 f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
713 f->fs.eport, f->fs.dmac);
718 ctrlq = &adapter->sge.ctrlq[port_id];
719 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
725 mbuf->data_len = sizeof(*fwr);
726 mbuf->pkt_len = mbuf->data_len;
728 fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
729 memset(fwr, 0, sizeof(*fwr));
732 * Construct the work request to set the filter.
734 if (adapter->params.filter2_wr_support)
735 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
737 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
738 fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
740 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
741 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
742 V_FW_FILTER_WR_NOREPLY(0) |
743 V_FW_FILTER_WR_IQ(f->fs.iq));
744 fwr->del_filter_to_l2tix =
745 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
746 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
747 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
748 V_FW_FILTER_WR_INSVLAN
749 (f->fs.newvlan == VLAN_INSERT ||
750 f->fs.newvlan == VLAN_REWRITE) |
751 V_FW_FILTER_WR_RMVLAN
752 (f->fs.newvlan == VLAN_REMOVE ||
753 f->fs.newvlan == VLAN_REWRITE) |
754 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
755 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
756 V_FW_FILTER_WR_PRIO(f->fs.prio) |
757 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
758 fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
759 fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
761 fwr->rx_chan_rx_rpl_iq =
762 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
763 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
765 fwr->maci_to_matchtypem =
766 cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
767 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
768 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
769 V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
770 fwr->ptcl = f->fs.val.proto;
771 fwr->ptclm = f->fs.mask.proto;
772 rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
773 rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
774 rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
775 rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
776 fwr->lp = cpu_to_be16(f->fs.val.lport);
777 fwr->lpm = cpu_to_be16(f->fs.mask.lport);
778 fwr->fp = cpu_to_be16(f->fs.val.fport);
779 fwr->fpm = cpu_to_be16(f->fs.mask.fport);
781 if (adapter->params.filter2_wr_support) {
782 fwr->filter_type_swapmac =
783 V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
784 fwr->natmode_to_ulp_type =
785 V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
788 V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
789 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
790 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
791 fwr->newlport = cpu_to_be16(f->fs.nat_lport);
792 fwr->newfport = cpu_to_be16(f->fs.nat_fport);
796 * Mark the filter as "pending" and ship off the Filter Work Request.
797 * When we get the Work Request Reply we'll clear the pending status.
800 t4_mgmt_tx(ctrlq, mbuf);
808 * Set the corresponding entry in the bitmap. 4 slots are
809 * marked for IPv6, whereas only 1 slot is marked for IPv4.
811 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
813 t4_os_lock(&t->ftid_lock);
814 if (rte_bitmap_get(t->ftid_bmap, fidx)) {
815 t4_os_unlock(&t->ftid_lock);
819 if (family == FILTER_TYPE_IPV4) {
820 rte_bitmap_set(t->ftid_bmap, fidx);
822 rte_bitmap_set(t->ftid_bmap, fidx);
823 rte_bitmap_set(t->ftid_bmap, fidx + 1);
824 rte_bitmap_set(t->ftid_bmap, fidx + 2);
825 rte_bitmap_set(t->ftid_bmap, fidx + 3);
827 t4_os_unlock(&t->ftid_lock);
832 * Clear the corresponding entry in the bitmap. 4 slots are
833 * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
835 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
837 t4_os_lock(&t->ftid_lock);
838 if (family == FILTER_TYPE_IPV4) {
839 rte_bitmap_clear(t->ftid_bmap, fidx);
841 rte_bitmap_clear(t->ftid_bmap, fidx);
842 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
843 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
844 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
846 t4_os_unlock(&t->ftid_lock);
850 * Check a delete filter request for validity and send it to the hardware.
851 * Return 0 on success, an error number otherwise. We attach any provided
852 * filter operation context to the internal filter specification in order to
853 * facilitate signaling completion of the operation.
855 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
856 struct ch_filter_specification *fs,
857 struct filter_ctx *ctx)
859 struct port_info *pi = (struct port_info *)(dev->data->dev_private);
860 struct adapter *adapter = pi->adapter;
861 struct filter_entry *f;
862 unsigned int chip_ver;
865 if (is_hashfilter(adapter) && fs->cap)
866 return cxgbe_del_hash_filter(dev, filter_id, ctx);
868 if (filter_id >= adapter->tids.nftids)
871 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
873 ret = is_filter_set(&adapter->tids, filter_id, fs->type);
875 dev_warn(adapter, "%s: could not find filter entry: %u\n",
876 __func__, filter_id);
881 * Ensure the filter ID is aligned on a 2-slot boundary for T6,
882 * and on a 4-slot boundary for cards below T6.
885 if (chip_ver < CHELSIO_T6)
891 f = &adapter->tids.ftid_tab[filter_id];
892 ret = writable_filter(f);
898 cxgbe_clear_ftid(&adapter->tids,
899 f->tid - adapter->tids.ftid_base,
900 f->fs.type ? FILTER_TYPE_IPV6 :
902 return del_filter_wr(dev, filter_id);
906 * If the caller has passed in a Completion Context then we need to
907 * mark it as a successful completion so they don't stall waiting
912 t4_complete(&ctx->completion);
919 * Check a Chelsio Filter Request for validity, convert it into our internal
920 * format and send it to the hardware. Return 0 on success, an error number
921 * otherwise. We attach any provided filter operation context to the internal
922 * filter specification in order to facilitate signaling completion of the
925 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
926 struct ch_filter_specification *fs,
927 struct filter_ctx *ctx)
929 struct port_info *pi = ethdev2pinfo(dev);
930 struct adapter *adapter = pi->adapter;
931 unsigned int fidx, iq, fid_bit = 0;
932 struct filter_entry *f;
933 unsigned int chip_ver;
934 uint8_t bitoff[16] = {0};
937 if (is_hashfilter(adapter) && fs->cap)
938 return cxgbe_set_hash_filter(dev, fs, ctx);
940 if (filter_id >= adapter->tids.nftids)
943 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
945 ret = validate_filter(adapter, fs);
950 * Ensure the filter ID is aligned on a 4-slot boundary for IPv6
956 ret = is_filter_set(&adapter->tids, filter_id, fs->type);
960 iq = get_filter_steerq(dev, fs);
963 * IPv6 filters occupy four slots and must be aligned on four-slot
964 * boundaries for T5. On T6, IPv6 filters occupy two slots and
965 * must be aligned on two-slot boundaries.
967 * IPv4 filters only occupy a single slot and have no alignment
968 * requirements but writing a new IPv4 filter into the middle
969 * of an existing IPv6 filter requires clearing the old IPv6
972 if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
974 * For T6, if our IPv4 filter isn't being written at a
975 * multiple-of-two filter index and there's an IPv6
976 * filter at the multiple-of-two base slot, then we need
977 * to delete that IPv6 filter first ...
978 * For adapters below T6, an IPv6 filter occupies 4 entries.
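 * e.g., on T6 an IPv4 filter written at index 5 shares the two-slot
 * group based at index 4; if index 4 currently holds an IPv6 filter,
 * that filter conflicts and has to be removed first.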
980 if (chip_ver < CHELSIO_T6)
981 fidx = filter_id & ~0x3;
983 fidx = filter_id & ~0x1;
985 if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
986 f = &adapter->tids.ftid_tab[fidx];
991 unsigned int max_filter_id;
993 if (chip_ver < CHELSIO_T6) {
995 * Ensure that the IPv6 filter is aligned on a
996 * multiple of 4 boundary.
1001 max_filter_id = filter_id + 4;
1004 * For T6, with CLIP enabled, an IPv6 filter would occupy
1007 if (filter_id & 0x1)
1010 max_filter_id = filter_id + 2;
1014 * Check all except the base overlapping IPv4 filter
1017 for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
1018 f = &adapter->tids.ftid_tab[fidx];
1025 * Check to make sure that the provided filter index is not
1026 * already in use by someone else
1028 f = &adapter->tids.ftid_tab[filter_id];
1032 fidx = adapter->tids.ftid_base + filter_id;
1033 fid_bit = filter_id;
1034 ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
1035 fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
1040 * Check to make sure the filter requested is writable ...
1042 ret = writable_filter(f);
1044 /* Clear the bits we have set above */
1045 cxgbe_clear_ftid(&adapter->tids, fid_bit,
1046 fs->type ? FILTER_TYPE_IPV6 :
1052 * Allocate a CLIP table entry only if we have a non-zero IPv6 address
1054 if (chip_ver > CHELSIO_T5 && fs->type &&
1055 memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
1056 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
1062 * Convert the filter specification into our internal format.
1063 * We copy the PF/VF specification into the Outer VLAN field
1064 * here so the rest of the code -- including the interface to
1065 * the firmware -- doesn't have to constantly do these checks.
1072 * Attempt to set the filter. If we don't succeed, we clear
1073 * it and return the failure.
1076 f->tid = fidx; /* Save the actual tid */
1077 ret = set_filter_wr(dev, filter_id);
1079 fid_bit = f->tid - adapter->tids.ftid_base;
1086 cxgbe_clear_ftid(&adapter->tids, fid_bit,
1087 fs->type ? FILTER_TYPE_IPV6 :
1094 * Handle a Hash filter write reply.
1096 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
1098 struct tid_info *t = &adap->tids;
1099 struct filter_entry *f;
1100 struct filter_ctx *ctx = NULL;
1101 unsigned int tid = GET_TID(rpl);
1102 unsigned int ftid = G_TID_TID(G_AOPEN_ATID
1103 (be32_to_cpu(rpl->atid_status)));
1104 unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
1106 f = lookup_atid(t, ftid);
1108 dev_warn(adap, "%s: could not find filter entry: %d\n",
1117 case CPL_ERR_NONE: {
1119 f->pending = 0; /* asynchronous setup completed */
1122 cxgbe_insert_tid(t, f, f->tid, 0);
1123 cxgbe_free_atid(t, ftid);
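/*
 * Clear the TCB TIMESTAMP and T_RTT_TS_RECENT_AGE words below: hash
 * filters reuse these TCB fields as hit/byte counters (see
 * cxgbe_get_filter_count()), so they must start out at zero.
 */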
1129 set_tcb_field(adap, tid,
1131 V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1132 V_TCB_T_RTT_TS_RECENT_AGE
1133 (M_TCB_T_RTT_TS_RECENT_AGE),
1134 V_TCB_TIMESTAMP(0ULL) |
1135 V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
1137 if (f->fs.newvlan == VLAN_INSERT ||
1138 f->fs.newvlan == VLAN_REWRITE)
1139 set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
1143 dev_warn(adap, "%s: filter creation failed with status = %u\n",
1147 if (status == CPL_ERR_TCAM_FULL)
1148 ctx->result = -EAGAIN;
1150 ctx->result = -EINVAL;
1153 cxgbe_free_atid(t, ftid);
1158 t4_complete(&ctx->completion);
1162 * Handle a LE-TCAM filter write/deletion reply.
1164 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1166 struct filter_entry *f = NULL;
1167 unsigned int tid = GET_TID(rpl);
1168 int idx, max_fidx = adap->tids.nftids;
1170 /* Get the corresponding filter entry for this tid */
1171 if (adap->tids.ftid_tab) {
1172 /* Check this in normal filter region */
1173 idx = tid - adap->tids.ftid_base;
1174 if (idx >= max_fidx)
1177 f = &adap->tids.ftid_tab[idx];
1182 /* We found the filter entry for this tid */
1184 unsigned int ret = G_COOKIE(rpl->cookie);
1185 struct filter_ctx *ctx;
1188 * Pull off any filter operation context attached to the
1194 if (ret == FW_FILTER_WR_FLT_ADDED) {
1195 f->pending = 0; /* asynchronous setup completed */
1201 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1203 * Clear the filter when we get confirmation from the
1204 * hardware that the filter has been deleted.
1211 * Something went wrong. Issue a warning about the
1212 * problem and clear everything out.
1214 dev_warn(adap, "filter %u setup failed with error %u\n",
1218 ctx->result = -EINVAL;
1222 t4_complete(&ctx->completion);
1227 * Retrieve the packet or byte count for the specified filter.
1229 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1230 u64 *c, int hash, bool get_byte)
1232 struct filter_entry *f;
1233 unsigned int tcb_base, tcbaddr;
1236 tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1237 if (is_hashfilter(adapter) && hash) {
1238 if (fidx < adapter->tids.ntids) {
1239 f = adapter->tids.tid_tab[fidx];
1243 if (is_t5(adapter->params.chip)) {
1247 tcbaddr = tcb_base + (fidx * TCB_SIZE);
1253 if (fidx >= adapter->tids.nftids)
1256 f = &adapter->tids.ftid_tab[fidx];
1260 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1263 f = &adapter->tids.ftid_tab[fidx];
1268 if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1270 * For T5, the Filter Packet Hit Count is maintained as a
1271 * 32-bit Big Endian value in the TCB field {timestamp}.
1272 * Similar to the craziness above, instead of the filter hit
1273 * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1274 * sizeof(u32)), it actually shows up at offset 24. Whacky.
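 * Hence the byte count is read below as a 64-bit value starting at
 * word offset 4, and the packet count as a 32-bit value at word
 * offset 6 of the TCB.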
1277 unsigned int word_offset = 4;
1278 __be64 be64_byte_count;
1280 t4_os_lock(&adapter->win0_lock);
1281 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1283 (word_offset * sizeof(__be32)),
1284 sizeof(be64_byte_count),
1287 t4_os_unlock(&adapter->win0_lock);
1290 *c = be64_to_cpu(be64_byte_count);
1292 unsigned int word_offset = 6;
1295 t4_os_lock(&adapter->win0_lock);
1296 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1298 (word_offset * sizeof(__be32)),
1299 sizeof(be32_count), &be32_count,
1301 t4_os_unlock(&adapter->win0_lock);
1304 *c = (u64)be32_to_cpu(be32_count);
1311 * Handle a Hash filter delete reply.
1313 void hash_del_filter_rpl(struct adapter *adap,
1314 const struct cpl_abort_rpl_rss *rpl)
1316 struct tid_info *t = &adap->tids;
1317 struct filter_entry *f;
1318 struct filter_ctx *ctx = NULL;
1319 unsigned int tid = GET_TID(rpl);
1321 f = lookup_tid(t, tid);
1323 dev_warn(adap, "%s: could not find filter entry: %u\n",
1334 cxgbe_clip_release(f->dev, f->clipt);
1336 cxgbe_remove_tid(t, 0, tid, 0);
1341 t4_complete(&ctx->completion);