New upstream version 18.08
[deb_dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include <rte_net.h>
6 #include "common.h"
7 #include "t4_tcb.h"
8 #include "t4_regs.h"
9 #include "cxgbe_filter.h"
10 #include "clip_tbl.h"
11
/**
 * Initialize Hash Filter support.
 *
 * Queries the firmware for the number of TIDs available for hash
 * (exact-match) filters, derives the active-TID pool size, and marks
 * the adapter as hash-filter capable.
 *
 * Returns 0 on success or a negative firmware error code.
 */
int init_hash_filter(struct adapter *adap)
{
	unsigned int n_user_filters;
	unsigned int user_filter_perc;
	int ret;
	u32 params[7], val[7];

/* Build a FW_PARAMS mnemonic for a device-global parameter. */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

/* Build a FW_PARAMS mnemonic for a per-PF/VF parameter. */
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
	V_FW_PARAMS_PARAM_Y(0) | \
	V_FW_PARAMS_PARAM_Z(0))

	/* Ask firmware how many TIDs are available for hash filters. */
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		return ret;
	adap->tids.ntids = val[0];
	/* Active TIDs: half the TID space, capped at MAX_ATIDS. */
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	/* All filter TIDs are currently given to the user (100%);
	 * mult_frac keeps the computation overflow-safe if the
	 * percentage is ever lowered.
	 */
	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
	adap->params.hash_filter = 1;
	return 0;
}
49
50 /**
51  * Validate if the requested filter specification can be set by checking
52  * if the requested features have been enabled
53  */
54 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
55 {
56         u32 fconf;
57
58         /*
59          * Check for unconfigured fields being used.
60          */
61         fconf = adapter->params.tp.vlan_pri_map;
62
63 #define S(_field) \
64         (fs->val._field || fs->mask._field)
65 #define U(_mask, _field) \
66         (!(fconf & (_mask)) && S(_field))
67
68         if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
69                 return -EOPNOTSUPP;
70
71 #undef S
72 #undef U
73
74         /*
75          * If the user is requesting that the filter action loop
76          * matching packets back out one of our ports, make sure that
77          * the egress port is in range.
78          */
79         if (fs->action == FILTER_SWITCH &&
80             fs->eport >= adapter->params.nports)
81                 return -ERANGE;
82
83         /*
84          * Don't allow various trivially obvious bogus out-of-range
85          * values ...
86          */
87         if (fs->val.iport >= adapter->params.nports)
88                 return -ERANGE;
89
90         return 0;
91 }
92
93 /**
94  * Get the queue to which the traffic must be steered to.
95  */
96 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
97                                       struct ch_filter_specification *fs)
98 {
99         struct port_info *pi = ethdev2pinfo(dev);
100         struct adapter *adapter = pi->adapter;
101         unsigned int iq;
102
103         /*
104          * If the user has requested steering matching Ingress Packets
105          * to a specific Queue Set, we need to make sure it's in range
106          * for the port and map that into the Absolute Queue ID of the
107          * Queue Set's Response Queue.
108          */
109         if (!fs->dirsteer) {
110                 iq = 0;
111         } else {
112                 /*
113                  * If the iq id is greater than the number of qsets,
114                  * then assume it is an absolute qid.
115                  */
116                 if (fs->iq < pi->n_rx_qsets)
117                         iq = adapter->sge.ethrxq[pi->first_qset +
118                                                  fs->iq].rspq.abs_id;
119                 else
120                         iq = fs->iq;
121         }
122
123         return iq;
124 }
125
126 /* Return an error number if the indicated filter isn't writable ... */
127 int writable_filter(struct filter_entry *f)
128 {
129         if (f->locked)
130                 return -EPERM;
131         if (f->pending)
132                 return -EBUSY;
133
134         return 0;
135 }
136
137 /**
138  * Send CPL_SET_TCB_FIELD message
139  */
140 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
141                           u16 word, u64 mask, u64 val, int no_reply)
142 {
143         struct rte_mbuf *mbuf;
144         struct cpl_set_tcb_field *req;
145         struct sge_ctrl_txq *ctrlq;
146
147         ctrlq = &adapter->sge.ctrlq[0];
148         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
149         WARN_ON(!mbuf);
150
151         mbuf->data_len = sizeof(*req);
152         mbuf->pkt_len = mbuf->data_len;
153
154         req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
155         memset(req, 0, sizeof(*req));
156         INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
157         req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
158                                       V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
159                                       V_NO_REPLY(no_reply));
160         req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
161         req->mask = cpu_to_be64(mask);
162         req->val = cpu_to_be64(val);
163
164         t4_mgmt_tx(ctrlq, mbuf);
165 }
166
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 *
 * The caller provides @req pointing into a pre-sized work request; the
 * ULP_TX_PKT header, an immediate-data sub-command, the CPL itself and
 * a trailing NOOP pad are written in place.  The exact field order and
 * sizes here mirror the hardware wire format — do not reorder.
 *
 * @f: filter entry whose TID the CPL targets
 * @req: location inside the WR where the message is built
 * @word: TCB word index to modify
 * @mask: bits of the word to change
 * @val: new value for the masked bits
 * @cookie: opaque cookie echoed back in the reply
 * @no_reply: non-zero to suppress the firmware reply
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	/* ULP_TX_PKT header and immediate-data sub-command precede the
	 * CPL in the same buffer.
	 */
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* Length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	/* Immediate data excludes the work request header. */
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	/* Pad the command out with a NOOP sub-command. */
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
194
195 /**
196  * Check if entry already filled.
197  */
198 bool is_filter_set(struct tid_info *t, int fidx, int family)
199 {
200         bool result = FALSE;
201         int i, max;
202
203         /* IPv6 requires four slots and IPv4 requires only 1 slot.
204          * Ensure, there's enough slots available.
205          */
206         max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
207
208         t4_os_lock(&t->ftid_lock);
209         for (i = fidx; i <= max; i++) {
210                 if (rte_bitmap_get(t->ftid_bmap, i)) {
211                         result = TRUE;
212                         break;
213                 }
214         }
215         t4_os_unlock(&t->ftid_lock);
216         return result;
217 }
218
219 /**
220  * Allocate a available free entry
221  */
222 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
223 {
224         struct tid_info *t = &adap->tids;
225         int pos;
226         int size = t->nftids;
227
228         t4_os_lock(&t->ftid_lock);
229         if (family == FILTER_TYPE_IPV6)
230                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
231         else
232                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
233         t4_os_unlock(&t->ftid_lock);
234
235         return pos < size ? pos : -1;
236 }
237
238 /**
239  * Construct hash filter ntuple.
240  */
241 static u64 hash_filter_ntuple(const struct filter_entry *f)
242 {
243         struct adapter *adap = ethdev2adap(f->dev);
244         struct tp_params *tp = &adap->params.tp;
245         u64 ntuple = 0;
246         u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
247
248         if (tp->port_shift >= 0)
249                 ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
250
251         if (tp->protocol_shift >= 0) {
252                 if (!f->fs.val.proto)
253                         ntuple |= (u64)tcp_proto << tp->protocol_shift;
254                 else
255                         ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
256         }
257
258         if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
259                 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
260
261         if (ntuple != tp->hash_filter_mask)
262                 return 0;
263
264         return ntuple;
265 }
266
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 *
 * Writes the ULP_TX_PKT header, an immediate-data sub-command, the
 * abort request itself and a trailing NOOP pad in place at @abort_req.
 * Field order mirrors the hardware wire format — do not reorder.
 *
 * @abort_req: location inside the WR where the message is built
 * @tid: connection/filter TID to abort
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* Length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	/* Immediate data excludes the work request header. */
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	/* Abort without sending a TCP RST on the wire. */
	abort_req->cmd = CPL_ABORT_NO_RST;
	/* Pad the command out with a NOOP sub-command. */
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
290
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 *
 * Structure and field order are identical to mk_abort_req_ulp() but
 * emit the abort *reply* opcode.  Field order mirrors the hardware
 * wire format — do not reorder.
 *
 * @abort_rpl: location inside the WR where the message is built
 * @tid: connection/filter TID being replied for
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* Length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	/* Immediate data excludes the work request header. */
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	/* Abort without sending a TCP RST on the wire. */
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	/* Pad the command out with a NOOP sub-command. */
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
314
315 /**
316  * Delete the specified hash filter.
317  */
318 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
319                                  unsigned int filter_id,
320                                  struct filter_ctx *ctx)
321 {
322         struct adapter *adapter = ethdev2adap(dev);
323         struct tid_info *t = &adapter->tids;
324         struct filter_entry *f;
325         struct sge_ctrl_txq *ctrlq;
326         unsigned int port_id = ethdev2pinfo(dev)->port_id;
327         int ret;
328
329         if (filter_id > adapter->tids.ntids)
330                 return -E2BIG;
331
332         f = lookup_tid(t, filter_id);
333         if (!f) {
334                 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
335                         __func__, filter_id);
336                 return -EINVAL;
337         }
338
339         ret = writable_filter(f);
340         if (ret)
341                 return ret;
342
343         if (f->valid) {
344                 unsigned int wrlen;
345                 struct rte_mbuf *mbuf;
346                 struct work_request_hdr *wr;
347                 struct ulptx_idata *aligner;
348                 struct cpl_set_tcb_field *req;
349                 struct cpl_abort_req *abort_req;
350                 struct cpl_abort_rpl *abort_rpl;
351
352                 f->ctx = ctx;
353                 f->pending = 1;
354
355                 wrlen = cxgbe_roundup(sizeof(*wr) +
356                                       (sizeof(*req) + sizeof(*aligner)) +
357                                       sizeof(*abort_req) + sizeof(*abort_rpl),
358                                       16);
359
360                 ctrlq = &adapter->sge.ctrlq[port_id];
361                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
362                 if (!mbuf) {
363                         dev_err(adapter, "%s: could not allocate skb ..\n",
364                                 __func__);
365                         goto out_err;
366                 }
367
368                 mbuf->data_len = wrlen;
369                 mbuf->pkt_len = mbuf->data_len;
370
371                 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
372                 INIT_ULPTX_WR(req, wrlen, 0, 0);
373                 wr = (struct work_request_hdr *)req;
374                 wr++;
375                 req = (struct cpl_set_tcb_field *)wr;
376                 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
377                                 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
378                                 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
379                                 0, 1);
380                 aligner = (struct ulptx_idata *)(req + 1);
381                 abort_req = (struct cpl_abort_req *)(aligner + 1);
382                 mk_abort_req_ulp(abort_req, f->tid);
383                 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
384                 mk_abort_rpl_ulp(abort_rpl, f->tid);
385                 t4_mgmt_tx(ctrlq, mbuf);
386         }
387         return 0;
388
389 out_err:
390         return -ENOMEM;
391 }
392
/**
 * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 *
 * Fills @mbuf with a CPL_T6_ACT_OPEN_REQ6 describing the 4-tuple and
 * the filter's opt0/opt2 action bits.  Only T6 chips are supported;
 * other chip revisions log an error and leave the mbuf untouched.
 *
 * @f: the filter entry being installed
 * @mbuf: mbuf already sized to hold the request
 * @qid_filterid: combined (fw_evtq abs_id << 14 | atid) cookie
 * @adap: the adapter
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	/* NOTE(review): the u8[] addresses are reinterpreted as u32
	 * words here; presumably lip/fip are already stored in network
	 * byte order since no byte swapping is applied below — confirm
	 * against the ch_filter_specification definition.
	 */
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	/* Pack the 128-bit addresses into hi/lo 64-bit halves. */
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	/* opt0: hit counters, source MAC index, Tx channel; filter
	 * mode bypasses the TCAM and is not offloaded as a connection.
	 */
	req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	/* opt2: RSS queue plus drop/steer/switch action encoding. */
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
443
444 /**
445  * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
446  */
447 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
448                             unsigned int qid_filterid, struct adapter *adap)
449 {
450         struct cpl_t6_act_open_req *req = NULL;
451
452         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
453         case CHELSIO_T6:
454                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
455
456                 INIT_TP_WR(req, 0);
457                 break;
458         default:
459                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
460                 return;
461         }
462
463         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
464                                                     qid_filterid));
465         req->local_port = cpu_to_be16(f->fs.val.lport);
466         req->peer_port = cpu_to_be16(f->fs.val.fport);
467         req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
468                         f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
469         req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
470                         f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
471         req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
472                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
473                                            << 1) |
474                                 V_TX_CHAN(f->fs.eport) |
475                                 V_ULP_MODE(ULP_MODE_NONE) |
476                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
477         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
478         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
479                             V_RSS_QUEUE(f->fs.iq) |
480                             F_T5_OPT_2_VALID |
481                             F_RX_CHANNEL |
482                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
483                                          (f->fs.dirsteer << 1)) |
484                             V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
485 }
486
487 /**
488  * Set the specified hash filter.
489  */
490 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
491                                  struct ch_filter_specification *fs,
492                                  struct filter_ctx *ctx)
493 {
494         struct port_info *pi = ethdev2pinfo(dev);
495         struct adapter *adapter = pi->adapter;
496         struct tid_info *t = &adapter->tids;
497         struct filter_entry *f;
498         struct rte_mbuf *mbuf;
499         struct sge_ctrl_txq *ctrlq;
500         unsigned int iq;
501         int atid, size;
502         int ret = 0;
503
504         ret = validate_filter(adapter, fs);
505         if (ret)
506                 return ret;
507
508         iq = get_filter_steerq(dev, fs);
509
510         ctrlq = &adapter->sge.ctrlq[pi->port_id];
511
512         f = t4_os_alloc(sizeof(*f));
513         if (!f)
514                 goto out_err;
515
516         f->fs = *fs;
517         f->ctx = ctx;
518         f->dev = dev;
519         f->fs.iq = iq;
520
521         atid = cxgbe_alloc_atid(t, f);
522         if (atid < 0)
523                 goto out_err;
524
525         if (f->fs.type) {
526                 /* IPv6 hash filter */
527                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
528                 if (!f->clipt)
529                         goto free_atid;
530
531                 size = sizeof(struct cpl_t6_act_open_req6);
532                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
533                 if (!mbuf) {
534                         ret = -ENOMEM;
535                         goto free_clip;
536                 }
537
538                 mbuf->data_len = size;
539                 mbuf->pkt_len = mbuf->data_len;
540
541                 mk_act_open_req6(f, mbuf,
542                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
543                                  adapter);
544         } else {
545                 /* IPv4 hash filter */
546                 size = sizeof(struct cpl_t6_act_open_req);
547                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
548                 if (!mbuf) {
549                         ret = -ENOMEM;
550                         goto free_atid;
551                 }
552
553                 mbuf->data_len = size;
554                 mbuf->pkt_len = mbuf->data_len;
555
556                 mk_act_open_req(f, mbuf,
557                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
558                                 adapter);
559         }
560
561         f->pending = 1;
562         t4_mgmt_tx(ctrlq, mbuf);
563         return 0;
564
565 free_clip:
566         cxgbe_clip_release(f->dev, f->clipt);
567 free_atid:
568         cxgbe_free_atid(t, atid);
569
570 out_err:
571         t4_os_free(f);
572         return ret;
573 }
574
575 /**
576  * Clear a filter and release any of its resources that we own.  This also
577  * clears the filter's "pending" status.
578  */
579 void clear_filter(struct filter_entry *f)
580 {
581         if (f->clipt)
582                 cxgbe_clip_release(f->dev, f->clipt);
583
584         /*
585          * The zeroing of the filter rule below clears the filter valid,
586          * pending, locked flags etc. so it's all we need for
587          * this operation.
588          */
589         memset(f, 0, sizeof(*f));
590 }
591
592 /**
593  * t4_mk_filtdelwr - create a delete filter WR
594  * @ftid: the filter ID
595  * @wr: the filter work request to populate
596  * @qid: ingress queue to receive the delete notification
597  *
598  * Creates a filter work request to delete the supplied filter.  If @qid is
599  * negative the delete notification is suppressed.
600  */
601 static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
602 {
603         memset(wr, 0, sizeof(*wr));
604         wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
605         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
606         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
607                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
608         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
609         if (qid >= 0)
610                 wr->rx_chan_rx_rpl_iq =
611                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
612 }
613
614 /**
615  * Create FW work request to delete the filter at a specified index
616  */
617 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
618 {
619         struct adapter *adapter = ethdev2adap(dev);
620         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
621         struct rte_mbuf *mbuf;
622         struct fw_filter_wr *fwr;
623         struct sge_ctrl_txq *ctrlq;
624         unsigned int port_id = ethdev2pinfo(dev)->port_id;
625
626         ctrlq = &adapter->sge.ctrlq[port_id];
627         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
628         if (!mbuf)
629                 return -ENOMEM;
630
631         mbuf->data_len = sizeof(*fwr);
632         mbuf->pkt_len = mbuf->data_len;
633
634         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
635         t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
636
637         /*
638          * Mark the filter as "pending" and ship off the Filter Work Request.
639          * When we get the Work Request Reply we'll clear the pending status.
640          */
641         f->pending = 1;
642         t4_mgmt_tx(ctrlq, mbuf);
643         return 0;
644 }
645
646 int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
647 {
648         struct adapter *adapter = ethdev2adap(dev);
649         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
650         struct rte_mbuf *mbuf;
651         struct fw_filter_wr *fwr;
652         struct sge_ctrl_txq *ctrlq;
653         unsigned int port_id = ethdev2pinfo(dev)->port_id;
654         int ret;
655
656         ctrlq = &adapter->sge.ctrlq[port_id];
657         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
658         if (!mbuf) {
659                 ret = -ENOMEM;
660                 goto out;
661         }
662
663         mbuf->data_len = sizeof(*fwr);
664         mbuf->pkt_len = mbuf->data_len;
665
666         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
667         memset(fwr, 0, sizeof(*fwr));
668
669         /*
670          * Construct the work request to set the filter.
671          */
672         fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
673         fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
674         fwr->tid_to_iq =
675                 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
676                             V_FW_FILTER_WR_RQTYPE(f->fs.type) |
677                             V_FW_FILTER_WR_NOREPLY(0) |
678                             V_FW_FILTER_WR_IQ(f->fs.iq));
679         fwr->del_filter_to_l2tix =
680                 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
681                             V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
682                             V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
683                             V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
684                             V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
685                             V_FW_FILTER_WR_PRIO(f->fs.prio));
686         fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
687         fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
688         fwr->smac_sel = 0;
689         fwr->rx_chan_rx_rpl_iq =
690                 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
691                             V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
692                                                      ));
693         fwr->maci_to_matchtypem =
694                 cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
695                             V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
696         fwr->ptcl = f->fs.val.proto;
697         fwr->ptclm = f->fs.mask.proto;
698         rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
699         rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
700         rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
701         rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
702         fwr->lp = cpu_to_be16(f->fs.val.lport);
703         fwr->lpm = cpu_to_be16(f->fs.mask.lport);
704         fwr->fp = cpu_to_be16(f->fs.val.fport);
705         fwr->fpm = cpu_to_be16(f->fs.mask.fport);
706
707         /*
708          * Mark the filter as "pending" and ship off the Filter Work Request.
709          * When we get the Work Request Reply we'll clear the pending status.
710          */
711         f->pending = 1;
712         t4_mgmt_tx(ctrlq, mbuf);
713         return 0;
714
715 out:
716         return ret;
717 }
718
719 /**
720  * Set the corresponding entry in the bitmap. 4 slots are
721  * marked for IPv6, whereas only 1 slot is marked for IPv4.
722  */
723 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
724 {
725         t4_os_lock(&t->ftid_lock);
726         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
727                 t4_os_unlock(&t->ftid_lock);
728                 return -EBUSY;
729         }
730
731         if (family == FILTER_TYPE_IPV4) {
732                 rte_bitmap_set(t->ftid_bmap, fidx);
733         } else {
734                 rte_bitmap_set(t->ftid_bmap, fidx);
735                 rte_bitmap_set(t->ftid_bmap, fidx + 1);
736                 rte_bitmap_set(t->ftid_bmap, fidx + 2);
737                 rte_bitmap_set(t->ftid_bmap, fidx + 3);
738         }
739         t4_os_unlock(&t->ftid_lock);
740         return 0;
741 }
742
743 /**
744  * Clear the corresponding entry in the bitmap. 4 slots are
745  * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
746  */
747 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
748 {
749         t4_os_lock(&t->ftid_lock);
750         if (family == FILTER_TYPE_IPV4) {
751                 rte_bitmap_clear(t->ftid_bmap, fidx);
752         } else {
753                 rte_bitmap_clear(t->ftid_bmap, fidx);
754                 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
755                 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
756                 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
757         }
758         t4_os_unlock(&t->ftid_lock);
759 }
760
761 /**
762  * Check a delete filter request for validity and send it to the hardware.
763  * Return 0 on success, an error number otherwise.  We attach any provided
764  * filter operation context to the internal filter specification in order to
765  * facilitate signaling completion of the operation.
766  */
767 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
768                      struct ch_filter_specification *fs,
769                      struct filter_ctx *ctx)
770 {
771         struct port_info *pi = (struct port_info *)(dev->data->dev_private);
772         struct adapter *adapter = pi->adapter;
773         struct filter_entry *f;
774         unsigned int chip_ver;
775         int ret;
776
777         if (is_hashfilter(adapter) && fs->cap)
778                 return cxgbe_del_hash_filter(dev, filter_id, ctx);
779
780         if (filter_id >= adapter->tids.nftids)
781                 return -ERANGE;
782
783         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
784
785         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
786         if (!ret) {
787                 dev_warn(adap, "%s: could not find filter entry: %u\n",
788                          __func__, filter_id);
789                 return -EINVAL;
790         }
791
792         /*
793          * Ensure filter id is aligned on the 2 slot boundary for T6,
794          * and 4 slot boundary for cards below T6.
795          */
796         if (fs->type) {
797                 if (chip_ver < CHELSIO_T6)
798                         filter_id &= ~(0x3);
799                 else
800                         filter_id &= ~(0x1);
801         }
802
803         f = &adapter->tids.ftid_tab[filter_id];
804         ret = writable_filter(f);
805         if (ret)
806                 return ret;
807
808         if (f->valid) {
809                 f->ctx = ctx;
810                 cxgbe_clear_ftid(&adapter->tids,
811                                  f->tid - adapter->tids.ftid_base,
812                                  f->fs.type ? FILTER_TYPE_IPV6 :
813                                               FILTER_TYPE_IPV4);
814                 return del_filter_wr(dev, filter_id);
815         }
816
817         /*
818          * If the caller has passed in a Completion Context then we need to
819          * mark it as a successful completion so they don't stall waiting
820          * for it.
821          */
822         if (ctx) {
823                 ctx->result = 0;
824                 t4_complete(&ctx->completion);
825         }
826
827         return 0;
828 }
829
830 /**
831  * Check a Chelsio Filter Request for validity, convert it into our internal
832  * format and send it to the hardware.  Return 0 on success, an error number
833  * otherwise.  We attach any provided filter operation context to the internal
834  * filter specification in order to facilitate signaling completion of the
835  * operation.
836  */
837 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
838                      struct ch_filter_specification *fs,
839                      struct filter_ctx *ctx)
840 {
841         struct port_info *pi = ethdev2pinfo(dev);
842         struct adapter *adapter = pi->adapter;
843         unsigned int fidx, iq, fid_bit = 0;
844         struct filter_entry *f;
845         unsigned int chip_ver;
846         uint8_t bitoff[16] = {0};
847         int ret;
848
849         if (is_hashfilter(adapter) && fs->cap)
850                 return cxgbe_set_hash_filter(dev, fs, ctx);
851
852         if (filter_id >= adapter->tids.nftids)
853                 return -ERANGE;
854
855         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
856
857         ret = validate_filter(adapter, fs);
858         if (ret)
859                 return ret;
860
861         /*
862          * Ensure filter id is aligned on the 4 slot boundary for IPv6
863          * maskfull filters.
864          */
865         if (fs->type)
866                 filter_id &= ~(0x3);
867
868         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
869         if (ret)
870                 return -EBUSY;
871
872         iq = get_filter_steerq(dev, fs);
873
874         /*
875          * IPv6 filters occupy four slots and must be aligned on four-slot
876          * boundaries for T5. On T6, IPv6 filters occupy two-slots and
877          * must be aligned on two-slot boundaries.
878          *
879          * IPv4 filters only occupy a single slot and have no alignment
880          * requirements but writing a new IPv4 filter into the middle
881          * of an existing IPv6 filter requires clearing the old IPv6
882          * filter.
883          */
884         if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
885                 /*
886                  * For T6, If our IPv4 filter isn't being written to a
887                  * multiple of two filter index and there's an IPv6
888                  * filter at the multiple of 2 base slot, then we need
889                  * to delete that IPv6 filter ...
890                  * For adapters below T6, IPv6 filter occupies 4 entries.
891                  */
892                 if (chip_ver < CHELSIO_T6)
893                         fidx = filter_id & ~0x3;
894                 else
895                         fidx = filter_id & ~0x1;
896
897                 if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
898                         f = &adapter->tids.ftid_tab[fidx];
899                         if (f->valid)
900                                 return -EBUSY;
901                 }
902         } else { /* IPv6 */
903                 unsigned int max_filter_id;
904
905                 if (chip_ver < CHELSIO_T6) {
906                         /*
907                          * Ensure that the IPv6 filter is aligned on a
908                          * multiple of 4 boundary.
909                          */
910                         if (filter_id & 0x3)
911                                 return -EINVAL;
912
913                         max_filter_id = filter_id + 4;
914                 } else {
915                         /*
916                          * For T6, CLIP being enabled, IPv6 filter would occupy
917                          * 2 entries.
918                          */
919                         if (filter_id & 0x1)
920                                 return -EINVAL;
921
922                         max_filter_id = filter_id + 2;
923                 }
924
925                 /*
926                  * Check all except the base overlapping IPv4 filter
927                  * slots.
928                  */
929                 for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
930                         f = &adapter->tids.ftid_tab[fidx];
931                         if (f->valid)
932                                 return -EBUSY;
933                 }
934         }
935
936         /*
937          * Check to make sure that provided filter index is not
938          * already in use by someone else
939          */
940         f = &adapter->tids.ftid_tab[filter_id];
941         if (f->valid)
942                 return -EBUSY;
943
944         fidx = adapter->tids.ftid_base + filter_id;
945         fid_bit = filter_id;
946         ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
947                              fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
948         if (ret)
949                 return ret;
950
951         /*
952          * Check to make sure the filter requested is writable ...
953          */
954         ret = writable_filter(f);
955         if (ret) {
956                 /* Clear the bits we have set above */
957                 cxgbe_clear_ftid(&adapter->tids, fid_bit,
958                                  fs->type ? FILTER_TYPE_IPV6 :
959                                             FILTER_TYPE_IPV4);
960                 return ret;
961         }
962
963         /*
964          * Allocate a clip table entry only if we have non-zero IPv6 address
965          */
966         if (chip_ver > CHELSIO_T5 && fs->type &&
967             memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
968                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
969                 if (!f->clipt)
970                         goto free_tid;
971         }
972
973         /*
974          * Convert the filter specification into our internal format.
975          * We copy the PF/VF specification into the Outer VLAN field
976          * here so the rest of the code -- including the interface to
977          * the firmware -- doesn't have to constantly do these checks.
978          */
979         f->fs = *fs;
980         f->fs.iq = iq;
981         f->dev = dev;
982
983         /*
984          * Attempt to set the filter.  If we don't succeed, we clear
985          * it and return the failure.
986          */
987         f->ctx = ctx;
988         f->tid = fidx; /* Save the actual tid */
989         ret = set_filter_wr(dev, filter_id);
990         if (ret) {
991                 fid_bit = f->tid - adapter->tids.ftid_base;
992                 goto free_tid;
993         }
994
995         return ret;
996
997 free_tid:
998         cxgbe_clear_ftid(&adapter->tids, fid_bit,
999                          fs->type ? FILTER_TYPE_IPV6 :
1000                                     FILTER_TYPE_IPV4);
1001         clear_filter(f);
1002         return ret;
1003 }
1004
1005 /**
1006  * Handle a Hash filter write reply.
1007  */
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	/* tid assigned by hardware for the new connection/filter */
	unsigned int tid = GET_TID(rpl);
	/* atid we supplied in the original open request */
	unsigned int ftid = G_TID_TID(G_AOPEN_ATID
				      (be32_to_cpu(rpl->atid_status)));
	unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %d\n",
			 __func__, ftid);
		return;
	}

	/* Detach the completion context before we mutate/free the entry */
	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE: {
		/* Success: the filter now lives under the hardware tid,
		 * so move it from the atid table to the tid table.
		 */
		f->tid = tid;
		f->pending = 0;  /* asynchronous setup completed */
		f->valid = 1;

		cxgbe_insert_tid(t, f, f->tid, 0);
		cxgbe_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		/* If hit counts were requested, zero the TCB timestamp
		 * fields that hardware reuses as the counters.
		 */
		if (f->fs.hitcnts)
			set_tcb_field(adap, tid,
				      W_TCB_TIMESTAMP,
				      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
				      V_TCB_T_RTT_TS_RECENT_AGE
					      (M_TCB_T_RTT_TS_RECENT_AGE),
				      V_TCB_TIMESTAMP(0ULL) |
				      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
				      1);
		break;
	}
	default:
		dev_warn(adap, "%s: filter creation failed with status = %u\n",
			 __func__, status);

		if (ctx) {
			/* TCAM full is transient: let the caller retry */
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -EAGAIN;
			else
				ctx->result = -EINVAL;
		}

		/* Failure: release the atid and the filter entry itself */
		cxgbe_free_atid(t, ftid);
		t4_os_free(f);
	}

	/* Wake up anyone waiting on the operation, success or failure */
	if (ctx)
		t4_complete(&ctx->completion);
}
1069
1070 /**
1071  * Handle a LE-TCAM filter write/deletion reply.
1072  */
1073 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1074 {
1075         struct filter_entry *f = NULL;
1076         unsigned int tid = GET_TID(rpl);
1077         int idx, max_fidx = adap->tids.nftids;
1078
1079         /* Get the corresponding filter entry for this tid */
1080         if (adap->tids.ftid_tab) {
1081                 /* Check this in normal filter region */
1082                 idx = tid - adap->tids.ftid_base;
1083                 if (idx >= max_fidx)
1084                         return;
1085
1086                 f = &adap->tids.ftid_tab[idx];
1087                 if (f->tid != tid)
1088                         return;
1089         }
1090
1091         /* We found the filter entry for this tid */
1092         if (f) {
1093                 unsigned int ret = G_COOKIE(rpl->cookie);
1094                 struct filter_ctx *ctx;
1095
1096                 /*
1097                  * Pull off any filter operation context attached to the
1098                  * filter.
1099                  */
1100                 ctx = f->ctx;
1101                 f->ctx = NULL;
1102
1103                 if (ret == FW_FILTER_WR_FLT_ADDED) {
1104                         f->pending = 0;  /* asynchronous setup completed */
1105                         f->valid = 1;
1106                         if (ctx) {
1107                                 ctx->tid = f->tid;
1108                                 ctx->result = 0;
1109                         }
1110                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1111                         /*
1112                          * Clear the filter when we get confirmation from the
1113                          * hardware that the filter has been deleted.
1114                          */
1115                         clear_filter(f);
1116                         if (ctx)
1117                                 ctx->result = 0;
1118                 } else {
1119                         /*
1120                          * Something went wrong.  Issue a warning about the
1121                          * problem and clear everything out.
1122                          */
1123                         dev_warn(adap, "filter %u setup failed with error %u\n",
1124                                  idx, ret);
1125                         clear_filter(f);
1126                         if (ctx)
1127                                 ctx->result = -EINVAL;
1128                 }
1129
1130                 if (ctx)
1131                         t4_complete(&ctx->completion);
1132         }
1133 }
1134
1135 /*
1136  * Retrieve the packet count for the specified filter.
1137  */
1138 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1139                            u64 *c, int hash, bool get_byte)
1140 {
1141         struct filter_entry *f;
1142         unsigned int tcb_base, tcbaddr;
1143         int ret;
1144
1145         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1146         if (is_hashfilter(adapter) && hash) {
1147                 if (fidx < adapter->tids.ntids) {
1148                         f = adapter->tids.tid_tab[fidx];
1149                         if (!f)
1150                                 return -EINVAL;
1151
1152                         if (is_t5(adapter->params.chip)) {
1153                                 *c = 0;
1154                                 return 0;
1155                         }
1156                         tcbaddr = tcb_base + (fidx * TCB_SIZE);
1157                         goto get_count;
1158                 } else {
1159                         return -ERANGE;
1160                 }
1161         } else {
1162                 if (fidx >= adapter->tids.nftids)
1163                         return -ERANGE;
1164
1165                 f = &adapter->tids.ftid_tab[fidx];
1166                 if (!f->valid)
1167                         return -EINVAL;
1168
1169                 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1170         }
1171
1172         f = &adapter->tids.ftid_tab[fidx];
1173         if (!f->valid)
1174                 return -EINVAL;
1175
1176 get_count:
1177         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1178                 /*
1179                  * For T5, the Filter Packet Hit Count is maintained as a
1180                  * 32-bit Big Endian value in the TCB field {timestamp}.
1181                  * Similar to the craziness above, instead of the filter hit
1182                  * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1183                  * sizeof(u32)), it actually shows up at offset 24.  Whacky.
1184                  */
1185                 if (get_byte) {
1186                         unsigned int word_offset = 4;
1187                         __be64 be64_byte_count;
1188
1189                         t4_os_lock(&adapter->win0_lock);
1190                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1191                                            tcbaddr +
1192                                            (word_offset * sizeof(__be32)),
1193                                            sizeof(be64_byte_count),
1194                                            &be64_byte_count,
1195                                            T4_MEMORY_READ);
1196                         t4_os_unlock(&adapter->win0_lock);
1197                         if (ret < 0)
1198                                 return ret;
1199                         *c = be64_to_cpu(be64_byte_count);
1200                 } else {
1201                         unsigned int word_offset = 6;
1202                         __be32 be32_count;
1203
1204                         t4_os_lock(&adapter->win0_lock);
1205                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1206                                            tcbaddr +
1207                                            (word_offset * sizeof(__be32)),
1208                                            sizeof(be32_count), &be32_count,
1209                                            T4_MEMORY_READ);
1210                         t4_os_unlock(&adapter->win0_lock);
1211                         if (ret < 0)
1212                                 return ret;
1213                         *c = (u64)be32_to_cpu(be32_count);
1214                 }
1215         }
1216         return 0;
1217 }
1218
1219 /**
1220  * Handle a Hash filter delete reply.
1221  */
1222 void hash_del_filter_rpl(struct adapter *adap,
1223                          const struct cpl_abort_rpl_rss *rpl)
1224 {
1225         struct tid_info *t = &adap->tids;
1226         struct filter_entry *f;
1227         struct filter_ctx *ctx = NULL;
1228         unsigned int tid = GET_TID(rpl);
1229
1230         f = lookup_tid(t, tid);
1231         if (!f) {
1232                 dev_warn(adap, "%s: could not find filter entry: %u\n",
1233                          __func__, tid);
1234                 return;
1235         }
1236
1237         ctx = f->ctx;
1238         f->ctx = NULL;
1239
1240         f->valid = 0;
1241
1242         if (f->clipt)
1243                 cxgbe_clip_release(f->dev, f->clipt);
1244
1245         cxgbe_remove_tid(t, 0, tid, 0);
1246         t4_os_free(f);
1247
1248         if (ctx) {
1249                 ctx->result = 0;
1250                 t4_complete(&ctx->completion);
1251         }
1252 }