/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <avf/avf.h>
#include <avf/avf_advanced_flow.h>
29 avf_fdir_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
30 u32 in_len, void *out, u32 out_len)
32 u32 dev_instance = *(u32 *) vc_hdl;
33 avf_device_t *ad = avf_get_device (dev_instance);
34 clib_error_t *err = 0;
37 if (vc_op >= VIRTCHNL_ADV_OP_MAX)
44 case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
47 case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
51 avf_log_err (ad, "unsupported avf virtual channel opcode %u\n",
56 err = avf_program_flow (dev_instance, is_add, in, in_len, out, out_len);
59 avf_log_err (ad, "avf fdir program failed: %U", format_clib_error, err);
60 clib_error_free (err);
64 avf_log_debug (ad, "avf fdir program success");
69 avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
71 avf_device_t *ad = avf_get_device (dev_instance);
74 u16 src_port = 0, dst_port = 0;
75 u16 src_port_mask = 0, dst_port_mask = 0;
76 u8 protocol = IP_PROTOCOL_RESERVED;
78 struct avf_flow_error error;
83 struct avf_fdir_vc_ctx vc_ctx;
84 struct avf_fdir_conf *filter;
85 struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
86 struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];
88 struct avf_ipv4_hdr ip4_spec, ip4_mask;
89 struct avf_tcp_hdr tcp_spec, tcp_mask;
90 struct avf_udp_hdr udp_spec, udp_mask;
91 struct avf_gtp_hdr gtp_spec, gtp_mask;
93 struct avf_flow_action_queue act_q;
94 struct avf_flow_action_mark act_msk;
96 ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
99 rv = VNET_FLOW_ERROR_INTERNAL;
103 /* init a virtual channel context */
104 vc_ctx.vc_hdl = &dev_instance;
105 vc_ctx.vc_op = avf_fdir_vc_op_callback;
107 clib_memset (avf_items, 0, sizeof (avf_actions));
108 clib_memset (avf_actions, 0, sizeof (avf_actions));
111 avf_items[layer].type = VIRTCHNL_PROTO_HDR_ETH;
112 avf_items[layer].spec = NULL;
116 if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
117 (f->type == VNET_FLOW_TYPE_IP4_GTPU))
119 vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
120 memset (&ip4_spec, 0, sizeof (ip4_spec));
121 memset (&ip4_mask, 0, sizeof (ip4_mask));
124 avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV4;
125 avf_items[layer].spec = &ip4_spec;
126 avf_items[layer].mask = &ip4_mask;
129 src_port = t4->src_port.port;
130 dst_port = t4->dst_port.port;
131 src_port_mask = t4->src_port.mask;
132 dst_port_mask = t4->dst_port.mask;
133 protocol = t4->protocol.prot;
135 if (t4->src_addr.mask.as_u32)
137 ip4_spec.src_addr = t4->src_addr.addr.as_u32;
138 ip4_mask.src_addr = t4->src_addr.mask.as_u32;
140 if (t4->dst_addr.mask.as_u32)
142 ip4_spec.dst_addr = t4->dst_addr.addr.as_u32;
143 ip4_mask.dst_addr = t4->dst_addr.mask.as_u32;
147 if (protocol == IP_PROTOCOL_TCP)
149 memset (&tcp_spec, 0, sizeof (tcp_spec));
150 memset (&tcp_mask, 0, sizeof (tcp_mask));
152 avf_items[layer].type = VIRTCHNL_PROTO_HDR_TCP;
153 avf_items[layer].spec = &tcp_spec;
154 avf_items[layer].mask = &tcp_mask;
159 tcp_spec.src_port = clib_host_to_net_u16 (src_port);
160 tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
164 tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
165 tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
168 else if (protocol == IP_PROTOCOL_UDP)
170 memset (&udp_spec, 0, sizeof (udp_spec));
171 memset (&udp_mask, 0, sizeof (udp_mask));
173 avf_items[layer].type = VIRTCHNL_PROTO_HDR_UDP;
174 avf_items[layer].spec = &udp_spec;
175 avf_items[layer].mask = &udp_mask;
180 udp_spec.src_port = clib_host_to_net_u16 (src_port);
181 udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
185 udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
186 udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
191 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
195 if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
198 memset (>p_spec, 0, sizeof (gtp_spec));
199 memset (>p_mask, 0, sizeof (gtp_mask));
201 vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
202 gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
205 avf_items[layer].type = VIRTCHNL_PROTO_HDR_GTPU_IP;
206 avf_items[layer].spec = >p_spec;
207 avf_items[layer].mask = >p_mask;
211 /* pattern end flag */
212 avf_items[layer].type = VIRTCHNL_PROTO_HDR_NONE;
213 ret = avf_fdir_parse_pattern (filter, avf_items, &error);
216 avf_log_err (ad, "avf fdir parse pattern failed: %s", error.message);
217 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
222 /* Only one 'fate' can be assigned */
223 if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
225 avf_actions[action_count].type = VIRTCHNL_ACTION_QUEUE;
226 avf_actions[action_count].conf = &act_q;
228 act_q.index = f->redirect_queue;
233 if (f->actions & VNET_FLOW_ACTION_DROP)
235 avf_actions[action_count].type = VIRTCHNL_ACTION_DROP;
236 avf_actions[action_count].conf = NULL;
240 rv = VNET_FLOW_ERROR_INTERNAL;
251 avf_actions[action_count].type = VIRTCHNL_ACTION_PASSTHRU;
252 avf_actions[action_count].conf = NULL;
258 if (f->actions & VNET_FLOW_ACTION_MARK)
260 avf_actions[action_count].type = VIRTCHNL_ACTION_MARK;
261 avf_actions[action_count].conf = &act_msk;
264 act_msk.id = fe->mark;
267 /* action end flag */
268 avf_actions[action_count].type = VIRTCHNL_ACTION_NONE;
271 ret = avf_fdir_parse_action (avf_actions, filter, &error);
274 avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
275 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
279 /* create flow rule, save rule */
280 ret = avf_fdir_rule_create (&vc_ctx, filter);
284 avf_log_err (ad, "avf fdir rule create failed: %s",
285 avf_fdir_prgm_error_decode (ret));
286 rv = VNET_FLOW_ERROR_INTERNAL;
299 avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
300 u32 flow_index, uword *private_data)
302 vnet_flow_t *flow = vnet_get_flow (flow_index);
303 avf_device_t *ad = avf_get_device (dev_instance);
304 avf_flow_entry_t *fe = NULL;
305 avf_flow_lookup_entry_t *fle = NULL;
308 if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
310 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
314 if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
316 pool_get (ad->flow_entries, fe);
317 fe->flow_index = flow->index;
319 /* if we need to mark packets, assign one mark */
321 (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
322 VNET_FLOW_ACTION_BUFFER_ADVANCE))
325 if (ad->flow_lookup_entries == 0)
326 pool_get_aligned (ad->flow_lookup_entries, fle,
327 CLIB_CACHE_LINE_BYTES);
328 pool_get_aligned (ad->flow_lookup_entries, fle,
329 CLIB_CACHE_LINE_BYTES);
330 fe->mark = fle - ad->flow_lookup_entries;
332 /* install entry in the lookup table */
333 clib_memset (fle, -1, sizeof (*fle));
334 if (flow->actions & VNET_FLOW_ACTION_MARK)
335 fle->flow_id = flow->mark_flow_id;
336 if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
337 fle->next_index = flow->redirect_device_input_next_index;
338 if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
339 fle->buffer_advance = flow->buffer_advance;
341 if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
343 ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
351 case VNET_FLOW_TYPE_IP4_N_TUPLE:
352 case VNET_FLOW_TYPE_IP4_GTPU:
353 if ((rv = avf_flow_add (dev_instance, flow, fe)))
357 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
361 *private_data = fe - ad->flow_entries;
363 else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
365 fe = vec_elt_at_index (ad->flow_entries, *private_data);
367 struct avf_fdir_vc_ctx ctx;
368 ctx.vc_hdl = &dev_instance;
369 ctx.vc_op = avf_fdir_vc_op_callback;
371 rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
373 return VNET_FLOW_ERROR_INTERNAL;
377 fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
378 clib_memset (fle, -1, sizeof (*fle));
379 pool_put_index (ad->flow_lookup_entries, fe->mark);
382 (void) avf_fdir_rcfg_destroy (fe->rcfg);
383 clib_memset (fe, 0, sizeof (*fe));
384 pool_put (ad->flow_entries, fe);
385 goto disable_rx_offload;
388 return VNET_FLOW_ERROR_NOT_SUPPORTED;
395 clib_memset (fe, 0, sizeof (*fe));
396 pool_put (ad->flow_entries, fe);
401 clib_memset (fle, -1, sizeof (*fle));
402 pool_put (ad->flow_lookup_entries, fle);
406 if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
407 pool_elts (ad->flow_entries) == 0)
409 ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */