/*
 * Copyright (c) 2024 Marvell.
 * SPDX-License-Identifier: Apache-2.0
 * https://spdx.org/licenses/Apache-2.0.html
 */
7 #include <dev_octeon/octeon.h>
8 #include <base/roc_npc_priv.h>
/* Per-driver log class: log_err() calls in this file emit under the
   "octeon/flow" class/subclass pair.
   NOTE(review): the closing "};" is not visible in this excerpt. */
10 VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
11 .class_name = "octeon",
12 .subclass_name = "flow",
/* Flow-classification predicates over a vnet_flow_t pointer 'f'.
   They test f->type against the VNET_FLOW_TYPE_* enums (vnet/flow.h). */
/* True when the flow matches on the Ethernet header only. */
15 #define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)
/* True when the outer L3 header is IPv4: plain, n-tuple (optionally
   tagged), tunneled (VXLAN/GTPC/GTPU/L2TPv3oIP) and IPsec (ESP/AH). */
17 #define FLOW_IS_IPV4_CLASS(f) \
18 ((f->type == VNET_FLOW_TYPE_IP4) || \
19 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
20 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
21 (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
22 (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
23 (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
24 (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
25 (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
26 (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))
/* True when the outer L3 header is IPv6 (plain, n-tuple, VXLAN). */
28 #define FLOW_IS_IPV6_CLASS(f) \
29 ((f->type == VNET_FLOW_TYPE_IP6) || \
30 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
31 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
32 (f->type == VNET_FLOW_TYPE_IP6_VXLAN))
/* True for an L3-only match (no L4 ports). */
34 #define FLOW_IS_L3_TYPE(f) \
35 ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))
/* True when the flow carries L4 src/dst ports (n-tuple variants). */
37 #define FLOW_IS_L4_TYPE(f) \
38 ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
39 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
40 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
41 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
/* True for UDP-encapsulated tunnels that need an inner match layer. */
43 #define FLOW_IS_L4_TUNNEL_TYPE(f) \
44 ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
45 (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
46 (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
47 (f->type == VNET_FLOW_TYPE_IP4_GTPU))
/* Actions this driver rejects up front in oct_flow_validate_params().
   NOTE(review): compares with '==' not '&' — flags combined with other
   actions would not be caught here; confirm intent against callers. */
49 #define OCT_FLOW_UNSUPPORTED_ACTIONS(f) \
50 ((f->actions == VNET_FLOW_ACTION_BUFFER_ADVANCE) || \
51 (f->actions == VNET_FLOW_ACTION_REDIRECT_TO_NODE))
/* Map of vnet RSS-type bit positions to ROC FLOW_KEY_TYPE_* masks.
   First column is the bit index inside flow->rss_types. */
53 /* Keep values in sync with vnet/flow.h */
54 #define foreach_oct_flow_rss_types \
55 _ (1, FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_TCP, "ipv4-tcp") \
56 _ (2, FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_UDP, "ipv4-udp") \
57 _ (3, FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_SCTP, "ipv4-sctp") \
58 _ (5, FLOW_KEY_TYPE_IPV4, "ipv4") \
59 _ (9, FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_TCP, "ipv6-tcp") \
60 _ (10, FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_UDP, "ipv6-udp") \
61 _ (11, FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_SCTP, "ipv6-sctp") \
62 _ (13, FLOW_KEY_TYPE_IPV6_EXT, "ipv6-ex") \
63 _ (14, FLOW_KEY_TYPE_IPV6, "ipv6") \
64 _ (16, FLOW_KEY_TYPE_PORT, "port") \
65 _ (17, FLOW_KEY_TYPE_VXLAN, "vxlan") \
66 _ (18, FLOW_KEY_TYPE_GENEVE, "geneve") \
67 _ (19, FLOW_KEY_TYPE_NVGRE, "nvgre") \
68 _ (20, FLOW_KEY_TYPE_GTPU, "gtpu") \
69 _ (60, FLOW_KEY_TYPE_L4_DST, "l4-dst-only") \
70 _ (61, FLOW_KEY_TYPE_L4_SRC, "l4-src-only") \
71 _ (62, FLOW_KEY_TYPE_L3_DST, "l3-dst-only") \
72 _ (63, FLOW_KEY_TYPE_L3_SRC, "l3-src-only")
/* Translate vnet flow rss_types bits (bit positions per vnet/flow.h,
   see foreach_oct_flow_rss_types above) into the ROC FLOW_KEY_TYPE_*
   mask expected by the NPC, accumulating the result into *key.
   NOTE(review): excerpt is truncated — the return type, braces and the
   local _() helper macro expanded by foreach_oct_flow_rss_types are not
   visible here. */
91 oct_flow_convert_rss_types (u64 *key, u64 rss_types)
94 if (rss_types & (1UL << a)) \
97 foreach_oct_flow_rss_types
/* Sanity-check a flow request before programming hardware.
   Rejects unsupported cfg types/actions and validates that any
   redirect/RSS queue references fall inside the port's RX queue range.
   Returns VNET_DEV_OK on success, VNET_DEV_ERR_NOT_SUPPORTED otherwise.
   NOTE(review): excerpt is truncated — braces, the trailing parameter(s)
   of the signature and the declarations of qid/last_queue are missing. */
104 oct_flow_validate_params (vlib_main_t *vm, vnet_dev_port_t *port,
105 vnet_dev_port_cfg_type_t type, u32 flow_index,
108 vnet_flow_t *flow = vnet_get_flow (flow_index);
/* Counter get/reset is handled by oct_flow_query(), not via this path. */
112 if (type == VNET_DEV_PORT_CFG_GET_RX_FLOW_COUNTER ||
113 type == VNET_DEV_PORT_CFG_RESET_RX_FLOW_COUNTER)
115 log_err (port->dev, "Unsupported request type");
116 return VNET_DEV_ERR_NOT_SUPPORTED;
119 if (OCT_FLOW_UNSUPPORTED_ACTIONS (flow))
121 log_err (port->dev, "Unsupported flow action");
122 return VNET_DEV_ERR_NOT_SUPPORTED;
/* Redirect target queue must be an existing RX queue. */
125 if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
127 qid = flow->redirect_queue;
/* NOTE(review): if num_rx_queues is unsigned, "num_rx_queues - 1"
   wraps when it is 0 — presumably ports always have >= 1 RX queue;
   confirm against the dev framework. */
128 if (qid > port->intf.num_rx_queues - 1 || qid < 0)
131 "Given Q(%d) is invalid, supported range is %d-%d", qid, 0,
132 port->intf.num_rx_queues - 1);
133 return VNET_DEV_ERR_NOT_SUPPORTED;
/* RSS queue span [queue_index, queue_index + queue_num) must fit. */
137 if (flow->actions & VNET_FLOW_ACTION_RSS)
139 last_queue = flow->queue_index + flow->queue_num;
140 if (last_queue > port->intf.num_rx_queues - 1)
143 "Given Q range(%d-%d) is invalid, supported range is %d-%d",
144 flow->queue_index, flow->queue_index + flow->queue_num, 0,
145 port->intf.num_rx_queues - 1);
146 return VNET_DEV_ERR_NOT_SUPPORTED;
/* Program an already-built item/action list into the NPC and record the
   resulting rule in the per-port flow_entries pool.  On success the pool
   index is returned to the caller through *private_data so later
   del/query operations can find the roc_npc_flow.
   NOTE(review): excerpt is truncated — braces, the private_data
   parameter declaration, 'npc'/'rv' declarations and the error check
   after roc_npc_flow_create() are not visible. */
153 oct_flow_rule_create (vnet_dev_port_t *port, struct roc_npc_action *actions,
154 struct roc_npc_item_info *item_info, vnet_flow_t *flow,
157 oct_port_t *oct_port = vnet_dev_get_port_data (port);
/* Single-priority ingress rule. */
158 struct roc_npc_attr attr = { .priority = 1, .ingress = 1 };
159 struct roc_npc_flow *npc_flow;
160 oct_flow_entry_t *flow_entry;
164 npc = &oct_port->npc;
167 roc_npc_flow_create (npc, &attr, item_info, actions, npc->pf_func, &rv);
170 log_err (port->dev, "roc_npc_flow_create failed with '%s' error",
171 roc_error_msg_get (rv));
172 return VNET_DEV_ERR_NOT_SUPPORTED;
/* Start the rule's MCAM hit counter from zero. */
174 roc_npc_mcam_clear_counter (npc, npc_flow->ctr_id);
/* Remember the mapping vnet flow -> NPC flow for del/query. */
176 pool_get_zero (oct_port->flow_entries, flow_entry);
177 flow_entry->index = flow_entry - oct_port->flow_entries;
178 flow_entry->vnet_flow_index = flow->index;
179 flow_entry->npc_flow = npc_flow;
181 *private_data = flow_entry->index;
/* Build a ROC NPC match-item list and action list from a vnet_flow_t and
   program the rule via oct_flow_rule_create().
   Match layers are filled in order ETH | IPV4/IPV6 -> UDP/TCP/SCTP/ESP
   (-> GTPU for tunnels) and terminated with ROC_NPC_ITEM_TYPE_END; each
   layer's .spec holds the values to match and .mask the significant bits.
   Fixes in this revision: repaired four mis-decoded address-of
   expressions ("&eth_*" / "&gtpu_*" had been HTML-entity garbled) and
   freed 'queues' on the invalid-RSS-hash error path (was leaked).
   NOTE(review): excerpt is truncated — several braces, the
   switch (proto) header and the declarations of proto/flow_key/queues/
   action are not visible here. */
187 oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
190 struct roc_npc_item_info item_info[ROC_NPC_ITEM_TYPE_END] = {};
191 struct roc_npc_action actions[ROC_NPC_ITEM_TYPE_END] = {};
192 oct_port_t *oct_port = vnet_dev_get_port_data (port);
/* L4 ports/masks are captured in network byte order for the NPC. */
193 u16 l4_src_port = 0, l4_dst_port = 0;
194 u16 l4_src_mask = 0, l4_dst_mask = 0;
195 struct roc_npc_action_rss rss_conf = {};
196 struct roc_npc_action_queue conf = {};
197 struct roc_npc_action_mark mark = {};
198 struct roc_npc *npc = &oct_port->npc;
199 vnet_dev_rv_t rv = VNET_DEV_OK;
200 int layer = 0, index = 0;
/* --- Match layers ------------------------------------------------- */
206 if (FLOW_IS_ETHERNET_CLASS (flow))
208 ethernet_header_t eth_spec = { .type = clib_host_to_net_u16 (
209 flow->ethernet.eth_hdr.type) },
210 eth_mask = { .type = 0xFFFF };
212 item_info[layer].spec = (void *) &eth_spec;
213 item_info[layer].mask = (void *) &eth_mask;
214 item_info[layer].size = sizeof (ethernet_header_t);
215 item_info[layer].type = ROC_NPC_ITEM_TYPE_ETH;
219 else if (FLOW_IS_IPV4_CLASS (flow))
221 vnet_flow_ip4_t *ip4_hdr = &flow->ip4;
222 proto = ip4_hdr->protocol.prot;
223 ip4_header_t ip4_spec = { .src_address = ip4_hdr->src_addr.addr,
224 .dst_address = ip4_hdr->dst_addr.addr },
225 ip4_mask = { .src_address = ip4_hdr->src_addr.mask,
226 .dst_address = ip4_hdr->dst_addr.mask };
228 item_info[layer].spec = (void *) &ip4_spec;
229 item_info[layer].mask = (void *) &ip4_mask;
230 item_info[layer].size = sizeof (ip4_header_t);
231 item_info[layer].type = ROC_NPC_ITEM_TYPE_IPV4;
234 if (FLOW_IS_L4_TYPE (flow))
236 vnet_flow_ip4_n_tuple_t *ip4_tuple_hdr = &flow->ip4_n_tuple;
238 l4_src_port = clib_host_to_net_u16 (ip4_tuple_hdr->src_port.port);
239 l4_dst_port = clib_host_to_net_u16 (ip4_tuple_hdr->dst_port.port);
240 l4_src_mask = clib_host_to_net_u16 (ip4_tuple_hdr->src_port.mask);
241 l4_dst_mask = clib_host_to_net_u16 (ip4_tuple_hdr->dst_port.mask);
244 else if (FLOW_IS_IPV6_CLASS (flow))
246 vnet_flow_ip6_t *ip6_hdr = &flow->ip6;
247 proto = ip6_hdr->protocol.prot;
248 ip6_header_t ip6_spec = { .src_address = ip6_hdr->src_addr.addr,
249 .dst_address = ip6_hdr->dst_addr.addr },
250 ip6_mask = { .src_address = ip6_hdr->src_addr.mask,
251 .dst_address = ip6_hdr->dst_addr.mask };
253 item_info[layer].spec = (void *) &ip6_spec;
254 item_info[layer].mask = (void *) &ip6_mask;
255 item_info[layer].size = sizeof (ip6_header_t);
256 item_info[layer].type = ROC_NPC_ITEM_TYPE_IPV6;
259 if (FLOW_IS_L4_TYPE (flow))
261 vnet_flow_ip6_n_tuple_t *ip6_tuple_hdr = &flow->ip6_n_tuple;
263 l4_src_port = clib_host_to_net_u16 (ip6_tuple_hdr->src_port.port);
264 l4_dst_port = clib_host_to_net_u16 (ip6_tuple_hdr->dst_port.port);
265 l4_src_mask = clib_host_to_net_u16 (ip6_tuple_hdr->src_port.mask);
266 l4_dst_mask = clib_host_to_net_u16 (ip6_tuple_hdr->dst_port.mask);
/* --- L4 layer selected by the IP protocol field ------------------- */
275 case IP_PROTOCOL_UDP:
276 item_info[layer].type = ROC_NPC_ITEM_TYPE_UDP;
278 udp_header_t udp_spec = { .src_port = l4_src_port,
279 .dst_port = l4_dst_port },
280 udp_mask = { .src_port = l4_src_mask,
281 .dst_port = l4_dst_mask };
283 item_info[layer].spec = (void *) &udp_spec;
284 item_info[layer].mask = (void *) &udp_mask;
285 item_info[layer].size = sizeof (udp_header_t);
/* Tunnels over UDP get an extra inner match layer (only GTPU here). */
288 if (FLOW_IS_L4_TUNNEL_TYPE (flow))
292 case VNET_FLOW_TYPE_IP4_GTPU:
293 item_info[layer].type = ROC_NPC_ITEM_TYPE_GTPU;
294 gtpu_header_t gtpu_spec = { .teid = clib_host_to_net_u32 (
295 flow->ip4_gtpu.teid) },
296 gtpu_mask = { .teid = 0XFFFFFFFF };
298 item_info[layer].spec = (void *) &gtpu_spec;
299 item_info[layer].mask = (void *) &gtpu_mask;
300 item_info[layer].size = sizeof (gtpu_header_t);
305 log_err (port->dev, "Unsupported L4 tunnel type");
306 return VNET_DEV_ERR_NOT_SUPPORTED;
308 } /* FLOW_IS_L4_TUNNEL_TYPE */
311 case IP_PROTOCOL_TCP:
312 item_info[layer].type = ROC_NPC_ITEM_TYPE_TCP;
314 tcp_header_t tcp_spec = { .src_port = l4_src_port,
315 .dst_port = l4_dst_port },
316 tcp_mask = { .src_port = l4_src_mask,
317 .dst_port = l4_dst_mask };
319 item_info[layer].spec = (void *) &tcp_spec;
320 item_info[layer].mask = (void *) &tcp_mask;
321 item_info[layer].size = sizeof (tcp_header_t);
325 case IP_PROTOCOL_SCTP:
326 item_info[layer].type = ROC_NPC_ITEM_TYPE_SCTP;
328 sctp_header_t sctp_spec = { .src_port = l4_src_port,
329 .dst_port = l4_dst_port },
330 sctp_mask = { .src_port = l4_src_mask,
331 .dst_port = l4_dst_mask };
333 item_info[layer].spec = (void *) &sctp_spec;
334 item_info[layer].mask = (void *) &sctp_mask;
335 item_info[layer].size = sizeof (sctp_header_t);
339 case IP_PROTOCOL_IPSEC_ESP:
340 item_info[layer].type = ROC_NPC_ITEM_TYPE_ESP;
341 esp_header_t esp_spec = { .spi = clib_host_to_net_u32 (
342 flow->ip4_ipsec_esp.spi) },
343 esp_mask = { .spi = 0xFFFFFFFF };
345 item_info[layer].spec = (void *) &esp_spec;
346 item_info[layer].mask = (void *) &esp_mask;
/* Only the SPI word is matched, hence sizeof (u32). */
347 item_info[layer].size = sizeof (u32);
352 log_err (port->dev, "Unsupported IP protocol '%U'", format_ip_protocol,
354 return VNET_DEV_ERR_NOT_SUPPORTED;
358 item_info[layer].type = ROC_NPC_ITEM_TYPE_END;
/* --- Actions (queue / drop / rss are mutually exclusive) ---------- */
360 if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
362 conf.index = flow->redirect_queue;
363 actions[action].type = ROC_NPC_ACTION_TYPE_QUEUE;
364 actions[action].conf = &conf;
368 else if (flow->actions & VNET_FLOW_ACTION_DROP)
370 actions[action].type = ROC_NPC_ACTION_TYPE_DROP;
374 else if (flow->actions & VNET_FLOW_ACTION_RSS)
376 if (!flow->queue_num)
378 log_err (port->dev, "RSS action has no queues");
379 return VNET_DEV_ERR_NOT_SUPPORTED;
381 queues = clib_mem_alloc (sizeof (u16) * port->intf.num_rx_queues);
/* NOTE(review): this loop mutates flow->queue_index as a side effect;
   confirm no caller relies on the original value afterwards. */
383 for (index = 0; index < flow->queue_num; index++)
384 queues[index] = flow->queue_index++;
386 oct_flow_convert_rss_types (&flow_key, flow->rss_types);
/* Don't leak the queue array on the error path. */
clib_mem_free (queues);
389 log_err (port->dev, "Invalid RSS hash function");
390 return VNET_DEV_ERR_NOT_SUPPORTED;
392 npc->flowkey_cfg_state = flow_key;
393 rss_conf.queue_num = flow->queue_num;
394 rss_conf.queue = queues;
396 actions[action].type = ROC_NPC_ACTION_TYPE_RSS;
397 actions[action].conf = &rss_conf;
/* MARK may be combined with any of the actions above. */
401 if (flow->actions & VNET_FLOW_ACTION_MARK)
403 if (flow->mark_flow_id == 0 ||
404 flow->mark_flow_id > (NPC_FLOW_FLAG_VAL - 2))
406 log_err (port->dev, "mark flow id must be > 0 and < 0xfffe");
407 return VNET_DEV_ERR_NOT_SUPPORTED;
409 /* RoC library adds 1 to id, so subtract 1 */
410 mark.id = flow->mark_flow_id - 1;
411 actions[action].type = ROC_NPC_ACTION_TYPE_MARK;
412 actions[action].conf = &mark;
416 /* make count as default action */
417 actions[action].type = ROC_NPC_ACTION_TYPE_COUNT;
418 actions[action + 1].type = ROC_NPC_ACTION_TYPE_END;
420 rv = oct_flow_rule_create (port, actions, item_info, flow, private_data);
421 /* rule is programmed (or failed); the RSS queue list is no longer
   needed either way. */
423 clib_mem_free (queues);
/* Tear down a previously-added rule: look up the pool entry recorded by
   oct_flow_rule_create() via *private_data, destroy the NPC rule and
   release the pool slot.
   NOTE(review): excerpt is truncated — braces, the private_data
   parameter and the index/rv declarations are not visible. */
429 oct_flow_del (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
432 oct_port_t *oct_port = vnet_dev_get_port_data (port);
433 struct roc_npc *npc = &oct_port->npc;
434 struct roc_npc_flow *npc_flow;
435 oct_flow_entry_t *flow_entry;
/* private_data carries the flow_entries pool index set at add time. */
438 index = *private_data;
439 flow_entry = pool_elt_at_index (oct_port->flow_entries, index);
440 npc_flow = flow_entry->npc_flow;
441 rv = roc_npc_flow_destroy (npc, npc_flow);
444 log_err (port->dev, "roc_npc_flow_destroy failed with '%s' error",
445 roc_error_msg_get (rv));
446 return VNET_DEV_ERR_NOT_SUPPORTED;
/* NOTE(review): the pool entry stays allocated if destroy fails above;
   presumably intentional so the rule can be retried — confirm. */
448 pool_put (oct_port->flow_entries, flow_entry);
/* Read the MCAM hit counter of a programmed rule into *hits.
   private_data is the flow_entries pool index returned at add time.
   Returns NOT_SUPPORTED when the pool is empty or the rule has no
   counter, INTERNAL on a counter read failure.
   NOTE(review): excerpt is truncated — braces, flow_count/rv
   declarations and the empty-pool condition line are not visible. */
454 oct_flow_query (vlib_main_t *vm, vnet_dev_port_t *port, u32 flow_index,
455 uword private_data, u64 *hits)
457 oct_port_t *oct_port = vnet_dev_get_port_data (port);
458 struct roc_npc *npc = &oct_port->npc;
459 struct roc_npc_flow *npc_flow;
460 oct_flow_entry_t *flow_entry;
464 flow_count = pool_elts (oct_port->flow_entries);
467 log_err (port->dev, "Flow entry pool is empty");
468 return VNET_DEV_ERR_NOT_SUPPORTED;
471 flow_entry = pool_elt_at_index (oct_port->flow_entries, private_data);
472 npc_flow = flow_entry->npc_flow;
/* Rules created without a COUNT action have no counter attached. */
473 if (npc_flow->ctr_id == NPC_COUNTER_NONE)
475 log_err (port->dev, "Counters are not available for given flow id (%u)",
477 return VNET_DEV_ERR_NOT_SUPPORTED;
480 rv = roc_npc_mcam_read_counter (npc, npc_flow->ctr_id, hits);
483 log_err (port->dev, "Error reading flow counter for given flow id (%u)",
485 return VNET_DEV_ERR_INTERNAL;
/* Dev-framework entry point for RX flow configuration: dispatch
   add/del requests to the helpers above; everything else is rejected.
   NOTE(review): excerpt is truncated — braces and the priv_data
   parameter declaration are not visible; any call to
   oct_flow_validate_params() would also fall in the missing lines. */
492 oct_flow_ops_fn (vlib_main_t *vm, vnet_dev_port_t *port,
493 vnet_dev_port_cfg_type_t type, u32 flow_index,
496 vnet_flow_t *flow = vnet_get_flow (flow_index);
498 if (type == VNET_DEV_PORT_CFG_ADD_RX_FLOW)
499 return oct_flow_add (vm, port, flow, priv_data);
501 if (type == VNET_DEV_PORT_CFG_DEL_RX_FLOW)
502 return oct_flow_del (vm, port, flow, priv_data);
504 return VNET_DEV_ERR_NOT_SUPPORTED;