.redirect_node_index = gtpu4_flow_input_node.index,
.buffer_advance = sizeof (ethernet_header_t)
+ sizeof (ip4_header_t) + sizeof (udp_header_t),
- .type = VNET_FLOW_TYPE_IP4_GTPU_IP4,
+ .type = VNET_FLOW_TYPE_IP4_GTPU,
.ip4_gtpu = {
.protocol = IP_PROTOCOL_UDP,
.src_addr.addr = t->dst.ip4,
if (!ip46_address_is_ip4 (&t->dst))
return clib_error_return (0, "currently only IPV4 tunnels are supported");
- /* inner protocol should be IPv4 */
- if (t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT)
+ /* inner protocol should be IPv4/IPv6 */
+ if ((t->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
+ (t->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT))
return clib_error_return (0,
- "currently only inner IPV4 protocol is supported");
+ "currently only inner IPv4/IPv6 protocol is supported");
vnet_hw_interface_t *hw_if = vnet_get_hw_interface (vnm, hw_if_index);
ip4_main_t *im = &ip4_main;
#define foreach_gtpu_flow_error \
_(NONE, "no error") \
+ _(PAYLOAD_ERROR, "Payload type errors") \
_(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
_(IP_HEADER_ERROR, "Rx ip header errors") \
_(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
/* Pop gtpu header */
vlib_buffer_advance (b0, gtpu_hdr_len0);
- next0 = GTPU_INPUT_NEXT_IP4_INPUT;
+ /* assign the next node */
+ if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
+ (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
+ {
+ error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
+ next0 = GTPU_INPUT_NEXT_DROP;
+ goto trace0;
+ }
+ next0 = t0->decap_next_index;
+
sw_if_index0 = t0->sw_if_index;
/* Set packet input sw_if_index to unicast GTPU tunnel for learning */
/* Pop gtpu header */
vlib_buffer_advance (b1, gtpu_hdr_len1);
- next1 = GTPU_INPUT_NEXT_IP4_INPUT;
+ /* assign the next node */
+ if (PREDICT_FALSE ((t1->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
+ (t1->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
+ {
+ error1 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
+ next1 = GTPU_INPUT_NEXT_DROP;
+ goto trace1;
+ }
+ next1 = t1->decap_next_index;
+
sw_if_index1 = t1->sw_if_index;
/* Required to make the l2 tag push / pop code work on l2 subifs */
/* Required to make the l2 tag push / pop code work on l2 subifs */
/* Pop gtpu header */
vlib_buffer_advance (b0, gtpu_hdr_len0);
- next0 = GTPU_INPUT_NEXT_IP4_INPUT;
+ /* assign the next node */
+ if (PREDICT_FALSE ((t0->decap_next_index != GTPU_INPUT_NEXT_IP4_INPUT) &&
+ (t0->decap_next_index != GTPU_INPUT_NEXT_IP6_INPUT)))
+ {
+ error0 = GTPU_FLOW_ERROR_PAYLOAD_ERROR;
+ next0 = GTPU_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+ next0 = t0->decap_next_index;
+
sw_if_index0 = t0->sw_if_index;
/* Set packet input sw_if_index to unicast GTPU tunnel for learning */