X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fip%2Fip4_forward.c;h=b9875d72b8e9fa5c6ed7a7f16cc3bb1917560739;hb=ffd78d1ef8fe80d1b756a71d42d5eadda60ae996;hp=201d391a77ee9213861587952107363be278d241;hpb=f068c3ed296c49dfbfe17677fc1ad2428fb4e3e4;p=vpp.git diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c old mode 100755 new mode 100644 index 201d391a77e..b9875d72b8e --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -53,378 +53,7 @@ #include #include /* for mFIB table and entry creation */ -/** - * @file - * @brief IPv4 Forwarding. - * - * This file contains the source code for IPv4 forwarding. - */ - -always_inline uword -ip4_lookup_inline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame, - int lookup_for_responses_to_locally_received_packets) -{ - ip4_main_t *im = &ip4_main; - vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters; - u32 n_left_from, n_left_to_next, *from, *to_next; - ip_lookup_next_t next; - u32 thread_index = vlib_get_thread_index (); - - from = vlib_frame_vector_args (frame); - n_left_from = frame->n_vectors; - next = node->cached_next_index; - - while (n_left_from > 0) - { - vlib_get_next_frame (vm, node, next, to_next, n_left_to_next); - - while (n_left_from >= 8 && n_left_to_next >= 4) - { - vlib_buffer_t *p0, *p1, *p2, *p3; - ip4_header_t *ip0, *ip1, *ip2, *ip3; - ip_lookup_next_t next0, next1, next2, next3; - const load_balance_t *lb0, *lb1, *lb2, *lb3; - ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3; - ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3; - ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3; - u32 pi0, fib_index0, lb_index0; - u32 pi1, fib_index1, lb_index1; - u32 pi2, fib_index2, lb_index2; - u32 pi3, fib_index3, lb_index3; - flow_hash_config_t flow_hash_config0, flow_hash_config1; - flow_hash_config_t flow_hash_config2, flow_hash_config3; - u32 hash_c0, hash_c1, hash_c2, hash_c3; - const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3; - - /* Prefetch next iteration. 
*/ - { - vlib_buffer_t *p4, *p5, *p6, *p7; - - p4 = vlib_get_buffer (vm, from[4]); - p5 = vlib_get_buffer (vm, from[5]); - p6 = vlib_get_buffer (vm, from[6]); - p7 = vlib_get_buffer (vm, from[7]); - - vlib_prefetch_buffer_header (p4, LOAD); - vlib_prefetch_buffer_header (p5, LOAD); - vlib_prefetch_buffer_header (p6, LOAD); - vlib_prefetch_buffer_header (p7, LOAD); - - CLIB_PREFETCH (p4->data, sizeof (ip0[0]), LOAD); - CLIB_PREFETCH (p5->data, sizeof (ip0[0]), LOAD); - CLIB_PREFETCH (p6->data, sizeof (ip0[0]), LOAD); - CLIB_PREFETCH (p7->data, sizeof (ip0[0]), LOAD); - } - - pi0 = to_next[0] = from[0]; - pi1 = to_next[1] = from[1]; - pi2 = to_next[2] = from[2]; - pi3 = to_next[3] = from[3]; - - from += 4; - to_next += 4; - n_left_to_next -= 4; - n_left_from -= 4; - - p0 = vlib_get_buffer (vm, pi0); - p1 = vlib_get_buffer (vm, pi1); - p2 = vlib_get_buffer (vm, pi2); - p3 = vlib_get_buffer (vm, pi3); - - ip0 = vlib_buffer_get_current (p0); - ip1 = vlib_buffer_get_current (p1); - ip2 = vlib_buffer_get_current (p2); - ip3 = vlib_buffer_get_current (p3); - - dst_addr0 = &ip0->dst_address; - dst_addr1 = &ip1->dst_address; - dst_addr2 = &ip2->dst_address; - dst_addr3 = &ip3->dst_address; - - fib_index0 = - vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer (p0)->sw_if_index[VLIB_RX]); - fib_index1 = - vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer (p1)->sw_if_index[VLIB_RX]); - fib_index2 = - vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer (p2)->sw_if_index[VLIB_RX]); - fib_index3 = - vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer (p3)->sw_if_index[VLIB_RX]); - fib_index0 = - (vnet_buffer (p0)->sw_if_index[VLIB_TX] == - (u32) ~ 0) ? fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX]; - fib_index1 = - (vnet_buffer (p1)->sw_if_index[VLIB_TX] == - (u32) ~ 0) ? fib_index1 : vnet_buffer (p1)->sw_if_index[VLIB_TX]; - fib_index2 = - (vnet_buffer (p2)->sw_if_index[VLIB_TX] == - (u32) ~ 0) ? fib_index2 : vnet_buffer (p2)->sw_if_index[VLIB_TX]; - fib_index3 = - (vnet_buffer (p3)->sw_if_index[VLIB_TX] == - (u32) ~ 0) ? 
fib_index3 : vnet_buffer (p3)->sw_if_index[VLIB_TX]; - - - if (!lookup_for_responses_to_locally_received_packets) - { - mtrie0 = &ip4_fib_get (fib_index0)->mtrie; - mtrie1 = &ip4_fib_get (fib_index1)->mtrie; - mtrie2 = &ip4_fib_get (fib_index2)->mtrie; - mtrie3 = &ip4_fib_get (fib_index3)->mtrie; - - leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0); - leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, dst_addr1); - leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, dst_addr2); - leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3); - } - - if (!lookup_for_responses_to_locally_received_packets) - { - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2); - leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 2); - leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 2); - leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 2); - } - - if (!lookup_for_responses_to_locally_received_packets) - { - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3); - leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, dst_addr1, 3); - leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, dst_addr2, 3); - leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, dst_addr3, 3); - } - - if (lookup_for_responses_to_locally_received_packets) - { - lb_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX]; - lb_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_RX]; - lb_index2 = vnet_buffer (p2)->ip.adj_index[VLIB_RX]; - lb_index3 = vnet_buffer (p3)->ip.adj_index[VLIB_RX]; - } - else - { - lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0); - lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1); - lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2); - lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3); - } - - ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3); - lb0 = load_balance_get (lb_index0); - lb1 = load_balance_get (lb_index1); - lb2 = load_balance_get (lb_index2); - lb3 = load_balance_get (lb_index3); - - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); - ASSERT (lb1->lb_n_buckets > 0); - ASSERT (is_pow2 (lb1->lb_n_buckets)); - ASSERT (lb2->lb_n_buckets > 0); - ASSERT (is_pow2 (lb2->lb_n_buckets)); - ASSERT (lb3->lb_n_buckets > 0); - ASSERT (is_pow2 (lb3->lb_n_buckets)); - - /* Use flow hash to compute multipath adjacency. 
*/ - hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0; - hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0; - hash_c2 = vnet_buffer (p2)->ip.flow_hash = 0; - hash_c3 = vnet_buffer (p3)->ip.flow_hash = 0; - if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) - { - flow_hash_config0 = lb0->lb_hash_config; - hash_c0 = vnet_buffer (p0)->ip.flow_hash = - ip4_compute_flow_hash (ip0, flow_hash_config0); - dpo0 = - load_balance_get_fwd_bucket (lb0, - (hash_c0 & - (lb0->lb_n_buckets_minus_1))); - } - else - { - dpo0 = load_balance_get_bucket_i (lb0, 0); - } - if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) - { - flow_hash_config1 = lb1->lb_hash_config; - hash_c1 = vnet_buffer (p1)->ip.flow_hash = - ip4_compute_flow_hash (ip1, flow_hash_config1); - dpo1 = - load_balance_get_fwd_bucket (lb1, - (hash_c1 & - (lb1->lb_n_buckets_minus_1))); - } - else - { - dpo1 = load_balance_get_bucket_i (lb1, 0); - } - if (PREDICT_FALSE (lb2->lb_n_buckets > 1)) - { - flow_hash_config2 = lb2->lb_hash_config; - hash_c2 = vnet_buffer (p2)->ip.flow_hash = - ip4_compute_flow_hash (ip2, flow_hash_config2); - dpo2 = - load_balance_get_fwd_bucket (lb2, - (hash_c2 & - (lb2->lb_n_buckets_minus_1))); - } - else - { - dpo2 = load_balance_get_bucket_i (lb2, 0); - } - if (PREDICT_FALSE (lb3->lb_n_buckets > 1)) - { - flow_hash_config3 = lb3->lb_hash_config; - hash_c3 = vnet_buffer (p3)->ip.flow_hash = - ip4_compute_flow_hash (ip3, flow_hash_config3); - dpo3 = - load_balance_get_fwd_bucket (lb3, - (hash_c3 & - (lb3->lb_n_buckets_minus_1))); - } - else - { - dpo3 = load_balance_get_bucket_i (lb3, 0); - } - - next0 = dpo0->dpoi_next_node; - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - next1 = dpo1->dpoi_next_node; - vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; - next2 = dpo2->dpoi_next_node; - vnet_buffer (p2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index; - next3 = dpo3->dpoi_next_node; - vnet_buffer (p3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index; - - vlib_increment_combined_counter - (cm, thread_index, lb_index0, 1, - vlib_buffer_length_in_chain (vm, p0)); - vlib_increment_combined_counter - (cm, thread_index, lb_index1, 1, - vlib_buffer_length_in_chain (vm, p1)); - vlib_increment_combined_counter - (cm, thread_index, lb_index2, 1, - vlib_buffer_length_in_chain (vm, p2)); - vlib_increment_combined_counter - (cm, thread_index, lb_index3, 1, - vlib_buffer_length_in_chain (vm, p3)); - - vlib_validate_buffer_enqueue_x4 (vm, node, next, - to_next, n_left_to_next, - pi0, pi1, pi2, pi3, - next0, next1, next2, next3); - } - - while (n_left_from > 0 && n_left_to_next > 0) - { - vlib_buffer_t *p0; - ip4_header_t *ip0; - ip_lookup_next_t next0; - const load_balance_t *lb0; - ip4_fib_mtrie_t *mtrie0; - ip4_fib_mtrie_leaf_t leaf0; - ip4_address_t *dst_addr0; - u32 pi0, fib_index0, lbi0; - flow_hash_config_t flow_hash_config0; - const dpo_id_t *dpo0; - u32 hash_c0; - - pi0 = from[0]; - to_next[0] = pi0; - - p0 = vlib_get_buffer (vm, pi0); - - ip0 = vlib_buffer_get_current (p0); - - dst_addr0 = &ip0->dst_address; - - fib_index0 = - vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer (p0)->sw_if_index[VLIB_RX]); - fib_index0 = - (vnet_buffer (p0)->sw_if_index[VLIB_TX] == - (u32) ~ 0) ? 
fib_index0 : vnet_buffer (p0)->sw_if_index[VLIB_TX]; - - if (!lookup_for_responses_to_locally_received_packets) - { - mtrie0 = &ip4_fib_get (fib_index0)->mtrie; - - leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0); - } - - if (!lookup_for_responses_to_locally_received_packets) - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2); - - if (!lookup_for_responses_to_locally_received_packets) - leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 3); - - if (lookup_for_responses_to_locally_received_packets) - lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_RX]; - else - { - /* Handle default route. */ - lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0); - } - - ASSERT (lbi0); - lb0 = load_balance_get (lbi0); - - ASSERT (lb0->lb_n_buckets > 0); - ASSERT (is_pow2 (lb0->lb_n_buckets)); - - /* Use flow hash to compute multipath adjacency. */ - hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0; - if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) - { - flow_hash_config0 = lb0->lb_hash_config; - - hash_c0 = vnet_buffer (p0)->ip.flow_hash = - ip4_compute_flow_hash (ip0, flow_hash_config0); - dpo0 = - load_balance_get_fwd_bucket (lb0, - (hash_c0 & - (lb0->lb_n_buckets_minus_1))); - } - else - { - dpo0 = load_balance_get_bucket_i (lb0, 0); - } - - next0 = dpo0->dpoi_next_node; - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - - vlib_increment_combined_counter (cm, thread_index, lbi0, 1, - vlib_buffer_length_in_chain (vm, - p0)); - - from += 1; - to_next += 1; - n_left_to_next -= 1; - n_left_from -= 1; - - if (PREDICT_FALSE (next0 != next)) - { - n_left_to_next += 1; - vlib_put_next_frame (vm, node, next, n_left_to_next); - next = next0; - vlib_get_next_frame (vm, node, next, to_next, n_left_to_next); - to_next[0] = pi0; - to_next += 1; - n_left_to_next -= 1; - } - } - - vlib_put_next_frame (vm, node, next, n_left_to_next); - } - - if (node->flags & VLIB_NODE_FLAG_TRACE) - ip4_forward_next_trace (vm, node, frame, VLIB_TX); - - return frame->n_vectors; -} +#include /** @brief IPv4 lookup node. 
@node ip4-lookup @@ -883,11 +512,11 @@ ip4_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable) if (0 != --im->ip_enabled_by_sw_if_index[sw_if_index]) return; } - vnet_feature_enable_disable ("ip4-unicast", "ip4-drop", sw_if_index, + vnet_feature_enable_disable ("ip4-unicast", "ip4-not-enabled", sw_if_index, !is_enable, 0, 0); - vnet_feature_enable_disable ("ip4-multicast", "ip4-drop", + vnet_feature_enable_disable ("ip4-multicast", "ip4-not-enabled", sw_if_index, !is_enable, 0, 0); } @@ -1066,10 +695,10 @@ VNET_FEATURE_INIT (ip4_vxlan_bypass, static) = .runs_before = VNET_FEATURES ("ip4-lookup"), }; -VNET_FEATURE_INIT (ip4_drop, static) = +VNET_FEATURE_INIT (ip4_not_enabled, static) = { .arc_name = "ip4-unicast", - .node_name = "ip4-drop", + .node_name = "ip4-not-enabled", .runs_before = VNET_FEATURES ("ip4-lookup"), }; @@ -1095,10 +724,10 @@ VNET_FEATURE_INIT (ip4_vpath_mc, static) = .runs_before = VNET_FEATURES ("ip4-mfib-forward-lookup"), }; -VNET_FEATURE_INIT (ip4_mc_drop, static) = +VNET_FEATURE_INIT (ip4_mc_not_enabled, static) = { .arc_name = "ip4-multicast", - .node_name = "ip4-drop", + .node_name = "ip4-not-enabled", .runs_before = VNET_FEATURES ("ip4-mfib-forward-lookup"), }; @@ -1121,6 +750,13 @@ VNET_FEATURE_INIT (ip4_source_and_port_range_check_tx, static) = { .arc_name = "ip4-output", .node_name = "ip4-source-and-port-range-check-tx", + .runs_before = VNET_FEATURES ("ip4-outacl"), +}; + +VNET_FEATURE_INIT (ip4_outacl, static) = +{ + .arc_name = "ip4-output", + .node_name = "ip4-outacl", .runs_before = VNET_FEATURES ("ipsec-output-ip4"), }; @@ -1166,11 +802,11 @@ ip4_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add) /* *INDENT-ON* */ } - vnet_feature_enable_disable ("ip4-unicast", "ip4-drop", sw_if_index, + vnet_feature_enable_disable ("ip4-unicast", "ip4-not-enabled", sw_if_index, is_add, 0, 0); - vnet_feature_enable_disable ("ip4-multicast", "ip4-drop", sw_if_index, - is_add, 0, 0); + vnet_feature_enable_disable ("ip4-multicast", "ip4-not-enabled", + sw_if_index, is_add, 0, 0); return /* no error */ 0; } @@ -1582,8 +1218,12 @@ ip4_local_inline (vlib_main_t * vm, /* Treat IP frag packets as "experimental" protocol for now until support of IP frag reassembly is implemented */ - proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol; - proto1 = ip4_is_fragment (ip1) ? 0xfe : ip1->protocol; + proto0 = + ip4_is_fragment (ip0) ? IP_PROTOCOL_VPP_FRAGMENTATION : + ip0->protocol; + proto1 = + ip4_is_fragment (ip1) ? IP_PROTOCOL_VPP_FRAGMENTATION : + ip1->protocol; if (head_of_feature_arc == 0) goto skip_checks; @@ -1748,7 +1388,9 @@ ip4_local_inline (vlib_main_t * vm, /* Treat IP frag packets as "experimental" protocol for now until support of IP frag reassembly is implemented */ - proto0 = ip4_is_fragment (ip0) ? 0xfe : ip0->protocol; + proto0 = + ip4_is_fragment (ip0) ? IP_PROTOCOL_VPP_FRAGMENTATION : + ip0->protocol; if (head_of_feature_arc == 0 || p0->flags & VNET_BUFFER_F_IS_NATED) goto skip_check; @@ -1839,6 +1481,7 @@ VLIB_REGISTER_NODE (ip4_local_node) = [IP_LOCAL_NEXT_PUNT] = "ip4-punt", [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup", [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input", + [IP_LOCAL_NEXT_REASSEMBLY] = "ip4-reassembly", }, }; /* *INDENT-ON* */ @@ -2466,16 +2109,26 @@ ip4_rewrite_inline (vlib_main_t * vm, vnet_buffer (p1)->ip.save_rewrite_length = rw_len1; /* Check MTU of outgoing interface. */ - error0 = - (vlib_buffer_length_in_chain (vm, p0) > - adj0[0]. - rewrite_header.max_l3_packet_bytes ? 
IP4_ERROR_MTU_EXCEEDED : - error0); - error1 = - (vlib_buffer_length_in_chain (vm, p1) > - adj1[0]. - rewrite_header.max_l3_packet_bytes ? IP4_ERROR_MTU_EXCEEDED : - error1); + if (vlib_buffer_length_in_chain (vm, p0) > + adj0[0].rewrite_header.max_l3_packet_bytes) + { + error0 = IP4_ERROR_MTU_EXCEEDED; + next0 = IP4_REWRITE_NEXT_ICMP_ERROR; + icmp4_error_set_vnet_buffer + (p0, ICMP4_destination_unreachable, + ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set, + 0); + } + if (vlib_buffer_length_in_chain (vm, p1) > + adj1[0].rewrite_header.max_l3_packet_bytes) + { + error1 = IP4_ERROR_MTU_EXCEEDED; + next1 = IP4_REWRITE_NEXT_ICMP_ERROR; + icmp4_error_set_vnet_buffer + (p1, ICMP4_destination_unreachable, + ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set, + 0); + } if (is_mcast) { @@ -2487,6 +2140,8 @@ ip4_rewrite_inline (vlib_main_t * vm, IP4_ERROR_SAME_INTERFACE : error1); } + p0->error = error_node->errors[error0]; + p1->error = error_node->errors[error1]; /* Don't adjust the buffer for ttl issue; icmp-error node wants * to see the IP headerr */ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) @@ -2541,8 +2196,10 @@ ip4_rewrite_inline (vlib_main_t * vm, if (is_midchain) { - adj0->sub_type.midchain.fixup_func (vm, adj0, p0); - adj1->sub_type.midchain.fixup_func (vm, adj1, p1); + adj0->sub_type.midchain.fixup_func + (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); + adj1->sub_type.midchain.fixup_func + (vm, adj1, p1, adj0->sub_type.midchain.fixup_data); } if (is_mcast) { @@ -2643,9 +2300,16 @@ ip4_rewrite_inline (vlib_main_t * vm, vlib_buffer_length_in_chain (vm, p0) + rw_len0); /* Check MTU of outgoing interface. */ - error0 = (vlib_buffer_length_in_chain (vm, p0) - > adj0[0].rewrite_header.max_l3_packet_bytes - ? IP4_ERROR_MTU_EXCEEDED : error0); + if (vlib_buffer_length_in_chain (vm, p0) > + adj0[0].rewrite_header.max_l3_packet_bytes) + { + error0 = IP4_ERROR_MTU_EXCEEDED; + next0 = IP4_REWRITE_NEXT_ICMP_ERROR; + icmp4_error_set_vnet_buffer + (p0, ICMP4_destination_unreachable, + ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set, + 0); + } if (is_mcast) { error0 = ((adj0[0].rewrite_header.sw_if_index == @@ -2667,7 +2331,8 @@ ip4_rewrite_inline (vlib_main_t * vm, if (is_midchain) { - adj0->sub_type.midchain.fixup_func (vm, adj0, p0); + adj0->sub_type.midchain.fixup_func + (vm, adj0, p0, adj0->sub_type.midchain.fixup_data); } if (PREDICT_FALSE
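
/*
 * Illustrative sketch only -- not the VPP data-plane code above.  The
 * ip4_rewrite_inline() hunks in this diff change the outgoing-MTU check:
 * instead of merely recording IP4_ERROR_MTU_EXCEEDED, an oversized packet
 * is now steered to the ICMP error next node and stamped with a
 * "destination unreachable / fragmentation needed and DF set" error, which
 * is what lets DF-set senders perform path-MTU discovery.  The minimal
 * standalone program below mirrors that per-packet decision; all type and
 * function names here (pkt_t, check_mtu, ...) are invented for the sketch
 * and do not exist in VPP.
 */
#include <stdint.h>
#include <stdio.h>

typedef enum
{
  ERROR_NONE = 0,
  ERROR_MTU_EXCEEDED,
} rewrite_error_t;

typedef enum
{
  NEXT_REWRITE_TX = 0,		/* normal case: transmit on the adjacency */
  NEXT_ICMP_ERROR,		/* oversize case: hand off to the ICMP error node */
} rewrite_next_t;

typedef struct
{
  uint32_t pkt_len;		/* total L3 length of the packet */
  rewrite_error_t error;	/* error code recorded on the buffer */
  rewrite_next_t next;		/* next node the buffer is enqueued to */
  uint8_t icmp_type;		/* ICMP fields the error node will emit */
  uint8_t icmp_code;
} pkt_t;

/* Mirrors the branch the diff adds for each packet in ip4_rewrite_inline. */
static void
check_mtu (pkt_t * p, uint32_t adj_l3_mtu)
{
  if (p->pkt_len > adj_l3_mtu)
    {
      p->error = ERROR_MTU_EXCEEDED;
      p->next = NEXT_ICMP_ERROR;
      p->icmp_type = 3;		/* destination unreachable */
      p->icmp_code = 4;		/* fragmentation needed and DF set */
    }
}

int
main (void)
{
  pkt_t small = {.pkt_len = 1400,.next = NEXT_REWRITE_TX };
  pkt_t big = {.pkt_len = 1600,.next = NEXT_REWRITE_TX };

  check_mtu (&small, 1500);
  check_mtu (&big, 1500);

  printf ("small -> next %d, error %d\n", small.next, small.error);
  printf ("big   -> next %d (icmp type %d code %d)\n",
	  big.next, big.icmp_type, big.icmp_code);
  return 0;
}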