feat(swasync): switch to polling mode 88/39388/10
author    Vratko Polak <vrpolak@cisco.com>
          Thu, 17 Aug 2023 12:53:26 +0000 (14:53 +0200)
committer Vratko Polak <vrpolak@cisco.com>
          Thu, 17 Aug 2023 12:53:26 +0000 (14:53 +0200)
Performance of adaptive mode is bad (a different bug),
so switch to polling mode to keep continuity
of ipsec swasync tests (when VPP allows).

As 23.06-release does not have the new API message,
the new CSIT code needs to be more careful around CRC checking.

+ Add new CRC collection with the new API call used.
+ Also keep the old collection so older VPP does not fail.
+ Document how the papi executor works with a VPP that lacks a message.
+ Prevent the CRC checker from raising bogus errors with old VPP.

Change-Id: I9ff933a8a9558289d22d55526905d63e7894378c
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
resources/api/vpp/supported_crcs.yaml
resources/libraries/python/IPsecUtil.py
resources/libraries/python/PapiExecutor.py
resources/libraries/python/VppApiCrc.py
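
In short, the pattern these diffs implement is a guarded PAPI call:
try the new message, and treat its absence on older VPP builds as a
non-error. A minimal sketch, assuming CSIT's PapiSocketExecutor and a
topology node dict (the helper name is illustrative only):

    from resources.libraries.python.PapiExecutor import PapiSocketExecutor

    def try_polling_dispatch(node):
        """Attempt polling dispatch, tolerating VPP builds without _v2."""
        with PapiSocketExecutor(node) as papi_exec:
            try:
                # mode=0 is assumed to select polling; adaptive=False
                # avoids the (currently slow) adaptive mode.
                papi_exec.add(
                    "crypto_set_async_dispatch_v2", mode=0, adaptive=False
                ).get_reply("Failed to set dispatch mode.")
            except (AttributeError, RuntimeError):
                # Older VPP: the message is absent (AttributeError),
                # or the CRC checker rejected it (RuntimeError).
                pass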

index 3a11094..58825af 100644 (file)
     wg_set_async_mode: '0xa6465f7c'
     wg_set_async_mode_reply: '0xe8d4e804'
 
+23.10-rc0:
+    # plugins/acl/acl.api
+    acl_add_replace: '0xee5c2f18'  # dev
+    acl_add_replace_reply: '0xac407b0c'  # dev
+    acl_details: '0x95babae0'  # dev teardown
+    acl_dump: '0xef34fea4'  # dev teardown
+    acl_interface_list_details: '0xe695d256'  # dev teardown
+    acl_interface_list_dump: '0xf9e6675e'  # dev teardown
+    acl_interface_set_acl_list: '0x473982bd'  # dev
+    acl_interface_set_acl_list_reply: '0xe8d4e804'  # dev
+
+    # vlibmemory/vlib.api
+    add_node_next: '0x2457116d'  # dev
+    add_node_next_reply: '0x2ed75f32'  # dev
+
+    # plugins/adl/adl.api
+    adl_allowlist_enable_disable: '0xea88828d'  # dev
+    adl_allowlist_enable_disable_reply: '0xe8d4e804'  # dev
+    adl_interface_enable_disable: '0x5501adee'  # dev
+    adl_interface_enable_disable_reply: '0xe8d4e804'  # dev
+
+    # plugins/af_xdp/af_xdp.api
+    af_xdp_create_v2: '0xe17ec2eb'  # dev
+    af_xdp_create_v2_reply: '0x5383d31f'  # dev
+
+    # plugins/avf/avf.api
+    avf_create: '0xdaab8ae2'  # dev
+    avf_create_reply: '0x5383d31f'  # dev
+
+    # vnet/bonding/bond.api
+    bond_add_member: '0xe7d14948'  # perf
+    bond_add_member_reply: '0xe8d4e804'  # perf
+    bond_create2: '0x912fda76'  # perf
+    bond_create2_reply: '0x5383d31f'  # perf
+    # 4x^ 64B-1c-1lbvpplacp-dot1q-l2xcbase-eth-2vhostvr1024-1vm-ndrpdr
+    # ^ ndrpdrAND1cAND64bAND1lbvpplacp-dot1q-l2xcbase-eth-2vhostvr1024-1vm
+
+    # vnet/l2/l2.api
+    bridge_domain_add_del_v2: '0x600b7170'  # dev
+    bridge_domain_add_del_v2_reply: '0xfcb1e980'  # dev
+    # bridge_domain_dump / details # honeycomb
+
+    # vnet/classify/classify.api
+    classify_add_del_session: '0xf20879f0'  # dev
+    classify_add_del_session_reply: '0xe8d4e804'  # dev
+    classify_add_del_table: '0x6849e39e'  # dev
+    classify_add_del_table_reply: '0x05486349'  # dev
+    # classify_session_dump / details # honeycomb
+    # classify_table_by_interface / reply # honeycomb
+    # classify_table_info / reply # honeycomb
+
+    # vlibmemory/vlib.api
+    cli_inband: '0xf8377302'  # dev setup
+    cli_inband_reply: '0x05879051'  # dev setup
+
+    # vnet/interface.api
+    create_loopback_instance: '0xd36a3ee2'  # dev
+    create_loopback_instance_reply: '0x5383d31f'  # dev
+
+    # vnet/interface.api
+    create_subif: '0x790ca755'  # perf
+    create_subif_reply: '0x5383d31f'  # perf
+    # ^^ 64B-1c-dot1ad-l2xcbase-ndrpdr
+    # ^ ndrpdrAND1cAND64bANDdot1ad-l2xcbase
+
+    # plugins/vhost/vhost_user.api
+    create_vhost_user_if_v2: '0xdba1cc1d'  # dev
+    create_vhost_user_if_v2_reply: '0x5383d31f'  # dev
+
+    # vnet/interface.api
+    create_vlan_subif: '0xaf34ac8b'  # dev
+    create_vlan_subif_reply: '0x5383d31f'  # dev
+
+    # plugins/crypto_sw_scheduler/crypto_sw_scheduler.api
+    crypto_set_async_dispatch_v2: '0x667d2d54'  # perf
+    crypto_set_async_dispatch_v2_reply: '0xe8d4e804'  # perf
+    crypto_sw_scheduler_set_worker: '0xb4274502'  # perf
+    crypto_sw_scheduler_set_worker_reply: '0xe8d4e804'  # perf
+    # 4x^ 64B-1c-ethip4ipsec8tnlswasync-scheduler-ip4base-int-aes128gcm-udir-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDethip4ipsec8tnlswasync-scheduler-ip4base-int-\
+    #   aes128gcm-udir
+
+    # plugins/nat/det44/det44.api
+    det44_add_del_map: '0x1150a190'  # dev
+    det44_add_del_map_reply: '0xe8d4e804'  # dev
+    det44_interface_add_del_feature: '0xdc17a836'  # dev
+    det44_interface_add_del_feature_reply: '0xe8d4e804'  # dev
+    det44_interface_details: '0xe60cc5be'  # dev teardown
+    det44_interface_dump: '0x51077d14'  # dev teardown
+    det44_map_details: '0xad91dc83'  # dev teardown
+    det44_map_dump: '0x51077d14'  # dev teardown
+    det44_plugin_enable_disable: '0x617b6bf8'  # dev
+    det44_plugin_enable_disable_reply: '0xe8d4e804'  # dev
+    det44_session_dump: '0xe45a3af7'  # dev
+    # TODO: Which test to run to verify det44_* messages?
+    # dhcp_proxy_dump / details # honeycomb
+
+    # vnet/flow/flow.api
+    flow_add_v2: '0x5b757558'  # dev
+    flow_add_v2_reply: '0x8587dc85'  # dev
+    flow_del: '0xb6b9b02c'  # dev
+    flow_del_reply: '0xe8d4e804'  # dev
+    flow_disable: '0x2024be69'  # dev
+    flow_disable_reply: '0xe8d4e804'  # dev
+    flow_enable: '0x2024be69'  # dev
+    flow_enable_reply: '0xe8d4e804'  # dev
+
+    # plugins/geneve/geneve.api
+    geneve_add_del_tunnel2: '0x8c2a9999'  # dev
+    geneve_add_del_tunnel2_reply: '0x5383d31f'  # dev
+    geneve_tunnel_details: '0x6b16eb24'  # dev
+    geneve_tunnel_dump: '0xf9e6675e'  # dev
+
+    # plugins/lisp/lisp-gpe/lisp_gpe.api
+    gpe_enable_disable: '0xc264d7bf'  # dev
+    gpe_enable_disable_reply: '0xe8d4e804'  # dev
+
+    # gre_tunnel_add_del / reply # unused L1 keyword: create_gre_tunnel_interface
+
+    # plugins/gtpu/gtpu.api
+    gtpu_add_del_tunnel: '0xca983a2b'  # perf
+    gtpu_add_del_tunnel_reply: '0x5383d31f'  # perf
+    # ^^ 64B-1c-ethip4gtpusw-ip4base-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDethip4gtpusw-ip4base
+    gtpu_offload_rx: '0xf0b08786'  # perf
+    gtpu_offload_rx_reply: '0xe8d4e804'  # perf
+
+    # vnet/interface.api
+    hw_interface_set_mtu: '0xe6746899'  # dev
+    hw_interface_set_mtu_reply: '0xe8d4e804'  # dev
+
+    # vnet/classify/classify.api
+    input_acl_set_interface: '0xde7ad708'  # dev
+    input_acl_set_interface_reply: '0xe8d4e804'  # dev
+
+    # vnet/ip/ip.api
+    ip_address_details: '0xee29b797'  # dev
+    ip_address_dump: '0x2d033de4'  # dev
+
+    # vnet/ip-neighbor/ip_neighbor.api
+    ip_neighbor_add_del: '0x0607c257'  # dev
+    ip_neighbor_add_del_reply: '0x1992deab'  # dev
+
+    # ip_probe_neighbor / reply # unused L1 keyword vpp_ip_probe
+
+    # vnet/ip/ip.api
+    ip_route_add_del: '0xb8ecfe0d'  # dev
+    ip_route_add_del_reply: '0x1992deab'  # dev
+    # ip_source_check_interface_add_del / reply # unused L1 keyword vpp_ip_source_check_setup
+    ip_table_add_del: '0x0ffdaec0'  # dev
+    ip_table_add_del_reply: '0xe8d4e804'  # dev
+
+    # vnet/ipip/ipip.api
+    ipip_add_tunnel: '0x2ac399f5'  # dev
+    ipip_add_tunnel_reply: '0x5383d31f'  # dev
+
+    # vnet/ipsec/ipsec.api
+    ipsec_interface_add_del_spd: '0x80f80cbb'  # dev
+    ipsec_interface_add_del_spd_reply: '0xe8d4e804'  # dev
+    ipsec_sa_v3_details: '0x2fc991ee'  # dev
+    ipsec_sa_v3_dump: '0x2076c2f4'  # dev
+    ipsec_sad_entry_add: '0x50229353'  # dev
+    ipsec_sad_entry_add_reply: '0x9ffac24b'  # dev
+    ipsec_select_backend: '0x5bcfd3b7'  # perf
+    ipsec_select_backend_reply: '0xe8d4e804'  # perf
+    # ^^ 64B-1c-ethip4ipsec1tnlhw-ip4base-int-aes256gcm-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDethip4ipsec1tnlhw-ip4base-int-aes256gcm
+    ipsec_set_async_mode: '0xa6465f7c'  # perf
+    ipsec_set_async_mode_reply: '0xe8d4e804'  # perf
+    ipsec_spd_add_del: '0x20e89a95'  # dev
+    ipsec_spd_add_del_reply: '0xe8d4e804'  # dev
+    ipsec_spd_entry_add_del_v2: '0x7bfe69fc'  # dev
+    ipsec_spd_entry_add_del_v2_reply: '0x9ffac24b'  # dev
+    ipsec_tunnel_protect_update: '0x30d5f133'  # dev
+    ipsec_tunnel_protect_update_reply: '0xe8d4e804'  # dev
+    # ^^ 64B-1c-ethip4ipsec1tnlhw-ip4base-int-aes256gcm-ndrpdr
+    # ^ See ipsec_select_backend.
+
+    # vnet/l2/l2.api
+    # l2_fib_table_dump / details # honeycomb
+    l2_interface_vlan_tag_rewrite: '0x62cc0bbc'  # perf
+    l2_interface_vlan_tag_rewrite_reply: '0xe8d4e804'  # perf
+    # ^^ 64B-1c-dot1ad-l2xcbase-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDdot1ad-l2xcbase
+    l2_patch_add_del: '0xa1f6a6f3'  # dev
+    l2_patch_add_del_reply: '0xe8d4e804'  # dev
+    # l2fib_add_del / reply # unused L1 keyword: vpp_add_l2fib_entry
+
+    # plugins/lb/lb.api
+    lb_add_del_as: '0x35d72500'  # perf
+    lb_add_del_as_reply: '0xe8d4e804'  # perf
+    lb_add_del_intf_nat4: '0x47d6e753'  # perf
+    lb_add_del_intf_nat4_reply: '0xe8d4e804'  # perf
+    lb_add_del_vip_v2: '0x7c520e0f'  # perf
+    lb_add_del_vip_v2_reply: '0xe8d4e804'  # perf
+    lb_conf: '0x56cd3261'  # perf
+    lb_conf_reply: '0xe8d4e804'  # perf
+    # 8x^ 64B-1c-ethip4-loadbalancer-nat4-ndrpdr
+    # ^ 2n: ndrpdrAND1cAND64BANDethip4-loadbalancer-nat4
+
+    # plugins/lisp/lisp-cp/lisp.api
+    lisp_add_del_adjacency: '0x2ce0e6f6'  # dev
+    lisp_add_del_adjacency_reply: '0xe8d4e804'  # dev
+    lisp_add_del_local_eid: '0x4e5a83a2'  # dev
+    lisp_add_del_local_eid_reply: '0xe8d4e804'  # dev
+    lisp_add_del_locator: '0xaf4d8f13'  # dev
+    lisp_add_del_locator_reply: '0xe8d4e804'  # dev
+    lisp_add_del_locator_set: '0x6fcd6471'  # dev
+    lisp_add_del_locator_set_reply: '0xb6666db4'  # dev
+    # lisp_add_del_map_resolver / reply # unused L2 keyword: Configure LISP map resolver address
+    lisp_add_del_remote_mapping: '0x6d5c789e'  # dev
+    lisp_add_del_remote_mapping_reply: '0xe8d4e804'  # dev
+    lisp_eid_table_add_del_map: '0x9481416b'  # dev
+    lisp_eid_table_add_del_map_reply: '0xe8d4e804'  # dev
+    lisp_enable_disable: '0xc264d7bf'  # dev
+    lisp_enable_disable_reply: '0xe8d4e804'  # dev
+
+    # plugins/acl/acl.api
+    macip_acl_add: '0xce6fbad0'  # dev
+    macip_acl_add_reply: '0xac407b0c'  # dev
+    macip_acl_details: '0x27135b59'  # dev teardown
+    macip_acl_dump: '0xef34fea4'  # dev teardown
+    macip_acl_interface_add_del: '0x4b8690b1'  # dev
+    macip_acl_interface_add_del_reply: '0xe8d4e804'  # dev
+    macip_acl_interface_get: '0x51077d14'  # dev teardown
+    macip_acl_interface_get_reply: '0xaccf9b05'  # dev teardown
+
+    # plugins/memif/memif.api
+    memif_create_v2: '0x8c7de5f7'  # dev
+    memif_create_v2_reply: '0x5383d31f'  # dev
+    memif_details: '0xda34feb9'  # dev
+    memif_dump: '0x51077d14'  # dev
+    memif_socket_filename_add_del_v2: '0x34223bdf'  # dev
+    memif_socket_filename_add_del_v2_reply: '0x9f29bdb9'  # dev
+
+    # plugins/nat/nat44-ed/nat44_ed.api
+    nat44_add_del_address_range: '0x6f2b8055'  # dev
+    nat44_add_del_address_range_reply: '0xe8d4e804'  # dev
+    nat44_address_details: '0x0d1beac1'  # dev teardown
+    nat44_address_dump: '0x51077d14'  # dev teardown
+    nat44_ed_plugin_enable_disable: '0xbe17f8dd'  # dev
+    nat44_ed_plugin_enable_disable_reply: '0xe8d4e804'  # dev
+    nat44_interface_add_del_feature: '0xf3699b83'  # dev
+    nat44_interface_add_del_feature_reply: '0xe8d4e804'  # dev
+    nat44_interface_addr_details: '0xe4aca9ca'  # dev teardown
+    nat44_interface_addr_dump: '0x51077d14'  # dev teardown
+    nat44_interface_details: '0x5d286289'  # dev teardown
+    nat44_interface_dump: '0x51077d14'  # dev teardown
+    nat44_show_running_config: '0x51077d14'  # dev teardown
+    nat44_show_running_config_reply: '0x93d8e267'  # dev teardown
+    nat44_static_mapping_details: '0x06cb40b2'  # dev teardown
+    nat44_static_mapping_dump: '0x51077d14'  # dev teardown
+    # nat44_user_dump and nat44_user_session_dump can be called
+    # by show_nat_user_data function
+    nat_worker_details: '0x84bf06fc'  # dev teardown
+    nat_worker_dump: '0x51077d14'  # dev teardown
+
+    # plugins/nsim/nsim.api
+    nsim_configure2: '0x64de8ed3'  # perf
+    nsim_configure2_reply: '0xe8d4e804'  # perf
+    nsim_output_feature_enable_disable: '0x3865946c'  # perf
+    nsim_output_feature_enable_disable_reply: '0xe8d4e804'  # perf
+    # 4x^ 1280B-1c-eth-ip4udpquicscale10cl1s-vppecho-bps
+    # ^ 1280BAND1cANDeth-ip4udpquicscale10cl1s-vppecho
+
+    # vnet/policer/policer.api
+    policer_add_del: '0x2b31dd38'  # dev
+    policer_add_del_reply: '0xa177cef2'  # dev
+
+    # vnet/classify/classify.api
+    policer_classify_set_interface: '0xde7ad708'  # dev
+    policer_classify_set_interface_reply: '0xe8d4e804'  # dev
+
+    # plugins/rdma/rdma.api
+    rdma_create_v3: '0xc6287ea8'  # perf
+    rdma_create_v3_reply: '0x5383d31f'  # perf
+    # 2x^ Any test with drv_rdma. Currently only available on 2n-clx.
+    # - Not testable by devicetest (until we have environment with right NICs).
+
+    # vlibmemory/vlib.api
+    show_threads: '0x51077d14'  # dev
+    show_threads_reply: '0xefd78e83'  # dev
+
+    # vpp/api/vpe.api
+    show_version: '0x51077d14'  # dev setup
+    show_version_reply: '0xc919bde1'  # dev setup
+
+    # vnet/srv6/sr.api
+    sr_localsid_add_del: '0x5a36c324'  # dev
+    sr_localsid_add_del_reply: '0xe8d4e804'  # dev
+    sr_localsids_details: '0x2e9221b9'  # dev teardown
+    sr_localsids_dump: '0x51077d14'  # dev teardown
+    sr_policies_v2_details: '0x96dcb699'  # dev teardown
+    sr_policies_v2_dump: '0x51077d14'  # dev teardown
+    sr_policy_add: '0x44ac92e8'  # dev
+    sr_policy_add_reply: '0xe8d4e804'  # dev
+    sr_set_encap_source: '0xd3bad5e1'  # dev
+    sr_set_encap_source_reply: '0xe8d4e804'  # dev
+    sr_steering_add_del: '0xe46b0a0f'  # dev
+    sr_steering_add_del_reply: '0xe8d4e804'  # dev
+    sr_steering_pol_details: '0xd41258c9'  # dev teardown
+    sr_steering_pol_dump: '0x51077d14'  # dev teardown
+
+    # vnet/bonding/bond.api
+    sw_bond_interface_details: '0x9428a69c'  # perf
+    sw_bond_interface_dump: '0xf9e6675e'  # perf
+    # ^^ see bond_*
+
+    # vnet/interface.api
+    sw_interface_add_del_address: '0x5463d73b'  # dev
+    sw_interface_add_del_address_reply: '0xe8d4e804'  # dev
+    sw_interface_details: '0x6c221fc7'  # dev
+    sw_interface_dump: '0xaa610c27'  # dev
+    # sw_interface_get_table / reply # honeycomb
+
+    # vnet/ip6-nd/ip6_nd.api
+    sw_interface_ip6nd_ra_config: '0x3eb00b1c'  # dev
+    sw_interface_ip6nd_ra_config_reply: '0xe8d4e804'  # dev
+
+    # vnet/interface.api
+    sw_interface_rx_placement_details: '0x9e44a7ce'  # dev
+    sw_interface_rx_placement_dump: '0xf9e6675e'  # dev
+    sw_interface_set_flags: '0xf5aec1b8'  # dev
+    sw_interface_set_flags_reply: '0xe8d4e804'  # dev
+
+    # sw_interface_set_geneve_bypass can be called
+    # by enable_interface_geneve_bypass function
+
+    # vnet/l2/l2.api
+    sw_interface_set_l2_bridge: '0xd0678b13'  # dev
+    sw_interface_set_l2_bridge_reply: '0xe8d4e804'  # dev
+    sw_interface_set_l2_xconnect: '0x4fa28a85'  # dev
+    sw_interface_set_l2_xconnect_reply: '0xe8d4e804'  # dev
+
+    # vnet/interface.api
+    sw_interface_set_mac_address: '0xc536e7eb'  # dev
+    sw_interface_set_mac_address_reply: '0xe8d4e804'  # dev
+    sw_interface_set_rx_placement: '0xdb65f3c9'  # dev
+    sw_interface_set_rx_placement_reply: '0xe8d4e804'  # dev
+    sw_interface_set_table: '0xdf42a577'  # dev
+    sw_interface_set_table_reply: '0xe8d4e804'  # dev
+    sw_interface_set_unnumbered: '0x154a6439'  # dev
+    sw_interface_set_unnumbered_reply: '0xe8d4e804'  # dev
+
+    # plugins/vxlan/vxlan.api
+    sw_interface_set_vxlan_bypass: '0x65247409'  # dev
+    sw_interface_set_vxlan_bypass_reply: '0xe8d4e804'  # dev
+
+    # vnet/devices/tap/tapv2.api
+    sw_interface_tap_v2_details: '0x1e2b2a47'  # dev
+    sw_interface_tap_v2_dump: '0xf9e6675e'  # dev
+
+    # plugins/vhost/vhost_user.api
+    sw_interface_vhost_user_details: '0x0cee1e53'  # dev teardown
+    sw_interface_vhost_user_dump: '0xf9e6675e'  # dev teardown
+
+    # vnet/bonding/bond.api
+    sw_member_interface_details: '0x3c4a0e23'  # perf
+    sw_member_interface_dump: '0xf9e6675e'  # perf
+    # ^^ see bond_*
+
+    # vnet/devices/tap/tapv2.api
+    tap_create_v3: '0x3f3fd1df'  # dev
+    tap_create_v3_reply: '0x5383d31f'  # dev
+
+    # plugins/vxlan/vxlan.api
+    vxlan_add_del_tunnel_v3: '0x0072b037'  # dev
+    vxlan_add_del_tunnel_v3_reply: '0x5383d31f'  # dev
+    # vxlan_gpe_tunnel_dump / details # honeycomb
+    # vxlan_tunnel_dump / details # unused L2 keyword: Get VXLAN dump
+
+    # plugins/wireguard/wireguard.api
+    wireguard_interface_create: '0xa530137e'
+    wireguard_interface_create_reply: '0x5383d31f'
+    wireguard_peer_add: '0x9b8aad61'
+    wireguard_peer_add_reply: '0x084a0cd3'
+    wg_set_async_mode: '0xa6465f7c'
+    wg_set_async_mode_reply: '0xe8d4e804'
+
     # Please keep alphabetic order.
     # Use bash command "env LC_COLLATE=C sort -u" if not clear.
 
index 39c6a4c..873b6af 100644 (file)
@@ -318,6 +318,8 @@ class IPsecUtil:
     def vpp_ipsec_set_async_mode(node, async_enable=1):
         """Set IPsec async mode on|off.
 
+        Unconditionally attempt to switch crypto dispatch into polling mode.
+
         :param node: VPP node to set IPsec async mode.
         :param async_enable: Async mode on or off.
         :type node: dict
@@ -325,13 +327,23 @@ class IPsecUtil:
         :raises RuntimeError: If failed to set IPsec async mode or if no API
             reply received.
         """
-        cmd = u"ipsec_set_async_mode"
-        err_msg = f"Failed to set IPsec async mode on host {node[u'host']}"
-        args = dict(
-            async_enable=async_enable
-        )
         with PapiSocketExecutor(node) as papi_exec:
+            cmd = u"ipsec_set_async_mode"
+            err_msg = f"Failed to set IPsec async mode on host {node[u'host']}"
+            args = dict(
+                async_enable=async_enable
+            )
             papi_exec.add(cmd, **args).get_reply(err_msg)
+            cmd = "crypto_set_async_dispatch_v2"
+            err_msg = "Failed to set dispatch mode."
+            args = dict(mode=0, adaptive=False)
+            try:
+                papi_exec.add(cmd, **args).get_reply(err_msg)
+            except (AttributeError, RuntimeError):
+                # Expected when the VPP build does not have the _v2 message
+                # yet (both before and after the first CRC check).
+                # TODO: Fail here when testing of pre-23.10 builds is over.
+                pass
 
     @staticmethod
     def vpp_ipsec_crypto_sw_scheduler_set_worker(
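
A hedged usage sketch of the changed method, called from Python the way
a Robot keyword wrapper would (the dut_node topology dict is an
assumption here):

    from resources.libraries.python.IPsecUtil import IPsecUtil

    # Enable async crypto; with this change the call also attempts to
    # switch crypto dispatch to polling (mode=0, adaptive=False) and
    # silently skips VPP builds without crypto_set_async_dispatch_v2.
    IPsecUtil.vpp_ipsec_set_async_mode(dut_node, async_enable=1)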
index b0fe6f5..b629996 100644 (file)
@@ -995,6 +995,7 @@ class PapiSocketExecutor:
         :returns: Papi replies parsed into a dict-like object,
             with fields due to API (possibly including retval).
         :rtype: List[UserDict]
+        :raises AttributeError: If VPP does not know the command.
         :raises RuntimeError: If the replies are not all correct.
         """
         vpp_instance = self.get_connected_client()
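
The AttributeError comes from vpp_papi itself: each known message
becomes an attribute of the connected client's api object, so a message
absent from the loaded .api.json files simply has no attribute to call.
A minimal sketch, assuming a connected VPPApiClient named vpp:

    try:
        reply = vpp.api.crypto_set_async_dispatch_v2(mode=0, adaptive=False)
    except AttributeError:
        # This VPP build never generated the attribute,
        # i.e. it does not know the message at all.
        reply = None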
index 761059f..a8947a1 100644 (file)
@@ -75,8 +75,9 @@ class VppApiCrcChecker:
 
         Starts the same as _expected, but each time an encountered api,crc pair
         fits the expectation, the pair is removed from all collections
-        within this mapping. Ideally, the active mappings will become empty.
-        If not, it is an error, VPP removed or renamed a message CSIT needs."""
+        within this mapping. It is fine if an api is missing
+        from some collections, as long as it is not missing
+        from all collections that remain in _expected."""
 
         self._found = dict()
         """Mapping from API name to CRC string.
@@ -325,12 +326,15 @@ class VppApiCrcChecker:
         if not report_missing:
             return
         missing = {name: mapp for name, mapp in self._missing.items() if mapp}
-        if missing:
-            missing_indented = json.dumps(
-                missing, indent=1, sort_keys=True, separators=[u",", u":"])
-            self.log_and_raise(
-                f"API CRCs missing from .api.json:\n{missing_indented}"
-            )
+        if set(missing.keys()) < set(self._expected.keys()):
+            # There is a collection where nothing is missing.
+            return
+        missing_indented = json.dumps(
+            missing, indent=1, sort_keys=True, separators=[u",", u":"]
+        )
+        self.log_and_raise(
+            f"API CRCs missing from .api.json:\n{missing_indented}"
+        )
 
     def check_api_name(self, api_name):
         """Fail if the api_name has no, or different from known CRC associated.
@@ -375,23 +379,25 @@ class VppApiCrcChecker:
             self.log_and_raise(
                 f"No active collection has API {api_name!r} with CRC {crc!r}"
             )
-        options = self._options[api_name]
+        options = self._options.get(api_name, None)
+        if not options:
+            # Missing options mean CSIT is attempting a new API on an old VPP build.
+            # If that is an issue, the API has been reported as missing already.
+            return
         options.pop(u"vat_help", None)
         if options:
             self._reported[api_name] = crc
             logger.console(f"{api_name} used but has options {options}")
 
     def print_warnings(self):
-        """Call check_api_name for every API name in surviving collections.
+        """Call check_api_name for API names in surviving collections.
 
         Useful for VPP CRC checking job.
-
-        Even though there usually is only one surviving collection,
-        the implementation has to be prepared for multiple collections,
-        and it should de-duplicate api names.
+        The API name is only checked when it appears
+        in all surviving collections.
         """
         api_name_to_crc_maps = self._expected.values()
-        api_name_sets = [set(n2c.keys()) for n2c in api_name_to_crc_maps]
-        api_names = set().union(*api_name_sets)
+        api_name_sets = (set(n2c.keys()) for n2c in api_name_to_crc_maps)
+        api_names = set.intersection(*api_name_sets)
         for api_name in sorted(api_names):
             self.check_api_name(api_name)
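
Taken together, the two set operations keep old VPP green: the
strict-subset test in the missing-API report passes when at least one
collection is fully satisfied, and the intersection in print_warnings
only reports names every surviving collection knows. A small
self-contained illustration (CRC values taken from the collections
above, heavily abbreviated):

    # Collection name -> (API name -> CRC).
    expected = {
        "23.06": {
            "show_version": "0x51077d14",
            "ipsec_set_async_mode": "0xa6465f7c",
        },
        "23.10-rc0": {
            "show_version": "0x51077d14",
            "crypto_set_async_dispatch_v2": "0x667d2d54",
        },
    }
    # On a 23.06 build, only the new collection still misses something:
    missing = {"23.10-rc0": {"crypto_set_async_dispatch_v2": "0x667d2d54"}}
    assert set(missing.keys()) < set(expected.keys())  # strict subset, no raise
    # Warnings cover only names known to every surviving collection:
    common = set.intersection(*(set(m.keys()) for m in expected.values()))
    assert common == {"show_version"}  # the _v2 message alone is skipped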