feat(api): Use newest API messages after rls2402 98/40698/7
author Vratko Polak <vrpolak@cisco.com>
Thu, 11 Apr 2024 16:24:03 +0000 (18:24 +0200)
committer Vratko Polak <vrpolak@cisco.com>
Mon, 15 Apr 2024 10:58:43 +0000 (12:58 +0200)
+ gtpu_add_del_tunnel_v2
 + Add comments on used values and unused fields.
+ ipsec_sad_entry_add_v2
 + Explicitly pass current default values.
+ ipsec_sa_v5_dump
+ policer_add
 + The old is_add argument was removed; it was never false.
+ sr_policy_add_v2
 + Add comments about currently unused fields.
 + Also support older VPP builds with the wrong reply message.
+ rdma_create_v4
 + Add comments about unused fields.

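The same pattern repeats in the files below: point cmd at the versioned message name and pass newly exposed fields explicitly. A condensed sketch of the IPsecUtil.py case (a hypothetical wrapper, assuming CSIT's PapiSocketExecutor; the values mirror the current API defaults used in the hunks below):

    from resources.libraries.python.PapiExecutor import PapiSocketExecutor

    IPSEC_UDP_PORT_DEFAULT = 4500
    IPSEC_REPLAY_WINDOW_DEFAULT = 64

    def add_sad_entry_v2(node, sad_entry):
        """Add one SAD entry via the v2 message, defaults made explicit."""
        sad_entry["udp_src_port"] = IPSEC_UDP_PORT_DEFAULT
        sad_entry["udp_dst_port"] = IPSEC_UDP_PORT_DEFAULT
        # New field in v2; 64 is the current VPP default anti-replay window.
        sad_entry["anti_replay_window_size"] = IPSEC_REPLAY_WINDOW_DEFAULT
        err_msg = f"Failed to add SAD entry on host {node['host']}"
        with PapiSocketExecutor(node) as papi_exec:
            papi_exec.add("ipsec_sad_entry_add_v2", entry=sad_entry).get_reply(err_msg)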
Change-Id: I3d5bc345c4cf099661626770c4d86bc230643cca
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
resources/api/vpp/supported_crcs.yaml
resources/libraries/python/IPsecUtil.py
resources/libraries/python/InterfaceUtil.py
resources/libraries/python/Policer.py
resources/libraries/python/SRv6.py

index 1f5383a..fb6a499 100644 (file)
 # Comment with an https link towards the build used to get or verify the CRC values.
 # The link cannot easily be used as a name, as it is too long.
 
-# Prefer x86_64, Ubuntu, latest LTS.
-# Note that during flag day process, the link would point
-# to (logged artifacts of) verify job, not merge job.
-
 # Note that you cannot easily avoid quotes for CRC values.
 # With leading 0x, yaml interprets the values as numbers.
 # Even with 0x removed, a hex CRC value may appear as a decimal number.
 
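A quick PyYAML check illustrating the two notes above (an editorial aside, not part of the file):

    import yaml

    # With a leading 0x, YAML 1.1 resolves the scalar as an integer.
    print(type(yaml.safe_load("crc: 0x5383d31f")["crc"]))    # <class 'int'>
    # An all-digit CRC with the 0x stripped loads as a decimal integer.
    print(type(yaml.safe_load("crc: 51077014")["crc"]))      # <class 'int'>
    # Quoting keeps the value a string, which is what this file relies on.
    print(type(yaml.safe_load("crc: '0x5383d31f'")["crc"]))  # <class 'str'>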
 # Trailing comments are optional, for tracking how to test the message.
+
 # Please keep alphabetic order.
 # Use bash command "env LC_COLLATE=C sort -u" if not clear.
 
 # https://packagecloud.io/app/fdio/release
-# /search?q=23.10&filter=debs&filter=all&dist=debian
-23.10-release:
+# /search?q=24.02&filter=debs&filter=all&dist=debian
+24.02-with-srv6-bug:
+    # plugins/acl/acl.api
+    acl_add_replace: '0xee5c2f18'  # dev
+    acl_add_replace_reply: '0xac407b0c'  # dev
+    acl_details: '0x95babae0'  # dev teardown
+    acl_dump: '0xef34fea4'  # dev teardown
+    acl_interface_list_details: '0xe695d256'  # dev teardown
+    acl_interface_list_dump: '0xf9e6675e'  # dev teardown
+    acl_interface_set_acl_list: '0x473982bd'  # dev
+    acl_interface_set_acl_list_reply: '0xe8d4e804'  # dev
+
+    # vlibmemory/vlib.api
+    add_node_next: '0x2457116d'  # dev
+    add_node_next_reply: '0x2ed75f32'  # dev
+
+    # plugins/adl/adl.api
+    adl_allowlist_enable_disable: '0xea88828d'  # dev
+    adl_allowlist_enable_disable_reply: '0xe8d4e804'  # dev
+    adl_interface_enable_disable: '0x5501adee'  # dev
+    adl_interface_enable_disable_reply: '0xe8d4e804'  # dev
+
+    # plugins/af_xdp/af_xdp.api
+    af_xdp_create_v3: '0xcf4b1827'  # perf
+    af_xdp_create_v3_reply: '0x5383d31f'  # perf
+
+    # plugins/avf/avf.api
+    avf_create: '0xdaab8ae2'  # dev
+    avf_create_reply: '0x5383d31f'  # dev
+
+    # vnet/bonding/bond.api
+    bond_add_member: '0xe7d14948'  # perf
+    bond_add_member_reply: '0xe8d4e804'  # perf
+    bond_create2: '0x912fda76'  # perf
+    bond_create2_reply: '0x5383d31f'  # perf
+    # 4x^ 64B-1c-1lbvpplacp-dot1q-l2xcbase-eth-2vhostvr1024-1vm-ndrpdr
+    # ^ ndrpdrAND1cAND64bAND1lbvpplacp-dot1q-l2xcbase-eth-2vhostvr1024-1vm
+
+    # vnet/l2/l2.api
+    bridge_domain_add_del_v2: '0x600b7170'  # dev
+    bridge_domain_add_del_v2_reply: '0xfcb1e980'  # dev
+    # bridge_domain_dump / details # honeycomb
+
+    # vnet/classify/classify.api
+    classify_add_del_session: '0xf20879f0'  # dev
+    classify_add_del_session_reply: '0xe8d4e804'  # dev
+    classify_add_del_table: '0x6849e39e'  # dev
+    classify_add_del_table_reply: '0x05486349'  # dev
+    # classify_session_dump / details # honeycomb
+    # classify_table_by_interface / reply # honeycomb
+    # classify_table_info / reply # honeycomb
+
+    # vlibmemory/vlib.api
+    cli_inband: '0xf8377302'  # dev setup
+    cli_inband_reply: '0x05879051'  # dev setup
+
+    # vnet/interface.api
+    create_loopback_instance: '0xd36a3ee2'  # dev
+    create_loopback_instance_reply: '0x5383d31f'  # dev
+
+    # vnet/interface.api
+    create_subif: '0x790ca755'  # perf
+    create_subif_reply: '0x5383d31f'  # perf
+    # ^^ 64B-1c-dot1ad-l2xcbase-ndrpdr
+    # ^ ndrpdrAND1cAND64bANDdot1ad-l2xcbase
+
+    # plugins/vhost/vhost_user.api
+    create_vhost_user_if_v2: '0xdba1cc1d'  # dev
+    create_vhost_user_if_v2_reply: '0x5383d31f'  # dev
+
+    # vnet/interface.api
+    create_vlan_subif: '0xaf34ac8b'  # dev
+    create_vlan_subif_reply: '0x5383d31f'  # dev
+
+    # plugins/crypto_sw_scheduler/crypto_sw_scheduler.api
+    crypto_set_async_dispatch_v2: '0x667d2d54'  # perf
+    crypto_set_async_dispatch_v2_reply: '0xe8d4e804'  # perf
+    crypto_sw_scheduler_set_worker: '0xb4274502'  # perf
+    crypto_sw_scheduler_set_worker_reply: '0xe8d4e804'  # perf
+    # 4x^ 64B-1c-ethip4ipsec8tnlswasync-scheduler-ip4base-int-aes128gcm-udir-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDethip4ipsec8tnlswasync-scheduler-ip4base-int-\
+    #   aes128gcm-udir
+
+    # plugins/nat/det44/det44.api
+    det44_add_del_map: '0x1150a190'  # dev
+    det44_add_del_map_reply: '0xe8d4e804'  # dev
+    det44_interface_add_del_feature: '0xdc17a836'  # dev
+    det44_interface_add_del_feature_reply: '0xe8d4e804'  # dev
+    det44_interface_details: '0xe60cc5be'  # dev teardown
+    det44_interface_dump: '0x51077d14'  # dev teardown
+    det44_map_details: '0xad91dc83'  # dev teardown
+    det44_map_dump: '0x51077d14'  # dev teardown
+    det44_plugin_enable_disable: '0x617b6bf8'  # dev
+    det44_plugin_enable_disable_reply: '0xe8d4e804'  # dev
+    det44_session_dump: '0xe45a3af7'  # dev
+    # TODO: Which test to run to verify det44_* messages?
+    # dhcp_proxy_dump / details # honeycomb
+
+    # vnet/flow/flow.api
+    flow_add_v2: '0x5b757558'  # dev
+    flow_add_v2_reply: '0x8587dc85'  # dev
+    flow_del: '0xb6b9b02c'  # dev
+    flow_del_reply: '0xe8d4e804'  # dev
+    flow_disable: '0x2024be69'  # dev
+    flow_disable_reply: '0xe8d4e804'  # dev
+    flow_enable: '0x2024be69'  # dev
+    flow_enable_reply: '0xe8d4e804'  # dev
+
+    # plugins/geneve/geneve.api
+    geneve_add_del_tunnel2: '0x8c2a9999'  # dev
+    geneve_add_del_tunnel2_reply: '0x5383d31f'  # dev
+    geneve_tunnel_details: '0x6b16eb24'  # dev
+    geneve_tunnel_dump: '0xf9e6675e'  # dev
+
+    # plugins/lisp/lisp-gpe/lisp_gpe.api
+    gpe_enable_disable: '0xc264d7bf'  # dev
+    gpe_enable_disable_reply: '0xe8d4e804'  # dev
+
+    # gre_tunnel_add_del / reply # unused L1 keyword: create_gre_tunnel_interface
+
+    # plugins/gtpu/gtpu.api
+    gtpu_add_del_tunnel_v2: '0xa0c30713'  # perf
+    gtpu_add_del_tunnel_v2_reply: '0x62b41304'  # perf
+    # ^^ 64B-1c-ethip4gtpusw-ip4base-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDethip4gtpusw-ip4base
+    gtpu_offload_rx: '0xf0b08786'  # perf
+    gtpu_offload_rx_reply: '0xe8d4e804'  # perf
+
+    # vnet/interface.api
+    hw_interface_set_mtu: '0xe6746899'  # dev
+    hw_interface_set_mtu_reply: '0xe8d4e804'  # dev
+
+    # vnet/classify/classify.api
+    input_acl_set_interface: '0xde7ad708'  # dev
+    input_acl_set_interface_reply: '0xe8d4e804'  # dev
+
+    # vnet/ip/ip.api
+    ip_address_details: '0xee29b797'  # dev
+    ip_address_dump: '0x2d033de4'  # dev
+
+    # vnet/ip-neighbor/ip_neighbor.api
+    ip_neighbor_add_del: '0x0607c257'  # dev
+    ip_neighbor_add_del_reply: '0x1992deab'  # dev
+
+    # ip_probe_neighbor / reply # unused L1 keyword: vpp_ip_probe
+
+    # vnet/ip/ip.api
+    ip_route_add_del: '0xb8ecfe0d'  # dev
+    ip_route_add_del_reply: '0x1992deab'  # dev
+    # ip_source_check_interface_add_del / reply # unused L1 keyword: vpp_ip_source_check_setup
+    ip_table_add_del: '0x0ffdaec0'  # dev
+    ip_table_add_del_reply: '0xe8d4e804'  # dev
+
+    # vnet/ipip/ipip.api
+    ipip_add_tunnel: '0x2ac399f5'  # dev
+    ipip_add_tunnel_reply: '0x5383d31f'  # dev
+
+    # vnet/ipsec/ipsec.api
+    ipsec_interface_add_del_spd: '0x80f80cbb'  # dev
+    ipsec_interface_add_del_spd_reply: '0xe8d4e804'  # dev
+    ipsec_sa_v5_details: '0x3cfecfbd'  # dev teardown
+    ipsec_sa_v5_dump: '0x2076c2f4'  # dev teardown
+    ipsec_sad_entry_add_v2: '0x9611297a'  # dev
+    ipsec_sad_entry_add_v2_reply: '0x9ffac24b'  # dev
+    ipsec_select_backend: '0x5bcfd3b7'  # perf
+    ipsec_select_backend_reply: '0xe8d4e804'  # perf
+    # ^^ 64B-1c-ethip4ipsec1tnlhw-ip4base-int-aes256gcm-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDethip4ipsec1tnlhw-ip4base-int-aes256gcm
+    ipsec_set_async_mode: '0xa6465f7c'  # perf
+    ipsec_set_async_mode_reply: '0xe8d4e804'  # perf
+    ipsec_spd_add_del: '0x20e89a95'  # dev
+    ipsec_spd_add_del_reply: '0xe8d4e804'  # dev
+    ipsec_spd_entry_add_del_v2: '0x7bfe69fc'  # dev
+    ipsec_spd_entry_add_del_v2_reply: '0x9ffac24b'  # dev
+    ipsec_tunnel_protect_update: '0x30d5f133'  # dev
+    ipsec_tunnel_protect_update_reply: '0xe8d4e804'  # dev
+    # ^^ 64B-1c-ethip4ipsec1tnlhw-ip4base-int-aes256gcm-ndrpdr
+    # ^ See ipsec_select_backend.
+
+    # vnet/l2/l2.api
+    # l2_fib_table_dump / details # honeycomb
+    l2_interface_vlan_tag_rewrite: '0x62cc0bbc'  # perf
+    l2_interface_vlan_tag_rewrite_reply: '0xe8d4e804'  # perf
+    # ^^ 64B-1c-dot1ad-l2xcbase-ndrpdr
+    # ^ ndrpdrAND1cAND64BANDdot1ad-l2xcbase
+    l2_patch_add_del: '0xa1f6a6f3'  # dev
+    l2_patch_add_del_reply: '0xe8d4e804'  # dev
+    # l2fib_add_del / reply # unused L1 keyword: vpp_add_l2fib_entry
+
+    # plugins/lb/lb.api
+    lb_add_del_as: '0x35d72500'  # perf
+    lb_add_del_as_reply: '0xe8d4e804'  # perf
+    lb_add_del_intf_nat4: '0x47d6e753'  # perf
+    lb_add_del_intf_nat4_reply: '0xe8d4e804'  # perf
+    lb_add_del_vip_v2: '0x7c520e0f'  # perf
+    lb_add_del_vip_v2_reply: '0xe8d4e804'  # perf
+    lb_conf: '0x56cd3261'  # perf
+    lb_conf_reply: '0xe8d4e804'  # perf
+    # 8x^ 64B-1c-ethip4-loadbalancer-nat4-ndrpdr
+    # ^ 2n: ndrpdrAND1cAND64BANDethip4-loadbalancer-nat4
+
+    # plugins/lisp/lisp-cp/lisp.api
+    lisp_add_del_adjacency: '0x2ce0e6f6'  # dev
+    lisp_add_del_adjacency_reply: '0xe8d4e804'  # dev
+    lisp_add_del_local_eid: '0x4e5a83a2'  # dev
+    lisp_add_del_local_eid_reply: '0xe8d4e804'  # dev
+    lisp_add_del_locator: '0xaf4d8f13'  # dev
+    lisp_add_del_locator_reply: '0xe8d4e804'  # dev
+    lisp_add_del_locator_set: '0x6fcd6471'  # dev
+    lisp_add_del_locator_set_reply: '0xb6666db4'  # dev
+    # lisp_add_del_map_resolver / reply # unused L2 keyword: Configure LISP map resolver address
+    lisp_add_del_remote_mapping: '0x6d5c789e'  # dev
+    lisp_add_del_remote_mapping_reply: '0xe8d4e804'  # dev
+    lisp_eid_table_add_del_map: '0x9481416b'  # dev
+    lisp_eid_table_add_del_map_reply: '0xe8d4e804'  # dev
+    lisp_enable_disable: '0xc264d7bf'  # dev
+    lisp_enable_disable_reply: '0xe8d4e804'  # dev
+
+    # plugins/acl/acl.api
+    macip_acl_add: '0xce6fbad0'  # dev
+    macip_acl_add_reply: '0xac407b0c'  # dev
+    macip_acl_details: '0x27135b59'  # dev teardown
+    macip_acl_dump: '0xef34fea4'  # dev teardown
+    macip_acl_interface_add_del: '0x4b8690b1'  # dev
+    macip_acl_interface_add_del_reply: '0xe8d4e804'  # dev
+    macip_acl_interface_get: '0x51077d14'  # dev teardown
+    macip_acl_interface_get_reply: '0xaccf9b05'  # dev teardown
+
+    # plugins/memif/memif.api
+    memif_create_v2: '0x8c7de5f7'  # dev
+    memif_create_v2_reply: '0x5383d31f'  # dev
+    memif_details: '0xda34feb9'  # dev
+    memif_dump: '0x51077d14'  # dev
+    memif_socket_filename_add_del_v2: '0x34223bdf'  # dev
+    memif_socket_filename_add_del_v2_reply: '0x9f29bdb9'  # dev
+
+    # plugins/nat/nat44-ed/nat44_ed.api
+    nat44_add_del_address_range: '0x6f2b8055'  # dev
+    nat44_add_del_address_range_reply: '0xe8d4e804'  # dev
+    nat44_address_details: '0x0d1beac1'  # dev teardown
+    nat44_address_dump: '0x51077d14'  # dev teardown
+    nat44_ed_plugin_enable_disable: '0xbe17f8dd'  # dev
+    nat44_ed_plugin_enable_disable_reply: '0xe8d4e804'  # dev
+    nat44_interface_add_del_feature: '0xf3699b83'  # dev
+    nat44_interface_add_del_feature_reply: '0xe8d4e804'  # dev
+    nat44_interface_addr_details: '0xe4aca9ca'  # dev teardown
+    nat44_interface_addr_dump: '0x51077d14'  # dev teardown
+    nat44_interface_details: '0x5d286289'  # dev teardown
+    nat44_interface_dump: '0x51077d14'  # dev teardown
+    nat44_show_running_config: '0x51077d14'  # dev teardown
+    nat44_show_running_config_reply: '0x93d8e267'  # dev teardown
+    nat44_static_mapping_details: '0x06cb40b2'  # dev teardown
+    nat44_static_mapping_dump: '0x51077d14'  # dev teardown
+    # nat44_user_dump and nat44_user_session_dump can be called
+    # by the show_nat_user_data function
+    nat_worker_details: '0x84bf06fc'  # dev teardown
+    nat_worker_dump: '0x51077d14'  # dev teardown
+
+    # plugins/nsim/nsim.api
+    nsim_configure2: '0x64de8ed3'  # perf
+    nsim_configure2_reply: '0xe8d4e804'  # perf
+    nsim_output_feature_enable_disable: '0x3865946c'  # perf
+    nsim_output_feature_enable_disable_reply: '0xe8d4e804'  # perf
+    # 4x^ 1280B-1c-eth-ip4udpquicscale10cl1s-vppecho-bps
+    # ^ 1280BAND1cANDeth-ip4udpquicscale10cl1s-vppecho
+
+    # vnet/policer/policer.api
+    policer_add: '0x4d949e35'  # dev
+    policer_add_reply: '0xa177cef2'  # dev
+
+    # vnet/classify/classify.api
+    policer_classify_set_interface: '0xde7ad708'  # dev
+    policer_classify_set_interface_reply: '0xe8d4e804'  # dev
+
+    # plugins/rdma/rdma.api
+    rdma_create_v4: '0xc6287ea8'  # perf
+    rdma_create_v4_reply: '0x5383d31f'  # perf
+    # 2x^ Any test with drv_rdma. Currently only available on 2n-clx.
+    # - Not testable by devicetest (until we have an environment with the right NICs).
+
+    # vlibmemory/vlib.api
+    show_threads: '0x51077d14'  # dev
+    show_threads_reply: '0xefd78e83'  # dev
+
+    # vpp/api/vpe.api
+    show_version: '0x51077d14'  # dev setup
+    show_version_reply: '0xc919bde1'  # dev setup
+
+    # vnet/srv6/sr.api
+    sr_localsid_add_del: '0x5a36c324'  # dev
+    sr_localsid_add_del_reply: '0xe8d4e804'  # dev
+    sr_localsids_details: '0x2e9221b9'  # dev teardown
+    sr_localsids_dump: '0x51077d14'  # dev teardown
+    sr_policies_v2_details: '0x96dcb699'  # dev teardown
+    sr_policies_v2_dump: '0x51077d14'  # dev teardown
+    sr_policy_add_v2: '0xf6297f36'  # dev
+    sr_policy_add_reply: '0xe8d4e804'  # dev, wrong reply name from older builds
+    sr_set_encap_source: '0xd3bad5e1'  # dev
+    sr_set_encap_source_reply: '0xe8d4e804'  # dev
+    sr_steering_add_del: '0xe46b0a0f'  # dev
+    sr_steering_add_del_reply: '0xe8d4e804'  # dev
+    sr_steering_pol_details: '0xd41258c9'  # dev teardown
+    sr_steering_pol_dump: '0x51077d14'  # dev teardown
+
+    # vnet/bonding/bond.api
+    sw_bond_interface_details: '0x9428a69c'  # perf
+    sw_bond_interface_dump: '0xf9e6675e'  # perf
+    # ^^ see bond_*
+
+    # vnet/interface.api
+    sw_interface_add_del_address: '0x5463d73b'  # dev
+    sw_interface_add_del_address_reply: '0xe8d4e804'  # dev
+    sw_interface_details: '0x6c221fc7'  # dev
+    sw_interface_dump: '0xaa610c27'  # dev
+    # sw_interface_get_table / reply # honeycomb
+
+    # vnet/ip6-nd/ip6_nd.api
+    sw_interface_ip6nd_ra_config: '0x3eb00b1c'  # dev
+    sw_interface_ip6nd_ra_config_reply: '0xe8d4e804'  # dev
+
+    # vnet/interface.api
+    sw_interface_rx_placement_details: '0x9e44a7ce'  # dev
+    sw_interface_rx_placement_dump: '0xf9e6675e'  # dev
+    sw_interface_set_flags: '0xf5aec1b8'  # dev
+    sw_interface_set_flags_reply: '0xe8d4e804'  # dev
+
+    # sw_interface_set_geneve_bypass can be called
+    # by the enable_interface_geneve_bypass function
+
+    # vnet/l2/l2.api
+    sw_interface_set_l2_bridge: '0xd0678b13'  # dev
+    sw_interface_set_l2_bridge_reply: '0xe8d4e804'  # dev
+    sw_interface_set_l2_xconnect: '0x4fa28a85'  # dev
+    sw_interface_set_l2_xconnect_reply: '0xe8d4e804'  # dev
+
+    # vnet/interface.api
+    sw_interface_set_mac_address: '0xc536e7eb'  # dev
+    sw_interface_set_mac_address_reply: '0xe8d4e804'  # dev
+    sw_interface_set_rx_placement: '0xdb65f3c9'  # dev
+    sw_interface_set_rx_placement_reply: '0xe8d4e804'  # dev
+    sw_interface_set_table: '0xdf42a577'  # dev
+    sw_interface_set_table_reply: '0xe8d4e804'  # dev
+    sw_interface_set_unnumbered: '0x154a6439'  # dev
+    sw_interface_set_unnumbered_reply: '0xe8d4e804'  # dev
+
+    # plugins/vxlan/vxlan.api
+    sw_interface_set_vxlan_bypass: '0x65247409'  # dev
+    sw_interface_set_vxlan_bypass_reply: '0xe8d4e804'  # dev
+
+    # vnet/devices/tap/tapv2.api
+    sw_interface_tap_v2_details: '0x1e2b2a47'  # dev
+    sw_interface_tap_v2_dump: '0xf9e6675e'  # dev
+
+    # plugins/vhost/vhost_user.api
+    sw_interface_vhost_user_details: '0x0cee1e53'  # dev teardown
+    sw_interface_vhost_user_dump: '0xf9e6675e'  # dev teardown
+
+    # vnet/bonding/bond.api
+    sw_member_interface_details: '0x3c4a0e23'  # perf
+    sw_member_interface_dump: '0xf9e6675e'  # perf
+    # ^^ see bond_*
+
+    # vnet/devices/tap/tapv2.api
+    tap_create_v3: '0x3f3fd1df'  # dev
+    tap_create_v3_reply: '0x5383d31f'  # dev
+
+    # plugins/vxlan/vxlan.api
+    vxlan_add_del_tunnel_v3: '0x0072b037'  # dev
+    vxlan_add_del_tunnel_v3_reply: '0x5383d31f'  # dev
+    # vxlan_gpe_tunnel_dump / details # honeycomb
+    # vxlan_tunnel_dump / details # unused L2 keyword: Get VXLAN dump
+
+    # plugins/wireguard/wireguard.api
+    wireguard_interface_create: '0xa530137e'
+    wireguard_interface_create_reply: '0x5383d31f'
+    wireguard_peer_add: '0x9b8aad61'
+    wireguard_peer_add_reply: '0x084a0cd3'
+    wg_set_async_mode: '0xa6465f7c'
+    wg_set_async_mode_reply: '0xe8d4e804'
+
+    # Please keep alphabetic order.
+
+24.02-with-srv6-fix:
     # plugins/acl/acl.api
     acl_add_replace: '0xee5c2f18'  # dev
     acl_add_replace_reply: '0xac407b0c'  # dev
     # gre_tunnel_add_del / reply # unused L1 keyword: create_gre_tunnel_interface
 
     # plugins/gtpu/gtpu.api
-    gtpu_add_del_tunnel: '0xca983a2b'  # perf
-    gtpu_add_del_tunnel_reply: '0x5383d31f'  # perf
+    gtpu_add_del_tunnel_v2: '0xa0c30713'  # perf
+    gtpu_add_del_tunnel_v2_reply: '0x62b41304'  # perf
     # ^^ 64B-1c-ethip4gtpusw-ip4base-ndrpdr
     # ^ ndrpdrAND1cAND64BANDethip4gtpusw-ip4base
     gtpu_offload_rx: '0xf0b08786' # perf
     # vnet/ipsec/ipsec.api
     ipsec_interface_add_del_spd: '0x80f80cbb'  # dev
     ipsec_interface_add_del_spd_reply: '0xe8d4e804'  # dev
-    ipsec_sa_v4_details: '0x87a322d7'  # dev teardown
-    ipsec_sa_v4_dump: '0x2076c2f4'  # dev teardown
-    ipsec_sad_entry_add: '0x50229353'  # dev teardown
-    ipsec_sad_entry_add_reply: '0x9ffac24b'  # dev
+    ipsec_sa_v5_details: '0x3cfecfbd'  # dev teardown
+    ipsec_sa_v5_dump: '0x2076c2f4'  # dev teardown
+    ipsec_sad_entry_add_v2: '0x9611297a'  # dev
+    ipsec_sad_entry_add_v2_reply: '0x9ffac24b'  # dev
     ipsec_select_backend: '0x5bcfd3b7'  # perf
     ipsec_select_backend_reply: '0xe8d4e804'  # perf
     # ^^ 64B-1c-ethip4ipsec1tnlhw-ip4base-int-aes256gcm-ndrpdr
     # ^ 1280BAND1cANDeth-ip4udpquicscale10cl1s-vppecho
 
     # vnet/policer/policer.api
-    policer_add_del: '0x2b31dd38'  # dev
-    policer_add_del_reply: '0xa177cef2'  # dev
+    policer_add: '0x4d949e35'  # dev
+    policer_add_reply: '0xa177cef2'  # dev
 
     # vnet/classify/classify.api
     policer_classify_set_interface: '0xde7ad708'  # dev
     policer_classify_set_interface_reply: '0xe8d4e804'  # dev
 
     # plugins/rdma/rdma.api
-    rdma_create_v3: '0xc6287ea8'  # perf
-    rdma_create_v3_reply: '0x5383d31f'  # perf
+    rdma_create_v4: '0xc6287ea8'  # perf
+    rdma_create_v4_reply: '0x5383d31f'  # perf
     # 2x^ Any test with drv_rdma. Currently only available on 2n-clx.
 # - Not testable by devicetest (until we have an environment with the right NICs).
 
     sr_localsids_dump: '0x51077d14'  # dev teardown
     sr_policies_v2_details: '0x96dcb699'  # dev teardown
     sr_policies_v2_dump: '0x51077d14'  # dev teardown
-    sr_policy_add: '0x44ac92e8'  # dev
-    sr_policy_add_reply: '0xe8d4e804'  # dev
+    sr_policy_add_v2: '0xf6297f36'  # dev
+    sr_policy_add_v2_reply: '0xe8d4e804'  # dev
     sr_set_encap_source: '0xd3bad5e1'  # dev
     sr_set_encap_source_reply: '0xe8d4e804'  # dev
     sr_steering_add_del: '0xe46b0a0f'  # dev
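Collections like the two above feed CSIT's CRC checking; a minimal, hypothetical sketch of the idea (the actual checker in VppApiCrc.py is more involved):

    import yaml

    def crc_mismatches(yaml_path, collection_name, reported):
        """Compare message CRCs reported by VPP against one collection.

        A hypothetical helper; "reported" maps message names to CRC
        strings as obtained from the VPP binary API.
        """
        with open(yaml_path, "rt") as fileobj:
            expected = yaml.safe_load(fileobj)[collection_name]
        return {
            name: (crc, reported.get(name))
            for name, crc in expected.items()
            if reported.get(name) != crc
        }

A build passes when at least one collection, here either 24.02-with-srv6-bug or 24.02-with-srv6-fix, yields no mismatches.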
index 07caad0..214764d 100644 (file)
@@ -36,7 +36,8 @@ from resources.libraries.python.VPPUtil import VPPUtil
 from resources.libraries.python.FlowUtil import FlowUtil
 
 
-IPSEC_UDP_PORT_NONE = 0xffff
+IPSEC_UDP_PORT_DEFAULT = 4500
+IPSEC_REPLAY_WINDOW_DEFAULT = 64
 
 
 def gen_key(length):
@@ -450,7 +451,7 @@ class IPsecUtil:
             src_addr = u""
             dst_addr = u""
 
-        cmd = u"ipsec_sad_entry_add"
+        cmd = u"ipsec_sad_entry_add_v2"
         err_msg = f"Failed to add Security Association Database entry " \
             f"on host {node[u'host']}"
         sad_entry = dict(
@@ -471,8 +472,9 @@ class IPsecUtil:
                 dscp=int(IpDscp.IP_API_DSCP_CS0),
             ),
             protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
-            udp_src_port=4500,  # default value in api
-            udp_dst_port=4500  # default value in api
+            udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+            udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+            anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
         )
         args = dict(entry=sad_entry)
         with PapiSocketExecutor(node) as papi_exec:
@@ -547,7 +549,7 @@ class IPsecUtil:
                     IPsecSadFlags.IPSEC_API_SAD_FLAG_IS_TUNNEL_V6
                 )
 
-        cmd = u"ipsec_sad_entry_add"
+        cmd = u"ipsec_sad_entry_add_v2"
         err_msg = f"Failed to add Security Association Database entry " \
             f"on host {node[u'host']}"
 
@@ -569,8 +571,9 @@ class IPsecUtil:
                 dscp=int(IpDscp.IP_API_DSCP_CS0),
             ),
             protocol=int(IPsecProto.IPSEC_API_PROTO_ESP),
-            udp_src_port=4500,  # default value in api
-            udp_dst_port=4500,  # default value in api
+            udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+            udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+            anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
         )
         args = dict(entry=sad_entry)
         with PapiSocketExecutor(node, is_async=True) as papi_exec:
@@ -1227,7 +1230,7 @@ class IPsecUtil:
             # Configure IPSec SAD entries
             ckeys = [bytes()] * existing_tunnels
             ikeys = [bytes()] * existing_tunnels
-            cmd = u"ipsec_sad_entry_add"
+            cmd = u"ipsec_sad_entry_add_v2"
             c_key = dict(
                 length=0,
                 data=None
@@ -1255,8 +1258,9 @@ class IPsecUtil:
                     dscp=int(IpDscp.IP_API_DSCP_CS0),
                 ),
                 salt=0,
-                udp_src_port=IPSEC_UDP_PORT_NONE,
-                udp_dst_port=IPSEC_UDP_PORT_NONE,
+                udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+                udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+                anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
             )
             args = dict(entry=sad_entry)
             for i in range(existing_tunnels, n_tunnels):
@@ -1466,7 +1470,7 @@ class IPsecUtil:
                 ]
             )
             # Configure IPSec SAD entries
-            cmd = u"ipsec_sad_entry_add"
+            cmd = u"ipsec_sad_entry_add_v2"
             c_key = dict(
                 length=0,
                 data=None
@@ -1494,8 +1498,9 @@ class IPsecUtil:
                     dscp=int(IpDscp.IP_API_DSCP_CS0),
                 ),
                 salt=0,
-                udp_src_port=IPSEC_UDP_PORT_NONE,
-                udp_dst_port=IPSEC_UDP_PORT_NONE,
+                udp_src_port=IPSEC_UDP_PORT_DEFAULT,
+                udp_dst_port=IPSEC_UDP_PORT_DEFAULT,
+                anti_replay_window_size=IPSEC_REPLAY_WINDOW_DEFAULT,
             )
             args = dict(entry=sad_entry)
             for i in range(existing_tunnels, n_tunnels):
@@ -2033,10 +2038,8 @@ class IPsecUtil:
         :param node: DUT node.
         :type node: dict
         """
-        cmds = [
-            u"ipsec_sa_v4_dump"
-        ]
-        PapiSocketExecutor.dump_and_log(node, cmds)
+        cmd = "ipsec_sa_v5_dump"
+        PapiSocketExecutor.dump_and_log(node, [cmd])
 
     @staticmethod
     def vpp_ipsec_flow_enale_rss(node, proto, type, function="default"):
index 9f023d9..ff01330 100644 (file)
@@ -1066,7 +1066,7 @@ class InterfaceUtil:
         :raises RuntimeError: if it is unable to create GTPU interface on the
             node.
         """
-        cmd = u"gtpu_add_del_tunnel"
+        cmd = u"gtpu_add_del_tunnel_v2"
         args = dict(
             is_add=True,
             src_address=IPAddress.create_ip_address_object(
@@ -1077,8 +1077,10 @@ class InterfaceUtil:
             ),
             mcast_sw_if_index=Constants.BITWISE_NON_ZERO,
             encap_vrf_id=0,
-            decap_next_index=2,
-            teid=teid
+            decap_next_index=2,  # ipv4
+            teid=teid,
+            # pdu_extension: Unused, false by default.
+            # qfi: Irrelevant when pdu_extension is not used.
         )
         err_msg = f"Failed to create GTPU tunnel interface " \
             f"on host {node[u'host']}"
@@ -1373,7 +1375,7 @@ class InterfaceUtil:
             node, u"set logging class rdma level debug"
         )
 
-        cmd = u"rdma_create_v3"
+        cmd = u"rdma_create_v4"
         pci_addr = Topology.get_interface_pci_addr(node, if_key)
         args = dict(
             name=InterfaceUtil.pci_to_eth(node, pci_addr),
@@ -1386,6 +1388,8 @@ class InterfaceUtil:
             no_multi_seg=False,
             max_pktlen=0,
             # TODO: Apply desired RSS flags.
+            # rss4 kept 0 (auto) as API default.
+            # rss6 kept 0 (auto) as API default.
         )
         err_msg = f"Failed to create RDMA interface on host {node[u'host']}"
         with PapiSocketExecutor(node) as papi_exec:
index 6d3bf86..28ed0b0 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2021 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -72,7 +72,7 @@ class Policer:
     def policer_set_configuration(
             node, policer_name, cir, eir, cbs, ebs, rate_type, round_type,
             policer_type, conform_action_type, exceed_action_type,
-            violate_action_type, color_aware, is_add=True, conform_dscp=None,
+            violate_action_type, color_aware, conform_dscp=None,
             exceed_dscp=None, violate_dscp=None):
         """Configure policer on VPP node.
 
@@ -89,7 +89,6 @@ class Policer:
         :param exceed_action_type: Exceed action type.
         :param violate_action_type: Violate action type.
         :param color_aware: Color-blind (cb) or color-aware (ca).
-        :param is_add: Add policer if True, else delete.
         :param conform_dscp: DSCP for conform mark_and_transmit action.
         :param exceed_dscp: DSCP for exceed mark_and_transmit action.
         :param violate_dscp: DSCP for violate mark_and_transmit action.
@@ -106,7 +105,6 @@ class Policer:
         :type exceed_action_type: str
         :type violate_action_type: str
         :type color_aware: str
-        :type is_add: bool
         :type conform_dscp: str
         :type exceed_dscp: str
         :type violate_dscp: str
@@ -130,10 +128,8 @@ class Policer:
             else 0
         )
 
-        cmd = u"policer_add_del"
-        args = dict(
-            is_add=is_add,
-            name=str(policer_name),
+        cmd = u"policer_add"
+        infos = dict(
             cir=int(cir),
             eir=int(eir),
             cb=int(cbs),
@@ -148,6 +144,10 @@ class Policer:
             violate_action=violate_action,
             color_aware=bool(color_aware == u"'ca'")
         )
+        args = dict(
+            name=str(policer_name),
+            infos=infos,
+        )
         err_msg = f"Failed to configure policer {policer_name} " \
             f"on host {node['host']}"
 
index d16c352..0170df5 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -222,15 +222,14 @@ class SRv6:
         :type sid_list: list
         :type mode: str
         """
-        # TODO: Convert to use sr_policy_add_v2.
-        # The conversion is not straightforward so it was not done when bumping.
-        cmd = u"sr_policy_add"
+        cmd = u"sr_policy_add_v2"
         args = dict(
             bsid_addr=IPv6Address(bsid).packed,
             weight=1,
             is_encap=bool(mode == u"encap"),
-            is_spray=False,
-            sids=SRv6.create_srv6_sid_list(sid_list)
+            type=0,  # Neither SPRAY nor TEF are needed yet.
+            sids=SRv6.create_srv6_sid_list(sid_list),
+            # encap_src is optional, do not set yet.
         )
         err_msg = f"Failed to add SR policy for BindingSID {bsid} " \
             f"on host {node[u'host']}"