4 log /var/log/vpp/vpp.log
6 cli-listen /run/vpp/cli.sock
15 # scheduler-policy fifo
16 # scheduler-priority 50
18 ## In the VPP there is one main thread and optionally the user can create worker(s)
19 ## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
21 ## Manual pinning of thread(s) to CPU core(s)
23 ## Set logical CPU core where main thread runs
26 ## Set logical CPU core(s) where worker threads are running
27 # corelist-workers 2-3,18-19
29 ## Automatic pinning of thread(s) to CPU core(s)
31 ## Sets number of CPU core(s) to be skipped (1 ... N-1)
33 ## Skipped CPU core(s) are not used for pinning the main thread and worker thread(s).
33 ## The main thread is automatically pinned to the first available CPU core and worker(s)
34 ## are pinned to next free CPU core(s) after core assigned to main thread
37 ## Specify a number of workers to be created
38 ## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
39 ## and main thread's CPU core
42 ## Set scheduling policy and priority of main and worker threads
44 ## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
45 ## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
46 # scheduler-policy fifo
48 ## Scheduling priority is used only for "real-time" policies (fifo and rr),
49 ## and has to be in the range of priorities supported for a particular policy
50 # scheduler-priority 50
56 ## Change default settings for all interfaces
58 ## Number of receive queues, enables RSS
62 ## Number of transmit queues, Default is equal
63 ## to the number of worker threads, or 1 if there are no worker threads
66 ## Number of descriptors in transmit and receive rings
67 ## increasing or reducing the number can impact performance
68 ## Default is 1024 for both rx and tx
72 ## VLAN strip offload mode for interface
74 # vlan-strip-offload on
77 ## Whitelist specific interface by specifying PCI address
80 ## Whitelist specific interface by specifying PCI address and in
81 ## addition specify custom parameters for this interface
86 ## Specify bonded interface and its slaves via PCI addresses
88 ## Bonded interface in XOR load balance mode (mode 2) with L3 and L4 headers
89 # vdev eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:03:00.0,xmit_policy=l34
90 # vdev eth_bond1,mode=2,slave=0000:02:00.1,slave=0000:03:00.1,xmit_policy=l34
92 ## Bonded interface in Active-Backup mode (mode 1)
93 # vdev eth_bond0,mode=1,slave=0000:02:00.0,slave=0000:03:00.0
94 # vdev eth_bond1,mode=1,slave=0000:02:00.1,slave=0000:03:00.1
96 ## Change the UIO driver used by VPP. Options are: igb_uio, vfio-pci
97 ## and uio_pci_generic (default)
100 ## Disable multi-segment buffers, improves performance but
101 ## disables Jumbo MTU support
104 ## Increase number of buffers allocated, needed only in scenarios with
105 ## large number of interfaces and worker threads. Value is per CPU socket.
109 ## Change hugepages allocation per-socket, needed only if there is need for
110 ## larger number of mbufs. Default is 256M on each detected CPU socket
111 # socket-mem 2048,2048
114 # Adjusting the plugin path depending on where the VPP plugins are:
117 # path /home/bms/vpp/build-root/install-vpp-native/vpp/lib/vpp_plugins
120 # Alternate syntax to choose plugin path
121 #plugin_path /home/bms/vpp/build-root/install-vpp-native/vpp/lib/vpp_plugins