Skip to content

Commit

Permalink
[#40, #41, #42] vBNG and Pktgen updated
Browse files Browse the repository at this point in the history
Some code refactoring done, and tested working on CSIT testbed
  • Loading branch information
michaelspedersen committed Aug 31, 2018
1 parent 4d7a82e commit 66621bb
Show file tree
Hide file tree
Showing 16 changed files with 311 additions and 42 deletions.
1 change: 1 addition & 0 deletions comparison/box-by-box-kvm-docker/Pktgen/.gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
.vagrant/
results/
Pktgen.xml
4 changes: 2 additions & 2 deletions comparison/box-by-box-kvm-docker/Pktgen/traffic_test.sh
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
#! /bin/bash

iterations=5
iterations=1
nfvbench="sudo docker exec -it nfvbench nfvbench -c /tmp/nfvbench/nfvbench_config.cfg"
prefix="default_prefix" # Change to something identifiable
prefix="aug_21_test" # Change to something identifiable

for iter in $(seq 1 $iterations); do
$nfvbench --rate pdr_ndr | sudo tee -a results/${prefix}_iter_${iter}.log
Expand Down
19 changes: 10 additions & 9 deletions comparison/box-by-box-kvm-docker/VPP_Configs/vBNG_container.conf
Original file line number Diff line number Diff line change
@@ -1,17 +1,18 @@
create bridge-domain 1
create bridge-domain 2

bin memif_socket_filename_add_del add id 1 filename /etc/vpp/sockets/memif1.sock
bin memif_socket_filename_add_del add id 2 filename /etc/vpp/sockets/memif2.sock
create vhost-user socket /var/run/vpp/sock3.sock server
create vhost-user socket /var/run/vpp/sock4.sock server

set int state VirtualEthernet0/0/0 up
set int state VirtualEthernet0/0/1 up

create interface memif id 1 socket-id 1 master
create interface memif id 2 socket-id 2 master
set int state memif1/1 up
set int state memif2/2 up

set int l2 bridge VirtualEthernet0/0/0 1
set int l2 bridge TenGigabitEthernet18/0/0 1
set int l2 bridge memif1/1 1
set int l2 bridge VirtualEthernet0/0/1 2
set int l2 bridge TenGigabitEthernet18/0/1 2
set int l2 bridge memif2/2 2

set int state TenGigabitEthernet18/0/0 up
set int state TenGigabitEthernet18/0/1 up
set int state memif1/1 up
set int state memif2/2 up
19 changes: 11 additions & 8 deletions comparison/box-by-box-kvm-docker/VPP_Configs/vBNG_vm.conf
Original file line number Diff line number Diff line change
@@ -1,12 +1,15 @@
create bridge-domain 1
create bridge-domain 2

create vhost-user socket /var/run/vpp/sock1.sock server
create vhost-user socket /var/run/vpp/sock2.sock server
create vhost-user socket /var/run/vpp/sock3.sock server
create vhost-user socket /var/run/vpp/sock4.sock server
set int state VirtualEthernet0/0/0 up
set int state VirtualEthernet0/0/1 up
set int state VirtualEthernet0/0/2 up
set int state VirtualEthernet0/0/3 up

set int l2 bridge TenGigabitEthernet18/0/0 1
set int l2 bridge VirtualEthernet0/0/0 1
set int l2 bridge VirtualEthernet0/0/2 1
set int l2 bridge TenGigabitEthernet18/0/1 2
set int l2 bridge VirtualEthernet0/0/1 2
set int l2 bridge VirtualEthernet0/0/3 2

set int state TenGigabitEthernet18/0/0 up
set int state TenGigabitEthernet18/0/1 up
set int state VirtualEthernet0/0/0 up
set int state VirtualEthernet0/0/1 up
23 changes: 22 additions & 1 deletion comparison/box-by-box-kvm-docker/vBNG/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,28 @@ The setup scripts are originally from https://github.com/onap/demo/blob/master/v

### VNF VM setup/testing

`vagrant up`
This should ideally be started from the box-by-box-kvm-docker directory:
`./vBNG_vm_test.sh`

Alternatively, it can be started from this (vBNG) directory:
`./run_vm.sh`

### Troubleshooting

If `vagrant up` fails to start the VM with vhost sockets attached, showing the following error:
```
There was an error talking to Libvirt. The error message is shown
below:
Call to virDomainCreateWithFlags failed: internal error: process exited while connecting to monitor: 2018-08-24T20:31:24.446153Z qemu-system-x86_64: -chardev socket,id=charnet1,path=/var/run/vpp/sock1.sock: Failed to connect socket /var/run/vpp/sock1.sock: Permission denied
```
you will need to configure libvirt to run QEMU as root:
- Open /etc/libvirt/qemu.conf
- Disable the security driver: `security_driver = "none"` (uncomment and/or modify if needed)
- Uncomment `user = "root"`
- Uncomment `group = "root"`
- Save the file and restart libvirtd: `service libvirtd restart`

### TODO

Expand Down
4 changes: 2 additions & 2 deletions comparison/box-by-box-kvm-docker/vBNG/Vagrantfile
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@

Vagrant.configure("2") do |config|

config.vm.box = "generic/ubuntu1604"
config.vm.box_version = "1.8.14"
config.vm.box = "vbng"

config.vm.synced_folder './', '/vagrant'
config.vm.define vm_name = "vBNG" do |config|
config.vm.hostname = vm_name
#config.vm.provision "shell", path: "vnf_vbng_install.sh", privileged: true
config.vm.provider :libvirt do |v|
v.cpus = 3
v.numa_nodes = [
Expand Down
13 changes: 12 additions & 1 deletion comparison/box-by-box-kvm-docker/vBNG/run_vm.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,9 +8,13 @@ for sock in "${SOCKET_NAMES[@]}"; do
if [ ! -e "${SOCKET_DIR}/${sock}" ]; then
echo "ERROR - Socket ${SOCKET_DIR}/${sock} not found"
exit 1
else
chown root:root ${SOCKET_DIR}/${sock}
fi
done

cpus=( 7 8 9 )

input="$1"

mydir=$(dirname $0)
Expand Down Expand Up @@ -45,7 +49,14 @@ virsh define vBNG.xml

vagrant reload

cmd="cp /vagrant/v_bng_* . && chmod +x v_bng_* && ./v_bng_install.sh"
count=0
new_id=$(virsh list | grep vBNG_vBNG | awk '{print $1}')
for cpu in "${cpus[@]}"; do
virsh vcpupin ${new_id} ${count} ${cpu}
(( count++ ))
done

cmd="cp /vagrant/vnf_vbng_install.sh . && chmod +x vnf_vbng_install.sh && ./vnf_vbng_install.sh"
vagrant ssh -c "$cmd"

echo ""
Expand Down
195 changes: 195 additions & 0 deletions comparison/box-by-box-kvm-docker/vBNG/vnf_vbng_install.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,195 @@
#!/bin/bash
# vnf_vbng_install.sh — configure VPP as a vBNG inside the guest VM.
#
# Steps:
#   1. Stop VPP and bind the data-plane NICs (every PCI Ethernet device
#      except the management NIC at 00:05.0) to the igb_uio DPDK driver.
#   2. Write /etc/vpp/startup.conf, whitelisting those PCI devices.
#   3. Start VPP, verify that exactly two data-plane interfaces came up,
#      then write /etc/vpp/setup.gate with their IP/ARP/route config and
#      restart VPP so it takes effect.
#
# Requires sudo rights and /vagrant/dpdk-devbind.py (Vagrant synced folder).
set -o xtrace # print commands during script execution

sudo service vpp stop

# Collect the data-plane PCI devices. 00:05.0 is the management interface
# and is excluded so it stays bound to its kernel driver.
pci_search="Ethernet"
pci_devs=($(lspci | grep "$pci_search" | awk '{print $1}' | grep -v "00:05.0"))
dev_list=""
if (( ${#pci_devs[@]} > 0 )); then
  for dev in "${pci_devs[@]}"; do
    # lspci prints the short form (bus:slot.func); VPP's dpdk stanza wants
    # the fully domain-qualified address.
    dev_list+="dev 0000:$dev "
  done
fi

sudo /vagrant/dpdk-devbind.py -b igb_uio "${pci_devs[@]}"

# Overwrite default VPP configuration (unquoted EOF: ${dev_list} expands here)
sudo bash -c "cat > /etc/vpp/startup.conf" <<EOF
unix {
nodaemon
log /var/log/vpp/vpp.log
full-coredump
cli-listen /run/vpp/cli.sock
gid vpp
startup-config /etc/vpp/setup.gate
}
api-trace {
## This stanza controls binary API tracing. Unless there is a very strong reason,
## please leave this feature enabled.
on
## Additional parameters:
##
## To set the number of binary API trace records in the circular buffer, configure nitems
##
## nitems <nnn>
##
## To save the api message table decode tables, configure a filename. Results in /tmp/<filename>
## Very handy for understanding api message changes between versions, identifying missing
## plugins, and so forth.
##
## save-api-table <filename>
}
api-segment {
gid vpp
}
cpu {
## In the VPP there is one main thread and optionally the user can create worker(s)
## The main thread and worker thread(s) can be pinned to CPU core(s) manually or automatically
## Manual pinning of thread(s) to CPU core(s)
## Set logical CPU core where main thread runs
main-core 0
## Set logical CPU core(s) where worker threads are running
corelist-workers 1-2
## Automatic pinning of thread(s) to CPU core(s)
## Sets number of CPU core(s) to be skipped (1 ... N-1)
## Skipped CPU core(s) are not used for pinning main thread and working thread(s).
## The main thread is automatically pinned to the first available CPU core and worker(s)
## are pinned to next free CPU core(s) after core assigned to main thread
# skip-cores 4
## Specify a number of workers to be created
## Workers are pinned to N consecutive CPU cores while skipping "skip-cores" CPU core(s)
## and main thread's CPU core
# workers 2
## Set scheduling policy and priority of main and worker threads
## Scheduling policy options are: other (SCHED_OTHER), batch (SCHED_BATCH)
## idle (SCHED_IDLE), fifo (SCHED_FIFO), rr (SCHED_RR)
# scheduler-policy fifo
## Scheduling priority is used only for "real-time policies (fifo and rr),
## and has to be in the range of priorities supported for a particular policy
# scheduler-priority 50
}
dpdk {
## Change default settings for all intefaces
# dev default {
## Number of receive queues, enables RSS
## Default is 1
# num-rx-queues 3
## Number of transmit queues, Default is equal
## to number of worker threads or 1 if no workers treads
# num-tx-queues 3
## Number of descriptors in transmit and receive rings
## increasing or reducing number can impact performance
## Default is 1024 for both rx and tx
# num-rx-desc 512
# num-tx-desc 512
## VLAN strip offload mode for interface
## Default is off
# vlan-strip-offload on
# }
## Whitelist specific interface by specifying PCI address
${dev_list}
## Whitelist specific interface by specifying PCI address and in
## addition specify custom parameters for this interface
# dev 0000:02:00.1 {
# num-rx-queues 2
# }
## Specify bonded interface and its slaves via PCI addresses
##
## Bonded interface in XOR load balance mode (mode 2) with L3 and L4 headers
# vdev eth_bond0,mode=2,slave=0000:02:00.0,slave=0000:03:00.0,xmit_policy=l34
# vdev eth_bond1,mode=2,slave=0000:02:00.1,slave=0000:03:00.1,xmit_policy=l34
##
## Bonded interface in Active-Back up mode (mode 1)
# vdev eth_bond0,mode=1,slave=0000:02:00.0,slave=0000:03:00.0
# vdev eth_bond1,mode=1,slave=0000:02:00.1,slave=0000:03:00.1
## Change UIO driver used by VPP, Options are: igb_uio, vfio-pci,
## uio_pci_generic or auto (default)
# uio-driver vfio-pci
## Disable mutli-segment buffers, improves performance but
## disables Jumbo MTU support
no-multi-seg
## Increase number of buffers allocated, needed only in scenarios with
## large number of interfaces and worker threads. Value is per CPU socket.
## Default is 16384
# num-mbufs 128000
## Change hugepages allocation per-socket, needed only if there is need for
## larger number of mbufs. Default is 256M on each detected CPU socket
# socket-mem 2048,2048
## Disables UDP / TCP TX checksum offload. Typically needed for use
## faster vector PMDs (together with no-multi-seg)
# no-tx-checksum-offload
}
# plugins {
## Adjusting the plugin path depending on where the VPP plugins are
# path /home/bms/vpp/build-root/install-vpp-native/vpp/lib64/vpp_plugins
## Disable all plugins by default and then selectively enable specific plugins
# plugin default { disable }
# plugin dpdk_plugin.so { enable }
# plugin acl_plugin.so { enable }
## Enable all plugins by default and then selectively disable specific plugins
# plugin dpdk_plugin.so { disable }
# plugin acl_plugin.so { disable }
# }
## Alternate syntax to choose plugin path
# plugin_path /home/bms/vpp/build-root/install-vpp-native/vpp/lib64/vpp_plugins
EOF

sudo service vpp start
sleep 10

# Pre-heating the API so that the following works (workaround?)
sudo vppctl show int

# The two whitelisted NICs must show up as DPDK Ethernet interfaces;
# anything else means the igb_uio binding or the whitelist failed.
intfs=($(sudo vppctl show int | grep Ethernet | awk '{print $1}'))
if (( ${#intfs[@]} != 2 )); then
  echo "ERROR: Number of interfaces should be 2 (is ${#intfs[@]})" >&2
  exit 1
fi

# Create interface configuration for VPP. Static ARP entries point at the
# traffic generator's MACs; the /8 routes steer the test traffic ranges.
sudo bash -c "cat > /etc/vpp/setup.gate" <<EOF
set int state ${intfs[0]} up
set interface ip address ${intfs[0]} 1.1.0.10/8
set int state ${intfs[1]} up
set interface ip address ${intfs[1]} 2.2.0.10/8
set ip arp static ${intfs[0]} 1.1.0.100 3c:fd:fe:a8:ab:98
set ip arp static ${intfs[1]} 2.2.0.100 3c:fd:fe:a8:ab:99
ip route add 10.0.0.0/8 via 1.1.0.100
ip route add 20.0.0.0/8 via 2.2.0.100
EOF

# Restart so VPP picks up the startup-config written above.
sudo service vpp restart
Original file line number Diff line number Diff line change
Expand Up @@ -171,14 +171,17 @@ EOF
bash -c "cat > /etc/vpp/setup.gate" <<EOF
bin memif_socket_filename_add_del add id 1 filename /run/vpp/memif1.sock
bin memif_socket_filename_add_del add id 2 filename /run/vpp/memif2.sock
create interface memif id 1 socket-id 1 hw-addr 52:54:00:00:00:aa slave
create interface memif id 2 socket-id 2 hw-addr 52:54:00:00:00:bb slave
set int ip addr memif1/1 10.3.0.10/24
set int ip addr memif2/2 10.1.0.10/24
create interface memif id 1 socket-id 1 hw-addr 52:54:00:00:00:aa slave rx-queues 2 tx-queues 2
create interface memif id 2 socket-id 2 hw-addr 52:54:00:00:00:bb slave rx-queues 2 tx-queues 2
set int ip addr memif1/1 1.1.0.10/8
set int ip addr memif2/2 2.2.0.10/8
set int state memif1/1 up
set int state memif2/2 up
set ip arp static memif1/1 10.3.0.120 3c:fd:fe:a8:ab:98
set ip arp static memif2/2 10.1.0.120 3c:fd:fe:a8:ab:99
set ip arp static memif1/1 1.1.0.100 3c:fd:fe:a8:ab:98
set ip arp static memif2/2 1.1.0.100 3c:fd:fe:a8:ab:99
ip route add 10.0.0.0/8 via 1.1.0.100
ip route add 20.0.0.0/8 via 2.2.0.100
EOF

Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
.vagrant/
vbng.box
Loading

0 comments on commit 66621bb

Please sign in to comment.