diff --git a/deploy_onos.md b/deploy_onos.md
new file mode 100644
index 00000000..d25b3fa8
--- /dev/null
+++ b/deploy_onos.md
@@ -0,0 +1,89 @@
+## Deploy on a bare-metal machine
+### Step 1: Install dependencies
+```bash
+apt install wget openjdk-11-jdk
+```
+
+### Step 2: Download ONOS
+```bash
+sudo mkdir -p /opt
+cd /opt
+sudo wget -c https://repo1.maven.org/maven2/org/onosproject/onos-releases/2.7.0/onos-2.7.0.tar.gz
+sudo tar xzf onos-2.7.0.tar.gz
+sudo mv onos-2.7.0 onos
+```
+
+### Step 3: Launch ONOS
+```bash
+vim onos/apache-karaf*/bin/setenv  # * matches your Apache Karaf version
+# add the following line
+export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64/
+```
+
+```bash
+export ONOS_APPS="drivers,openflow-base,openflow,proxyarp,lldpprovider,fwd,optical-model,hostprovider"  # select the apps to load, then start the service
+/opt/onos/bin/onos-service start
+```
+
+## Deploy in a Docker cluster
+### Step 1: Pull the Docker images
+```bash
+docker pull atomix/atomix
+docker pull onosproject/onos
+```
+
+### Step 2: Create the Atomix containers
+```bash
+docker run -t -d --name atomix-1 atomix/atomix
+docker run -t -d --name atomix-2 atomix/atomix
+docker run -t -d --name atomix-3 atomix/atomix
+```
+
+### Step 3: Clone the ONOS source code
+```bash
+cd ~
+git clone https://gerrit.onosproject.org/onos
+```
+
+### Step 4: Generate the Atomix config files
+```bash
+export OC1=172.17.0.2  # atomix-1 IP
+export OC2=172.17.0.3  # atomix-2 IP
+export OC3=172.17.0.4  # atomix-3 IP
+cd ~/onos
+./tools/test/bin/atomix-gen-config 172.17.0.2 ~/atomix-1.conf 172.17.0.2 172.17.0.3 172.17.0.4  # may require Python 2
+./tools/test/bin/atomix-gen-config 172.17.0.3 ~/atomix-2.conf 172.17.0.2 172.17.0.3 172.17.0.4
+./tools/test/bin/atomix-gen-config 172.17.0.4 ~/atomix-3.conf 172.17.0.2 172.17.0.3 172.17.0.4
+docker cp ~/atomix-1.conf atomix-1:/opt/atomix/conf/atomix.conf
+docker cp ~/atomix-2.conf atomix-2:/opt/atomix/conf/atomix.conf
+docker cp ~/atomix-3.conf atomix-3:/opt/atomix/conf/atomix.conf
+docker restart atomix-1
+docker restart atomix-2
+docker restart atomix-3
+```
+
+### Step 5: Create the ONOS containers
+```bash
+docker run -t -d -p 6653:6653 -e ONOS_APPS="drivers,openflow-base,netcfghostprovider,openflow,proxyarp,lldpprovider,fwd,optical-model,hostprovider,gui2" --name onos1 onosproject/onos
+docker run -t -d -p 6654:6653 -e ONOS_APPS="drivers,openflow-base,netcfghostprovider,openflow,proxyarp,lldpprovider,fwd,optical-model,hostprovider,gui2" --name onos2 onosproject/onos
+docker run -t -d -p 6655:6653 -e ONOS_APPS="drivers,openflow-base,netcfghostprovider,openflow,proxyarp,lldpprovider,fwd,optical-model,hostprovider,gui2" --name onos3 onosproject/onos
+```
+
+### Step 6: Generate the ONOS cluster config files
+```bash
+cd ~/onos
+./tools/test/bin/onos-gen-config 172.17.0.5 ~/cluster-1.json -n 172.17.0.2 172.17.0.3 172.17.0.4  # may require Python 2
+./tools/test/bin/onos-gen-config 172.17.0.6 ~/cluster-2.json -n 172.17.0.2 172.17.0.3 172.17.0.4
+./tools/test/bin/onos-gen-config 172.17.0.7 ~/cluster-3.json -n 172.17.0.2 172.17.0.3 172.17.0.4
+docker exec onos1 mkdir /root/onos/config
+docker exec onos2 mkdir /root/onos/config
+docker exec onos3 mkdir /root/onos/config
+docker cp ~/cluster-1.json onos1:/root/onos/config/cluster.json
+docker cp ~/cluster-2.json onos2:/root/onos/config/cluster.json
+docker cp ~/cluster-3.json onos3:/root/onos/config/cluster.json
+docker restart onos1
+docker restart onos2
+docker restart onos3
+```
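+
+### Step 7: Verify the cluster (optional)
+One quick way to confirm that the three ONOS instances actually formed a cluster through the Atomix nodes is to query the ONOS REST API from the Docker host. This is only a sketch: it assumes the default `onos/rocks` REST credentials and the container IPs used in the steps above, so adjust both if your environment assigns different addresses.
+```bash
+# each query should list all three controller nodes
+curl -s -u onos:rocks http://172.17.0.5:8181/onos/v1/cluster
+curl -s -u onos:rocks http://172.17.0.6:8181/onos/v1/cluster
+curl -s -u onos:rocks http://172.17.0.7:8181/onos/v1/cluster
+```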
+
+
\ No newline at end of file
diff --git a/docker.md b/docker.md
new file mode 100644
index 00000000..5e1e2f4e
--- /dev/null
+++ b/docker.md
@@ -0,0 +1,215 @@
+
+This document describes the development and testing work for `issue25: Investigate the possibility of replacing lxc/lxd with docker`.
+
+* Figure out the detailed steps to replace the lxc/lxd engine with the Docker engine.
+  > The replacement process can be divided into two steps:
+  > * Change the LXD environment on the Master and the Workers to Docker.
+  > * Change the LXD container deployment and configuration commands on the Master and the Workers to commands with the same function under Docker.
+
+* Study the files under the Distrinet/mininet/provision folder.
+  > Distrinet provides an infrastructure provisioning mechanism that uses Ansible to automatically install and configure LXD on each machine used during the experiment.
+  > * install-lxd.yml: installs LXD and downloads two images (an Ubuntu 18.04 image to emulate the vHosts, and a modified version of that image with OVS installed) on each machine via Ansible. We change it to install Docker instead; the two images need to be rebuilt for Docker, and the OVS image build is described in ovs_image.md.
+  > * configure-lxd-no-clustering.yml: runs `lxd init`, which is not needed with Docker, so we only use its Docker counterpart to distribute and load the two images above.
+
+* Create the related files (config, deploy, install, etc.) for Docker provisioning by referring to the LXD scenario.
+  > install-docker.yml: the Docker version of install-lxd.yml
+  ```
+  ansible-playbook ~/install-docker.yml
+  ```
+  > configure-docker.yml: the Docker counterpart of configure-lxd-no-clustering.yml; it pulls and tags the two images on every host
+  ```
+  ansible-playbook ~/configure-docker.yml
+  ```
+  > Reading through the code in the LXD scenario, we can see that container creation and configuration, as well as the creation of network interfaces and links, are mainly concentrated in lxc_container.py and distrinet.py. We need to replace the code that uses LXC/LXD with equivalent Docker commands.
+
+  > As for the network interfaces and links, they are discussed in more detail in issue33. For container creation and configuration, see the changes to lxc_container.py. The most important change is that we use a veth pair plus a Linux bridge to replace the `lxc network attach` command, and we use network namespaces to move one end of the veth pair into the container (a sketch of this plumbing is given at the end of this section).
+
+  > We do not rename lxc_container.py because the file is referenced in several places in the project. We added an autoSetDocker parameter to it, which lets you choose whether to use Docker or not; this parameter is set by the external --docker option when Distrinet is run on the Client.
+
+* The Docker-based Distrinet environment includes the following three entities:
+  * Client: host on which the Distrinet script is running; it decides where to place the vNodes across the physical infrastructure (round-robin by default). The Client must be able to connect via SSH to the Master host.
+  * Master: host that acts as a relay to interconnect the Client with all the Worker hosts. It communicates with the Client and the different Workers via SSH. Note that the Master can also be configured as a Worker.
+  * Worker(s): host(s) where all the vNodes (vSwitches and vHosts) are running. vNodes are managed by the Master and the Client via the admin network.
+  > To ensure vNodes deploy smoothly, IP forwarding must be enabled on the Workers and each container's network namespace must be made visible to the host under /var/run/netns. The host-level commands you may need on the Workers and the Master are listed after the following sketch.
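+
+  > To make the veth-pair mechanism described above concrete, the sketch below shows roughly what is executed on a Worker to attach one interface to a container; it mirrors the Docker branch of addContainerInterface in lxc_container.py. The container name h1, the bridge/interface names intf1-br and intf1, and the in-container name eth0 are illustrative only.
+  ```bash
+  # host side: one bridge and one veth pair per link endpoint
+  brctl addbr intf1-br
+  ip link add vethintf1 type veth peer name intf1
+  brctl addif intf1-br intf1
+  ip link set up intf1
+  # expose the container's network namespace to `ip netns`
+  pid=$(docker inspect -f '{{.State.Pid}}' h1)
+  mkdir -p /var/run/netns
+  ln -s /proc/$pid/ns/net /var/run/netns/h1
+  # move one end of the veth pair into the container and rename it
+  ip link set vethintf1 netns h1
+  ip netns exec h1 ip link set dev vethintf1 name eth0
+  ip netns exec h1 ip link set eth0 up
+  ip link set up intf1-br
+  ```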
+ ``` + sysctl net.ipv4.conf.all.forwarding=1 + iptables --policy FORWARD ACCEPT + mkdir /var/run/netns + ulimit -n 196835 + ``` + +* Experimental environment Configuration + >clinet:192.168.71.128,master:192.168.71.141,worker:192.168.71.142.They are all Ubuntu virtual machines running on the same physical host + >We use distrinet to create a linear topology with two switches and two hosts, i.e. : + >Master: + ``` + root@master:~# ryu-manager /usr/lib/python3/dist-packages/ryu/app/simple_switch_13.py --verbose + ``` + >client: + ``` + jxq@client:~$ python3 bin/dmn --workers="192.168.71.141,192.168.71.142" --controller=lxcremote,ip=192.168.0.1 --topo=linear,2 --docker + ``` + +* Experimental results + >Distrinet's CLI can be generated normally and pingall test also passes + # ![client](https://github.com/J980419xq/Distrinet/blob/master/images/cli.png) + >The master and worker can create containers and interfaces normally + >master: ip a + ``` + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: ens33: mtu 1500 qdisc fq_codel state UP group default qlen 1000 + link/ether 00:0c:29:eb:10:f4 brd ff:ff:ff:ff:ff:ff + inet 192.168.71.141/24 brd 192.168.71.255 scope global noprefixroute ens33 + valid_lft forever preferred_lft forever + inet6 fe80::20c:29ff:feeb:10f4/64 scope link + valid_lft forever preferred_lft forever + 3: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether 02:42:29:d0:a1:78 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + inet6 fe80::42:29ff:fed0:a178/64 scope link + valid_lft forever preferred_lft forever + 26: admin-br: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 72:97:2f:43:4c:cb brd ff:ff:ff:ff:ff:ff + inet 192.168.0.1/8 brd 192.255.255.255 scope global admin-br + valid_lft forever preferred_lft forever + inet6 fe80::80f7:25ff:fe05:b735/64 scope link + valid_lft forever preferred_lft forever + 27: intf3@if28: mtu 1500 qdisc noqueue master admin-br state UP group default qlen 1000 + link/ether 72:97:2f:43:4c:cb brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet6 fe80::7097:2fff:fe43:4ccb/64 scope link + valid_lft forever preferred_lft forever + 29: intf1@if30: mtu 1500 qdisc noqueue master admin-br state UP group default qlen 1000 + link/ether 9a:aa:18:f7:72:0b brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::98aa:18ff:fef7:720b/64 scope link + valid_lft forever preferred_lft forever + 31: vNone@vadmin-br: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 36:9f:9e:c7:9b:52 brd ff:ff:ff:ff:ff:ff + inet6 fe80::349f:9eff:fec7:9b52/64 scope link + valid_lft forever preferred_lft forever + 32: vadmin-br@vNone: mtu 1500 qdisc noqueue master admin-br state UP group default qlen 1000 + link/ether a2:06:41:fd:a4:7c brd ff:ff:ff:ff:ff:ff + inet6 fe80::a006:41ff:fefd:a47c/64 scope link + valid_lft forever preferred_lft forever + 33: vx_21: mtu 1500 qdisc noqueue master admin-br state UNKNOWN group default qlen 1000 + link/ether 9e:3b:48:ef:89:6f brd ff:ff:ff:ff:ff:ff + inet6 fe80::9c3b:48ff:feef:896f/64 scope link + valid_lft forever preferred_lft forever + 34: intf6: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether c2:e5:e4:21:a2:78 brd ff:ff:ff:ff:ff:ff + inet6 fe80::c0e5:e4ff:fe21:a278/64 scope link + valid_lft 
forever preferred_lft forever + 35: intf5@if36: mtu 1500 qdisc noqueue master intf6 state UP group default qlen 1000 + link/ether c2:e5:e4:21:a2:78 brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::c0e5:e4ff:fe21:a278/64 scope link + valid_lft forever preferred_lft forever + 37: intf8: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether a6:0f:bc:97:d5:4d brd ff:ff:ff:ff:ff:ff + inet6 fe80::f4e0:f3ff:fe8f:37f5/64 scope link + valid_lft forever preferred_lft forever + 38: intf7@if39: mtu 1500 qdisc noqueue master intf8 state UP group default qlen 1000 + link/ether f6:e0:f3:8f:37:f5 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet6 fe80::f4e0:f3ff:fe8f:37f5/64 scope link + valid_lft forever preferred_lft forever + 40: vintf8@vintf6: mtu 1500 qdisc noqueue master intf8 state UP group default qlen 1000 + link/ether a6:0f:bc:97:d5:4d brd ff:ff:ff:ff:ff:ff + inet6 fe80::a40f:bcff:fe97:d54d/64 scope link + valid_lft forever preferred_lft forever + 41: vintf6@vintf8: mtu 1500 qdisc noqueue master intf6 state UP group default qlen 1000 + link/ether e6:31:f4:e8:39:38 brd ff:ff:ff:ff:ff:ff + inet6 fe80::e431:f4ff:fee8:3938/64 scope link + valid_lft forever preferred_lft forever + 42: intf16: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 4e:63:dc:70:fd:e0 brd ff:ff:ff:ff:ff:ff + inet6 fe80::b085:44ff:fed3:599/64 scope link + valid_lft forever preferred_lft forever + 43: intf15@if44: mtu 1500 qdisc noqueue master intf16 state UP group default qlen 1000 + link/ether b2:85:44:d3:05:99 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet6 fe80::b085:44ff:fed3:599/64 scope link + valid_lft forever preferred_lft forever + 45: vx_26: mtu 1500 qdisc noqueue master intf16 state UNKNOWN group default qlen 1000 + link/ether 4e:63:dc:70:fd:e0 brd ff:ff:ff:ff:ff:ff + inet6 fe80::4c63:dcff:fe70:fde0/64 scope link + valid_lft forever preferred_lft forever + ``` + # ![master container](https://github.com/J980419xq/Distrinet/blob/master/images/master.png) + >worker: ip a + ``` + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: ens33: mtu 1500 qdisc fq_codel state UP group default qlen 1000 + link/ether 00:0c:29:ae:98:b2 brd ff:ff:ff:ff:ff:ff + inet 192.168.71.142/24 brd 192.168.71.255 scope global noprefixroute ens33 + valid_lft forever preferred_lft forever + inet6 fe80::20c:29ff:feae:98b2/64 scope link + valid_lft forever preferred_lft forever + 3: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether 02:42:5a:e8:15:54 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 22: admin-br: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 22:7e:11:52:c0:5c brd ff:ff:ff:ff:ff:ff + inet6 fe80::bc16:11ff:fea9:78a4/64 scope link + valid_lft forever preferred_lft forever + 23: intf2@if24: mtu 1500 qdisc noqueue master admin-br state UP group default qlen 1000 + link/ether e2:94:dc:27:55:9d brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet6 fe80::e094:dcff:fe27:559d/64 scope link + valid_lft forever preferred_lft forever + 25: intf4@if26: mtu 1500 qdisc noqueue master admin-br state UP group default qlen 1000 + link/ether be:16:11:a9:78:a4 brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::bc16:11ff:fea9:78a4/64 scope link + valid_lft forever preferred_lft forever + 27: vx_21: 
mtu 1500 qdisc noqueue master admin-br state UNKNOWN group default qlen 1000 + link/ether 22:7e:11:52:c0:5c brd ff:ff:ff:ff:ff:ff + inet6 fe80::207e:11ff:fe52:c05c/64 scope link + valid_lft forever preferred_lft forever + 28: intf10: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 2a:54:21:60:79:c5 brd ff:ff:ff:ff:ff:ff + inet6 fe80::9030:e2ff:fedb:b323/64 scope link + valid_lft forever preferred_lft forever + 29: intf9@if30: mtu 1500 qdisc noqueue master intf10 state UP group default qlen 1000 + link/ether 92:30:e2:db:b3:23 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet6 fe80::9030:e2ff:fedb:b323/64 scope link + valid_lft forever preferred_lft forever + 31: intf12: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 6e:c6:34:19:9e:cc brd ff:ff:ff:ff:ff:ff + inet6 fe80::6cc6:34ff:fe19:9ecc/64 scope link + valid_lft forever preferred_lft forever + 32: intf11@if33: mtu 1500 qdisc noqueue master intf12 state UP group default qlen 1000 + link/ether 6e:c6:34:19:9e:cc brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::6cc6:34ff:fe19:9ecc/64 scope link + valid_lft forever preferred_lft forever + 34: vintf12@vintf10: mtu 1500 qdisc noqueue master intf12 state UP group default qlen 1000 + link/ether b2:c1:38:57:4c:ca brd ff:ff:ff:ff:ff:ff + inet6 fe80::b0c1:38ff:fe57:4cca/64 scope link + valid_lft forever preferred_lft forever + 35: vintf10@vintf12: mtu 1500 qdisc noqueue master intf10 state UP group default qlen 1000 + link/ether 2a:54:21:60:79:c5 brd ff:ff:ff:ff:ff:ff + inet6 fe80::2854:21ff:fe60:79c5/64 scope link + valid_lft forever preferred_lft forever + 36: intf14: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 0e:fd:28:ce:0e:8d brd ff:ff:ff:ff:ff:ff + inet6 fe80::cfd:28ff:fece:e8d/64 scope link + valid_lft forever preferred_lft forever + 37: intf13@if38: mtu 1500 qdisc noqueue master intf14 state UP group default qlen 1000 + link/ether 0e:fd:28:ce:0e:8d brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::cfd:28ff:fece:e8d/64 scope link + valid_lft forever preferred_lft forever + 39: vx_26: mtu 1500 qdisc noqueue master intf14 state UNKNOWN group default qlen 1000 + link/ether 86:5e:28:6d:96:ea brd ff:ff:ff:ff:ff:ff + inet6 fe80::845e:28ff:fe6d:96ea/64 scope link + valid_lft forever preferred_lft forever + ``` + # ![worker container](https://github.com/J980419xq/Distrinet/blob/master/images/worker.png) + + + diff --git a/images/cli.png b/images/cli.png new file mode 100755 index 00000000..cc605d9c Binary files /dev/null and b/images/cli.png differ diff --git a/images/master.png b/images/master.png new file mode 100755 index 00000000..e8e475a1 Binary files /dev/null and b/images/master.png differ diff --git a/images/worker.png b/images/worker.png new file mode 100755 index 00000000..a0117de4 Binary files /dev/null and b/images/worker.png differ diff --git a/mininet/bin/dmn b/mininet/bin/dmn index 9c0f031e..684771c7 100755 --- a/mininet/bin/dmn +++ b/mininet/bin/dmn @@ -298,10 +298,16 @@ class DistrinetRunner( object ): dest='test', help='|'.join( TESTS.keys() ) ) opts.add_option( '--xterms', '-x', action='store_true', default=False, help='spawn xterms for each node' ) - opts.add_option( '--ipbase', '-i', type='string', default='10.0.0.0/8', + opts.add_option( '--ipbase', '-i', type='string', default='10.10.1.5/16', help='base IP address for hosts' ) + opts.add_option( '--providerIpbase', '-p', type='string', default='20.20.1.5/16', + help='base Provider IP address for hosts' ) + '''opts.add_option( '--controllIpbase', '-j', 
type='string', default='10.10.1.6/16', + help='base controll IP address for hosts' )''' opts.add_option( '--mac', action='store_true', default=False, help='automatically set host MACs' ) + opts.add_option('--docker',action='store_true', + default=False,help='replace lxd with docker' ) opts.add_option( '--arp', action='store_true', default=False, help='set all-pairs ARP entries' ) opts.add_option( '--verbosity', '-v', type='choice', @@ -548,6 +554,7 @@ class DistrinetRunner( object ): if opts.workers: workers = opts.workers.split( ',' ) master = workers[0] + workers=workers[1:] if opts.workers or opts.provision or opts.placement_file_path: warn( '*** WARNING: Experimental cloud mode!\n' @@ -555,7 +562,7 @@ class DistrinetRunner( object ): host, switch, link = LxcNode, LxcOVSSwitch, CloudLink ## - adminIpBase='192.168.0.1/8' + adminIpBase='192.168.1.1/16' waitConnected=False build=False if not opts.placement_file_path: @@ -585,7 +592,6 @@ class DistrinetRunner( object ): # get the info from the experiment file jump=experiment_data["bastion"] workers=experiment_data["workers"] - master=workers[0] # use the dummy mapper with the placement in the experiment file mapper= DummyMapper(places=experiment_data["mapping"]) @@ -614,8 +620,10 @@ class DistrinetRunner( object ): switch=switch, host=host, controller=controller, link=link, - ipBase=opts.ipbase, inNamespace=opts.innamespace, + ipBase=opts.ipbase, providerIpBase=opts.providerIpbase, + inNamespace=opts.innamespace, xterms=opts.xterms, autoSetMacs=opts.mac, + autoSetDocker=opts.docker, autoStaticArp=opts.arp, autoPinCpus=opts.pin, listenPort=opts.listenport ) @@ -710,3 +718,4 @@ if __name__ == "__main__": stackTrace = traceback.format_exc() debug( stackTrace + "\n" ) cleanup() + diff --git a/mininet/custom/fat-tree.py b/mininet/custom/fat-tree.py new file mode 100644 index 00000000..73f194a3 --- /dev/null +++ b/mininet/custom/fat-tree.py @@ -0,0 +1,91 @@ +from xml.dom.expatbuilder import theDOMImplementation +from mininet.topo import Topo +from mininet.net import Mininet +from mininet.node import RemoteController +from mininet.link import TCLink +from mininet.util import dumpNodeConnections + +# Standard fattree topology: +# Fat tree is a switch centric topology. Support to expand the number of paths while expanding horizontally; All switches are common devices with the same number of ports, which reduces the network construction cost. +# Specifically, the fattree structure is divided into three layers: core layer (core switch), aggregation layer (aggregation switch) and access layer (edge switch). A K-ary fattree can be summarized into five features: +# 1. Each switch has K ports; +# 2. The core layer is the top layer, with a total of (K/2) ^2 switches; +# 3. There are K pods in total, and each pod is composed of K switches. The aggregation layer and access layer account for K/2 switches respectively; +# 4. Each switch in the access layer can accommodate K/2 servers. thus, the K-ary fattree has a total of K pods, each pod can accommodate K*K/4 servers, and all pods can accommodate K*K*K/4 servers; +# 5. There are K paths between any two pods. 
+# In other words, there are (K/2)^2+K^2 switches and (K^3)/4 servers in the K-ary fattree topology +# The relationship between the value of K in the fattree topology and the number of switches and servers is summarized as follows: +# --------------------------------------------------------------------------------------------------------- +# Value of K | 2 | 4 | 6 | 8 | 10 | 12 | 14 | 16 | 32 | 64 +# Number of switches | 5 | 20 | 45 | 80 | 125 | 180 | 245 | 320 | 1280| 5120 +# Number of servers | 2 | 16 | 54 | 128 | 250 | 432 | 686 | 1024| 8192| 65536 +# --------------------------------------------------------------------------------------------------------- +# Custom fattree topology: +# To enable this script to support a larger topology, we add an additional parameter S to control the number of servers that each edge switch can connect to. +# In this case, there are (K/2)^2+K^2 switches and (K/2)*K*S servers in the K-ary custom topology (the valus of S in standard fattree topology is K//2). +# The relationship between the value of K, the value of S and the number of switches and servers is summarized as follows: +# --------------------------------------------------------------------------------------------------------- +# Value of K | 2 | 4 | 6 | 8 | 10 | 12 | 14 | 16 | 32 | 64 +# Number of switches | 5 | 20 | 45 | 80 | 125 | 180 | 245 | 320 | 1280| 5120 +# Number of servers | 2S | 8S | 18S | 32S | 50S | 72S | 98S | 128S| 512S| 2048S +# --------------------------------------------------------------------------------------------------------- + +class MyTopo(Topo): + + def __init__(self): + super(MyTopo, self).__init__() + + # K setting + K = 12 + + # S setting + S = K//2 + + + # Marking the number of switch for per level + pod = K + L1 = (pod//2)**2 + L2 = pod*pod//2 + L3 = L2 + + # Starting create the switch + c = [] # core switch + a = [] # aggregate switch + e = [] # edge switch + + # notice: switch label is a special data structure + for i in range(L1): + # label from 1 to n,not start with 0 + c_sw = self.addSwitch('c{}'.format(i+1)) + c.append(c_sw) + + for i in range(L2): + a_sw = self.addSwitch('a{}'.format(L1+i+1)) + a.append(a_sw) + + for i in range(L3): + e_sw = self.addSwitch('e{}'.format(L1+L2+i+1)) + e.append(e_sw) + + # Starting create the link between switchs + # first the L1 level and L2 level link + for i in range(L1): + c_sw = c[i] + start = i % (pod//2) + for j in range(pod): + self.addLink(c_sw, a[start+j*(pod//2)]) + + # second the L2 level and L3 level link + for i in range(L2): + group = i//(pod//2) + for j in range(pod//2): + self.addLink(a[i], e[group*(pod//2)+j]) + + # Starting create the host and create link between switchs and hosts + for i in range(L3): + for j in range(S): + hs = self.addHost('h{}'.format(i*S+j+1)) + self.addLink(e[i], hs) + + +topos = {"mytopo": (lambda: MyTopo())} diff --git a/mininet/mininet/assh.py b/mininet/mininet/assh.py index 67e7c3a6..955703de 100644 --- a/mininet/mininet/assh.py +++ b/mininet/mininet/assh.py @@ -136,6 +136,7 @@ async def _connect(self, host, port): try: async with connect(host=host, port=port) as conn: self.conn = conn + info ("connect for {}@{} via {}:{}: \n".format(self.username, self.host, self.bastion, port)) while self.run: await asyncio.sleep(1) except Exception as e: diff --git a/mininet/mininet/cloudlink.py b/mininet/mininet/cloudlink.py index 5ceb81a5..d2db99fb 100644 --- a/mininet/mininet/cloudlink.py +++ b/mininet/mininet/cloudlink.py @@ -39,7 +39,7 @@ class CloudLink( Link ): def __init__( self, node1, 
node2, port1=None, port2=None, intfName1=None, intfName2=None, addr1=None, addr2=None, intf=TCIntf, cls1=None, cls2=None, params1=None, - params2=None, fast=True, **params ): + params2=None, fast=True,autoSetDocker=False, **params ): """Create veth link to another node, making two new interfaces. node1: first node node2: second node @@ -88,7 +88,7 @@ def __init__( self, node1, node2, port1=None, port2=None, # Make interfaces interfaces = self.makeIntfPair( intfName1, intfName2, addr1, addr2, - node1, node2, deleteIntfs=False ) + node1, node2, deleteIntfs=False,autoSetDocker=autoSetDocker ) if not cls1: cls1 = intf @@ -133,7 +133,7 @@ def intfName( self, node, n ): @classmethod def makeIntfPair( cls, intfname1, intfname2, addr1=None, addr2=None, - node1=None, node2=None, deleteIntfs=True ): + node1=None, node2=None, deleteIntfs=True,autoSetDocker=False ): """Create pair of interfaces intfname1: name for interface 1 intfname2: name for interface 2 @@ -151,8 +151,8 @@ def makeIntfPair( cls, intfname1, intfname2, addr1=None, addr2=None, raise Exception("Must implement delete interface") # Add the interface on both ends: - br1 = node1.addContainerInterface(intfName=intfname1) - br2 = node2.addContainerInterface(intfName=intfname2) + br1 = node1.addContainerInterface(intfName=intfname1,autoSetDocker=autoSetDocker) + br2 = node2.addContainerInterface(intfName=intfname2,autoSetDocker=autoSetDocker) return (br1, br2) diff --git a/mininet/mininet/distrinet.py b/mininet/mininet/distrinet.py index e5100d53..8d91cd9a 100755 --- a/mininet/mininet/distrinet.py +++ b/mininet/mininet/distrinet.py @@ -139,8 +139,8 @@ def __init__( self, topo=None, switch=LxcSwitch, host=LxcNode, controller=LxcRemoteController, link=CloudLink, intf=TCIntf, mapper=None, build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8', - adminIpBase='192.168.0.1/8', - autoSetMacs=False, autoPinCpus=False, + adminIpBase='192.168.0.1/8',providerIpBase='172.16.62.1/8', + autoSetMacs=False, autoSetDocker=False,autoPinCpus=False, listenPort=None, waitConnected=False, waitConnectionTimeout=5, jump=None, user="root", client_keys=None, master=None, pub_id=None, **kwargs): @@ -178,6 +178,18 @@ def __init__( self, topo=None, switch=LxcSwitch, host=LxcNode, # Start for address allocation self.nextIP = hostIP if hostIP > 0 else 1 + self.providerIpBase = providerIpBase + #self.controllIpBase = controllIpBase + + '''self.controllIpBaseNum, self.controllPrefixLen = netParse( self.controllIpBase ) + controllIP = ( 0xffffffff >> self.controllPrefixLen ) & self.controllIpBaseNum + # Start for address allocation + self.controllNextIP = controllIP if controllIP > 0 else 1''' + + self.providerIpBaseNum, self.providerPrefixLen = netParse( self.providerIpBase ) + providerIP = ( 0xffffffff >> self.providerPrefixLen ) & self.providerIpBaseNum + # Start for address allocation + self.providerNextIP = providerIP if providerIP > 0 else 1 self.adminIpBase = adminIpBase self.adminIpBaseNum, self.adminPrefixLen = netParse( self.adminIpBase ) @@ -190,6 +202,7 @@ def __init__( self, topo=None, switch=LxcSwitch, host=LxcNode, self.xterms = xterms self.cleanup = cleanup self.autoSetMacs = autoSetMacs + self.autoSetDocker=autoSetDocker # self.autoStaticArp = autoStaticArp self.autoPinCpus = autoPinCpus # self.numCores = numCores() @@ -220,14 +233,14 @@ def runforever(loop): self.client_keys = client_keys self.masterhost = master - _info ("Connecting to master node\n") + info ("Connecting to master node\n") self.masterSsh = ASsh(loop=self.loop, host=self.masterhost, 
username=self.user, bastion=self.jump, client_keys=self.client_keys) self.masterSsh.connect() self.masterSsh.waitConnected() - _info ("connected to master node\n") - + info ("connected to master node\n") + self.connectedToAdminNetwork=set() self.nameToNode = {} # name to Node (Host/Switch) objects self.terms = [] # list of spawned xterm processes @@ -387,7 +400,7 @@ def addLink( self, node1, node2, port1=None, port2=None, # Set default MAC - this should probably be in Link options.setdefault( 'addr1', self.randMac() ) options.setdefault( 'addr2', self.randMac() ) - + options.setdefault('autoSetDocker',self.autoSetDocker) params1 = None params2 = None if self.mapper: @@ -460,7 +473,8 @@ def buildFromTopo( self, topo=None ): _ip = "{}/{}".format(ipAdd(self.adminNextIP, ipBaseNum=self.adminIpBaseNum, prefixLen=self.adminPrefixLen), self.adminPrefixLen) self.adminNextIP += 1 self.host.createMasterAdminNetwork(self.masterSsh, brname="admin-br", ip=_ip) - _info (" admin network created on {}\n".format(self.masterhost)) + self.connectedToAdminNetwork.add(self.masterhost) + info (" admin network created on {}\n".format(self.masterhost)) assert (isinstance(self.controllers, list)) @@ -485,11 +499,11 @@ def buildFromTopo( self, topo=None ): # == Hosts =========================================================== for hostName in topo.hosts(): - _ip = "{}/{}".format(ipAdd( self.adminNextIP, ipBaseNum=self.adminIpBaseNum, prefixLen=self.adminPrefixLen),self.adminPrefixLen) - self.adminNextIP += 1 + ''' _ip = "{}/{}".format(ipAdd( self.adminNextIP, ipBaseNum=self.adminIpBaseNum, prefixLen=self.adminPrefixLen),self.adminPrefixLen) + self.adminNextIP += 1''' # __ip= newAdminIp(admin_ip) + self.addHost( name=hostName, - admin_ip= _ip, loop=self.loop, master=self.masterSsh, username=self.user, @@ -501,10 +515,9 @@ def buildFromTopo( self, topo=None ): info( '\n*** Adding switches:\n' ) for switchName in topo.switches(): - _ip = "{}/{}".format(ipAdd( self.adminNextIP, ipBaseNum=self.adminIpBaseNum, prefixLen=self.adminPrefixLen),self.adminPrefixLen) - self.adminNextIP += 1 + '''_ip = "{}/{}".format(ipAdd( self.adminNextIP, ipBaseNum=self.adminIpBaseNum, prefixLen=self.adminPrefixLen),self.adminPrefixLen) + self.adminNextIP += 1''' self.addSwitch( name=switchName, - admin_ip=_ip, loop=self.loop, master=self.masterSsh, username=self.user, @@ -518,7 +531,7 @@ def buildFromTopo( self, topo=None ): if not waitStart: nodes = self.hosts + self.switches - _info ("[starting\n") + info ("[starting\n") for node in nodes: _info ("connectTarget {} ".format( node.name)) node.connectTarget() @@ -530,57 +543,92 @@ def buildFromTopo( self, topo=None ): count = 0 for node in nodes: _info ("createContainer {} ".format( node.name)) - node.createContainer() + _ip = "{}/{}".format(ipAdd( self.providerNextIP, ipBaseNum=self.providerIpBaseNum, prefixLen=self.providerPrefixLen),self.providerPrefixLen) + self.providerNextIP += 1 + node.createContainer(autoSetDocker=self.autoSetDocker,providerIP=_ip) count += 1 - if count > 50: - output("50 nodes created...\n") + if count > 100: + output("100 nodes created...\n") sleep(10) count = 0 - + for node in nodes: node.waitCreated() _info ("createdContainer {} ".format(node.name)) - + info ("nodes created\n") + + cmds = [] + for node in nodes: + if node.target not in self.connectedToAdminNetwork: + _ip = "{}/{}".format(ipAdd( self.adminNextIP, ipBaseNum=self.adminIpBaseNum, prefixLen=self.adminPrefixLen),self.adminPrefixLen) + self.adminNextIP += 1 + cmds = cmds + 
node.connectToAdminNetwork(admin_ip=_ip,master=node.masternode.host, target=node.target, link_id=CloudLink.newLinkId(), admin_br="admin-br", wait=False) + self.connectedToAdminNetwork.add(node.target) + if len (cmds) > 0: + cmd = ';'.join(cmds) + self.masterSsh.cmd(cmd) + sleep(10) + + count=0 for node in nodes: _info ("create admin interface {} ".format( node.name)) - node.addContainerInterface(intfName="admin", brname="admin-br", wait=False) + node.addContainerInterface(intfName="admin", brname="admin-br", wait=False,autoSetDocker=self.autoSetDocker) + count+=1 + if count>100: + sleep(10) + count=0 for node in nodes: node.targetSshWaitOutput() _info ("admin interface created on {} ".format( node.name)) _info ("\n") - cmds = [] - for node in nodes: - cmds = cmds + node.connectToAdminNetwork(master=node.masternode.host, target=node.target, link_id=CloudLink.newLinkId(), admin_br="admin-br", wait=False) - if len (cmds) > 0: - cmd = ';'.join(cmds) - self.masterSsh.cmd(cmd) - + count=0 for node in nodes: - node.configureContainer(wait=False) + _ip = "{}/{}".format(ipAdd( self.adminNextIP, ipBaseNum=self.adminIpBaseNum, prefixLen=self.adminPrefixLen),self.adminPrefixLen) + self.adminNextIP += 1 + '''_controllIp = "{}/{}".format(ipAdd( self.controllNextIP, ipBaseNum=self.controllIpBaseNum, prefixLen=self.controllPrefixLen),self.controllPrefixLen) + self.controllNextIP += 1''' + node.configureContainer(admin_ip=_ip,wait=False,autoSetDocker=self.autoSetDocker) + count+=1 + if count>100: + sleep(10) + count=0 + for node in nodes: node.targetSshWaitOutput() for node in nodes: - _info ("connecting {} ".format( node.name)) + info ("connecting {} ".format( node.name)) node.connect() - for node in nodes: node.waitConnected() - _info ("connected {} ".format( node.name)) - + info ("connected {} ".format( node.name)) + '''info ("starting nova compute") + for host in self.hosts: + host.startNovacompute() + info ("started nova compute")''' + count=0 for node in nodes: - _info ("startshell {} ".format( node.name) ) + info ("startshell {} ".format( node.name) ) node.asyncStartShell() + count+=1 + if count>100: + sleep(10) + count=0 for node in nodes: node.waitStarted() - _info ("startedshell {}".format( node.name)) - + info ("startedshell {}".format( node.name)) + + count=0 for node in nodes: - _info ("finalize {}".format( node.name)) + info ("finalize {}".format( node.name)) node.finalizeStartShell() - _info ("\n") + count+=1 + if count>100: + sleep(10) + count=0 + info ("\n") info( '\n*** Adding links:\n' ) for srcName, dstName, params in topo.links( @@ -688,12 +736,12 @@ def stop( self ): info( switch.name + ' ' ) if switch not in stopped: switch.stop() - switch.terminate() + switch.terminate(autoSetDocker=self.autoSetDocker) info( '\n' ) info( '*** Stopping %i hosts\n' % len( self.hosts ) ) for host in self.hosts: info( host.name + ' ' ) - host.terminate() + host.terminate(autoSetDocker=self.autoSetDocker) info( '*** Stopping %i controllers\n' % len( self.controllers ) ) for controller in self.controllers: @@ -703,6 +751,7 @@ def stop( self ): info( '*** cleaning master\n' ) # XXX DSA need to find something nicer + self.masterSsh.cmd("ip link delete admin-br") for node in self.hosts + self.switches + self.controllers: _info ("wait {} ".format( node )) node.targetSshWaitOutput() @@ -873,5 +922,3 @@ def configureRoutedControlNetwork( self, ip='192.168.123.1', error( '*** Error: control network test failed\n' ) exit( 1 ) info( '\n' ) - - diff --git a/mininet/mininet/lxc_container.py 
b/mininet/mininet/lxc_container.py index 933e8c74..d05ee6d9 100644 --- a/mininet/mininet/lxc_container.py +++ b/mininet/mininet/lxc_container.py @@ -38,7 +38,6 @@ def genIntfName(): class LxcNode (Node): """ SSH node - Attributes ---------- name : str @@ -75,7 +74,6 @@ class LxcNode (Node): STDOUT of the process stderr : asyncssh.stream.SSHReader STDERR of the process - master : ASsh SSH connection to the master containerInterfaces : dict @@ -85,8 +83,7 @@ class LxcNode (Node): adminNetworkCreated = False connectedToAdminNetwork = {} - def __init__(self, name, loop, - admin_ip, + def __init__(self, name, loop, master, target=None, port=22, username=None, pub_id=None, bastion=None, bastion_port=22, client_keys=None, @@ -123,7 +120,6 @@ def __init__(self, name, loop, """ # == distrinet self._preInit(loop=loop, - admin_ip=admin_ip, master=master, target=target, port=port, username=username, pub_id=pub_id, bastion=bastion, bastion_port=bastion_port, client_keys=client_keys, @@ -138,7 +134,6 @@ def __init__(self, name, loop, def _preInit(self, loop, - admin_ip, master, target=None, port=22, username=None, pub_id=None, bastion=None, bastion_port=22, client_keys=None, @@ -157,13 +152,11 @@ def _preInit(self, self.username = username self.pub_id = pub_id self.client_keys = client_keys - # ssh bastion information self.bastion = bastion self.bastion_port = bastion_port # IP address to use to administrate the machine - self.admin_ip = admin_ip self.masternode = master self.containerInterfaces = {} @@ -183,12 +176,13 @@ def _preInit(self, if self.target: self.targetSsh = ASsh(loop=self.loop, host=self.target, username=self.username, bastion=self.bastion, client_keys=self.client_keys) # SSH with the node - admin_ip = self.admin_ip + '''admin_ip = seddlf.admin_ip if "/" in admin_ip: - admin_ip, prefix = admin_ip.split("/") - self.ssh = ASsh(loop=self.loop, host=admin_ip, username=self.username, bastion=self.bastion, client_keys=self.client_keys) + admin_ip, prefix = admin_ip.split("/")''' + self.admin_ip=None + self.ssh = None - def configureContainer(self, adminbr="admin-br", wait=True): + def configureContainer(self, admin_ip, adminbr="admin-br", wait=True,autoSetDocker=False): # # connect the node to the admin network # self.addContainerInterface(intfName="admin", brname=adminbr) @@ -202,14 +196,28 @@ def configureContainer(self, adminbr="admin-br", wait=True): # configure the node to be "SSH'able" cmds = [] + self.admin_ip=admin_ip + if "/" in admin_ip: + admin_ip, prefix = admin_ip.split("/") + '''if "/" in controll_ip: + controll_ip, prefix = controll_ip.split("/")''' + self.ssh = ASsh(loop=self.loop, host=admin_ip, username=self.username, bastion=self.bastion, client_keys=self.client_keys) + if autoSetDocker: + cmds.append("docker exec {} mkdir /root/.ssh".format(self.name)) + cmds.append("docker exec {} bash -c 'echo \"{}\" >> /root/.ssh/authorized_keys'".format(self.name, self.pub_id)) + cmds.append("docker exec {} service ssh start".format(self.name)) + cmds.append("docker exec {} ifconfig admin {}".format(self.name,self.admin_ip)) + '''if self.image=="ubuntu": + print("docker network connect --ip {} network10 {}\n".format(controll_ip,self.name)) + cmds.append("docker network connect --ip {} network10 {}".format(controll_ip,self.name))''' # configure the container to have + else: # an admin IP address - cmds.append("lxc exec {} -- ifconfig admin {}".format(self.name, self.admin_ip)) + cmds.append("lxc exec {} -- ifconfig admin {}".format(self.name, self.admin_ip)) # a public key - 
cmds.append("lxc exec {} -- bash -c 'echo \"{}\" >> /root/.ssh/authorized_keys'".format(self.name, self.pub_id)) - # a ssh server - cmds.append("lxc exec {} -- service ssh start".format(self.name)) - + cmds.append("lxc exec {} -- bash -c 'echo \"{}\" >> /root/.ssh/authorized_keys'".format(self.name, self.pub_id)) +# a ssh server + cmds.append("lxc exec {} -- service ssh start".format(self.name)) cmd = ';'.join(cmds) if wait: self.targetSsh.cmd(cmd) @@ -224,8 +232,8 @@ def createMasterAdminNetwork(cls, master, brname="admin-br", ip="192.168.42.1/24 cmd = ";".join(cmds) master.cmd(cmd) - import re + import re def _findNameIP(self, name): """ Resolves name to IP as seen by the eyeball @@ -301,31 +309,31 @@ def createContainerLinkCommandList(self, target1, target2, vxlan_id, vxlan_name, self.devices.append(bridge2) return cmds - def connectToAdminNetwork(self, master, target, link_id, admin_br, wait=True, **params): + def connectToAdminNetwork(self, admin_ip,master, target, link_id, admin_br, wait=True, **params): cmds = [] - if not self.target in self.__class__.connectedToAdminNetwork: - self.__class__.connectedToAdminNetwork[self.target] = True + cmds.append("brctl addbr admin-br") + cmds.append("ifconfig admin-br {}".format(admin_ip)) - # no need to connect admin on the same machine or if it is already connected - vxlan_name = "vx_{}".format(link_id) + # no need to connect admin on the same machine or if it is already connected + vxlan_name = "vx_{}".format(link_id) - # locally - # DSA - TODO - XXX beurk bridge2 = None - cmds = self.createContainerLinkCommandList(target, master, link_id, vxlan_name, bridge1=admin_br, bridge2=None) - cmd = ';'.join(cmds) + # locally + # DSA - TODO - XXX beurk bridge2 = None + cmds =cmds + self.createContainerLinkCommandList(target, master, link_id, vxlan_name, bridge1=admin_br, bridge2=None) + cmd = ';'.join(cmds) - if wait: - self.targetSsh.cmd(cmd) - else: - self.targetSsh.sendCmd(cmd) + if wait: + self.targetSsh.cmd(cmd) + else: + self.targetSsh.sendCmd(cmd) - # on master - # DSA - TODO - XXX beurk bridge2 = None - cmds = self.createContainerLinkCommandList(master, target, link_id, vxlan_name, bridge1=admin_br, bridge2=None) - cmd = ';'.join(cmds) - self.devicesMaster.append(vxlan_name) + # on master + # DSA - TODO - XXX beurk bridge2 = None + cmds = self.createContainerLinkCommandList(master, target, link_id, vxlan_name, bridge1=admin_br, bridge2=None) + cmd = ';'.join(cmds) + self.devicesMaster.append(vxlan_name) - self.devices.append(vxlan_name) + self.devices.append(vxlan_name) # print ("master".format(vxlan_name),cmd) # if wait: # self.masternode.cmd(cmd) @@ -338,27 +346,48 @@ def connectTarget(self): def waitConnectedTarget(self): self.targetSsh.waitConnected() - def createContainer(self, **params): + def createContainer(self,autoSetDocker=False,providerIP=None, **params): ################################################################################ time.sleep(1.0) info ("create container ({} {} {}) ".format(self.image, self.cpu, self.memory)) cmds = [] + providerIP, prefix = providerIP.split("/") # initialise the container - cmd = "lxc init {} {} < /dev/null ".format(self.image, self.name) - info ("{}\n".format(cmd)) + if autoSetDocker: + if self.image=="ubuntu": + ##--privileged=true --init --cap-add=NET_ADMIN --cap-add=SYS_MODULE --cap-add=SYS_NICE jiawei96liu/cnimage:v3 bash + cmd = "docker create -it --privileged --cap-add=NET_ADMIN --cap-add=SYS_MODULE --cap-add=SYS_NICE --init --net network20 --ip {} --name {} -h {} {} ".format(providerIP, 
self.name, self.name, self.image) + else: + cmd="docker create -it --privileged --cap-add=NET_ADMIN --cap-add=SYS_MODULE --cap-add=SYS_NICE --net=none --name {} -h {} {} ".format(self.name, self.name, self.image) + else: + cmd = "lxc init {} {} < /dev/null ".format(self.image, self.name) + info("{}\n".format(cmd)) cmds.append(cmd) - # limit resources - if self.cpu: - cmds.append("lxc config set {} limits.cpu {}".format(self.name, self.cpu)) - if self.memory: - cmds.append("lxc config set {} limits.memory {}".format(self.name, self.memory)) - - # start the container - cmds.append("lxc start {}".format(self.name)) - + if autoSetDocker: + cmds.append("docker start {}".format(self.name)) + if self.cpu: + #cmds.append("lxc config set {} limits.cpu {}".format(self.name, self.cpu)) + cmds.append("docker container update --cpuset-cpus={} {}".format(self.cpu, self.name)) + if self.memory: + #cmds.append("lxc config set {} limits.memory {}".format(self.name, self.memory)) + cmds.append("docker container update -m {} {}".format(self.memory, self.name)) + + if self.image=="switch": + cmds.append("docker exec {} bash -c 'export PATH=$PATH:/usr/share/openvswitch/scripts;ovs-ctl start'".format(self.name)) + else: + if self.cpu: + cmds.append("lxc config set {} limits.cpu {}".format(self.name, self.cpu)) + if self.memory: + cmds.append("lxc config set {} limits.memory {}".format(self.name, self.memory)) + # start the container + cmds.append("lxc start {}".format(self.name)) cmd = ";".join(cmds) self.targetSsh.sendCmd(cmd) + def startNovacompute(self): + cmd="python3 docker_configer_container_cmd.py" + self.ssh.sendCmd(cmd) + def targetSshWaitOutput(self): """ Wait for output on targetSsh @@ -371,20 +400,29 @@ def waitCreated(self): info ("container created") - def addContainerInterface(self, intfName, devicename=None, brname=None, wait=True, **params): + def addContainerInterface(self, intfName, devicename=None, brname=None, wait=True, autoSetDocker=False,**params): """ Add the interface with name intfName to the container that is associated to the bridge named name-intfName-br on the host """ + cmds=[] if devicename is None: devicename = genIntfName() if brname is None: brname = genIntfName() - cmds = [] - cmds.append("brctl addbr {}".format(brname)) - cmds.append("lxc network attach {} {} {} {}".format(brname, self.name, devicename, intfName)) + cmds.append("brctl addbr {}".format(brname)) + if autoSetDocker: + cmds.append("ip link add {} type veth peer name {}".format("veth"+devicename,devicename)) + cmds.append("brctl addif {} {}".format(brname,devicename)) + cmds.append("ip link set up {}".format(devicename)) + cmds.append("{}=$(docker inspect -f '{{{{.State.Pid}}}}' {})".format(self.name,self.name)) + cmds.append("ln -s /proc/{}/ns/net /var/run/netns/${}".format("$"+self.name,self.name)) + cmds.append("ip link set {} netns ${}".format("veth"+devicename,self.name)) + cmds.append("ip netns exec ${} ip link set dev {} name {}".format(self.name,"veth"+devicename,intfName)) + cmds.append("ip netns exec ${} ip link set {} up".format(self.name,intfName)) + else: + cmds.append("lxc network attach {} {} {} {}".format(brname, self.name, devicename, intfName)) cmds.append("ip link set up {}".format(brname)) - cmd = ";".join(cmds) if wait: @@ -511,14 +549,16 @@ def cleanup( self ): # Subshell I/O, commands and control # XXX - OK - def terminate( self ): + def terminate( self ,autoSetDocker=False): "Send kill signal to Node and clean up after it." 
self.unmountPrivateDirs() cmds = [] # destroy the container - cmds.append("lxc delete {} --force".format(self.name)) - + if autoSetDocker: + cmds.append("docker rm -f {}".format(self.name)) + else: + cmds.append("lxc delete {} --force".format(self.name)) # remove all locally made devices for device in self.devices: cmds.append("ip link delete {}".format(device)) diff --git a/mininet/mininet/provision/playbooks/configure-docker.yml b/mininet/mininet/provision/playbooks/configure-docker.yml new file mode 100644 index 00000000..4c0787c3 --- /dev/null +++ b/mininet/mininet/provision/playbooks/configure-docker.yml @@ -0,0 +1,16 @@ +--- + +- hosts : all + remote_user: root + tasks : + - name : create switch image + command: docker pull jiawei96liu/hificnet-switch:generic-v1 + + - name : create ubuntu18.04 image + command: docker pull jiawei96liu/hificnet-ubuntu:openstack-v1 + + - name : update ubuntu tag + command: docker tag jiawei96liu/hificnet-ubuntu:openstack-v1 ubuntu + + - name : update switch tag + command: docker tag jiawei96liu/hificnet-switch:generic-v1 switch diff --git a/mininet/mininet/provision/playbooks/configure-lxd-no-clustering.yml b/mininet/mininet/provision/playbooks/configure-lxd-no-clustering.yml index 87405346..cf1a47c9 100644 --- a/mininet/mininet/provision/playbooks/configure-lxd-no-clustering.yml +++ b/mininet/mininet/provision/playbooks/configure-lxd-no-clustering.yml @@ -12,15 +12,19 @@ (?i)Would you like to use LXD clustering? : "no" (?i)Do you want to configure a new storage pool? : "yes" (?i)Name of the new storage pool : "default" - (?i)Name of the storage backend to use : "dir" + (?i)Name of the storage backend to use : "zfs" + (?i)Create a new BTRFS pool? : "yes" + (?i)Create a new ZFS pool : "yes" + (?i)Would you like to use an existing block device? : "no" + (?i)Would you like to use an existing empty block device : "no" (?i)Would you like to connect to a MAAS server? : "no" - (?i)Would you like to create a new local network bridge? : "yes" - (?i)What should the new bridge be called? : "lxdbr0" - (?i)What IPv4 address should be used? : "auto" - (?i)What IPv6 address should be used? : "auto" - (?i)Would you like LXD to be available over the network? : "no" - (?i)Would you like stale cached images to be updated automatically? : "yes" - (?i)Would you like a YAML "lxd init" preseed to be printed? : "no" + (?i)Size in GB of the new loop device : "15GB" + (?i)Would you like to configure LXD to use an existing bridge or host interface?: "no" + (?i)Would you like to create a new local network bridge? : "no" + (?i)Would you like LXD to be available over the network : "no" + (?i)Would you like the LXD server to be available over the network : "no" + (?i)Would you like stale cached images to be updated automatically? 
: "no" + (?i)Would you like a YAML : "no" - hosts : all @@ -36,7 +40,7 @@ src : ~/ubuntu.tar.gz dest : ~/ubuntu.tar.gz -### - name : distribute debian image +# - name : distribute debian image # copy : # src : ~/debian.tar.gz # dest : ~/debian.tar.gz @@ -52,7 +56,7 @@ # dest : ~/ubuntu-hadoop-slave.tar.gz -# - name : distribute onos image +# - name : distribute onos image # copy : # src : ~/ubuntu-onos-2.1.0.tar.gz # dest : ~/ubuntu-onos-2.1.0.tar.gz diff --git a/mininet/mininet/provision/playbooks/deploy_compute.yml b/mininet/mininet/provision/playbooks/deploy_compute.yml new file mode 100644 index 00000000..6014780c --- /dev/null +++ b/mininet/mininet/provision/playbooks/deploy_compute.yml @@ -0,0 +1,10 @@ +--- + +- hosts : workers + gather_facts: false + remote_user: root + tasks : + - name : update ubuntu tag + command: python3 docker_configer_container_cmd.py + + diff --git a/mininet/mininet/provision/playbooks/docker-nova-compute-auto.sh b/mininet/mininet/provision/playbooks/docker-nova-compute-auto.sh new file mode 100755 index 00000000..61c1fd85 --- /dev/null +++ b/mininet/mininet/provision/playbooks/docker-nova-compute-auto.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +function shX(){ + CMD=$* + echo -e "\e[32m[$(date +"%F %T")] " EXEC: ${CMD} " \e[0m" + ${CMD} +} + + +container_id=`docker ps -a | awk '{print $10}' | grep h` +array=(`echo $container_id | tr '\n' ' '` ) +for var in ${array[@]} +do + #echo $var + shX docker exec ${var} sh /root/nova_compute_auto_restart.sh & +done diff --git a/mininet/mininet/provision/playbooks/install-docker.yml b/mininet/mininet/provision/playbooks/install-docker.yml new file mode 100644 index 00000000..a41d70f8 --- /dev/null +++ b/mininet/mininet/provision/playbooks/install-docker.yml @@ -0,0 +1,47 @@ +--- +# + +- hosts : all + remote_user: root + tasks : + - name: install python3-pip + apt : + update_cache: true + name : python3-pip + + - name: install docker + apt : + name: docker.io + + - name: install htop + apt : + name: htop + + - name: install ethtool + apt : + name: ethtool + + - name: install bridge-utils + apt : + name: bridge-utils + + - name: install net-tools + apt : + name: net-tools + + - name: install pexpect + pip : + name: pexpect + + - name: install ovs + apt : + name: openvswitch-switch + +# - name: install btrfs-tools +# apt: +# name: btrfs-tools + + - name: install ryu + apt: + name: python3-ryu + diff --git a/mininet/mininet/provision/playbooks/prepare-ansiable.sh b/mininet/mininet/provision/playbooks/prepare-ansiable.sh new file mode 100755 index 00000000..039ebeb7 --- /dev/null +++ b/mininet/mininet/provision/playbooks/prepare-ansiable.sh @@ -0,0 +1,22 @@ +#/bin/bash +if [[ $# -ne 2 ]];then + echo "Usage: ./prepare-ansiable.sh {base-ip} {ip-num}" +else + base_ip=$1 + base_ip_array=(`echo $base_ip | tr '.' 
' '` ) + ip_num=$2 + + echo "[master]" > /etc/ansible/hosts + echo "127.0.0.1 ansible_connection=local ansible_python_interpreter=/usr/bin/python3" >> /etc/ansible/hosts + echo "[workers]" >> /etc/ansible/hosts + + for ((cnt=0; cnt<${ip_num}; cnt++)) + do + let temp=${base_ip_array[3]}+cnt + let my_ip_1=${temp}%256 + let my_ip_2=${base_ip_array[2]}+${temp}/256 + my_ip=${base_ip_array[0]}.${base_ip_array[1]}.${my_ip_2}.${my_ip_1} + #echo $my_ip + echo "${my_ip} ansible_ssh_extra_args='-o StrictHostKeyChecking=no' ansible_python_interpreter=/usr/bin/python3" >> /etc/ansible/hosts + done +fi diff --git a/mininet/mininet/provision/playbooks/prepare-docker-net.sh b/mininet/mininet/provision/playbooks/prepare-docker-net.sh new file mode 100755 index 00000000..6d0e8702 --- /dev/null +++ b/mininet/mininet/provision/playbooks/prepare-docker-net.sh @@ -0,0 +1,13 @@ +ip addr flush eno3 +#docker network create --subnet 20.20.0.0/16 --gateway 20.20.0.4 network20 +brctl addif br-a149c07e9972 eno3 +ping 20.20.0.1 -c 2 -I br-a149c07e9972 +docker network rm network10 +#ip addr flush eno4 +#docker network create --subnet 10.10.0.0/16 --gateway 10.10.0.4 network10 +#brctl addif br-e91b254f36d6 eno4 +#ping 10.10.0.1 -c 2 -I br-e91b254f36d6 +sysctl net.ipv4.conf.all.forwarding=1 +iptables --policy FORWARD ACCEPT +mkdir /var/run/netns +ulimit -n 196835 diff --git a/mininet/mininet/provision/playbooks/prepare-openstack.sh b/mininet/mininet/provision/playbooks/prepare-openstack.sh new file mode 100644 index 00000000..15e7e4cc --- /dev/null +++ b/mininet/mininet/provision/playbooks/prepare-openstack.sh @@ -0,0 +1,19 @@ +#controller +brctl addbr admin-br &&\ +ifconfig admin-br 192.168.0.1/16 &&\ +tunctl -t admin &&\ +brctl addif admin-br admin &&\ +ifconfig admin 192.168.0.3/16 &&\ +ip link set admin-br up &&\ +ip link set admin up + +ip link delete vx_00 &&\ +ip link add vx_00 type vxlan id 00 remote 172.16.50.8 local 172.16.50.3 dstport 4789 &&\ +ip link set up vx_00 &&\ +brctl addif admin-br vx_00 &&\ +ip link set up admin-br +#master +ip link add vx_00 type vxlan id 00 remote 172.16.50.3 local 172.16.50.8 dstport 4789 &&\ +ip link set up vx_00 &&\ +brctl addif admin-br vx_00 &&\ +ip link set up admin-br diff --git a/openstack/fake.py b/openstack/fake.py new file mode 100644 index 00000000..ad178f5b --- /dev/null +++ b/openstack/fake.py @@ -0,0 +1,1199 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A fake (in-memory) hypervisor+api. + +Allows nova testing w/o a hypervisor. This module also documents the +semantics of real hypervisor connections. 
+ +""" +import netaddr +import collections +import contextlib +import time +import uuid + +import fixtures +import os_resource_classes as orc +from oslo_log import log as logging +from oslo_serialization import jsonutils +from oslo_utils import versionutils + +from nova.compute import power_state +from nova.compute import task_states +from nova.compute import vm_states +import nova.conf +from nova.console import type as ctype +from nova import context as nova_context +from nova import exception +from nova import objects +from nova.objects import diagnostics as diagnostics_obj +from nova.objects import fields as obj_fields +from nova.objects import migrate_data +from nova.virt import driver +from nova.virt import hardware +from nova.virt.ironic import driver as ironic +from nova.virt import virtapi +from oslo_concurrency import processutils +from nova import utils +import re + +CONF = nova.conf.CONF + +LOG = logging.getLogger(__name__) + +def execute_wrapper(args): + LOG.info( + "fake running command: %s", + " ".join(str(arg) for arg in args), + ) + #root_helper = utils.get_root_helper() + try: + return processutils.execute(*args) + except Exception as e: + LOG.error( + "fake Unable to execute %(cmd)s. Exception: %(exception)s", + {"cmd": args, "exception": e}, + ) + raise + +@nova.privsep.sys_admin_pctxt.entrypoint +def add_namespace(ns): + full_args = ["ip", "netns", "add", ns] + execute_wrapper(full_args) + full_args = ["ip", "netns", "exec", ns, "ip", "link", "set", "lo", "up"] + execute_wrapper(full_args) + +@nova.privsep.sys_admin_pctxt.entrypoint +def delete_namespace(ns): + # deleting namespace will delete its ports and veth pairs + full_args = ["ip", "netns", "del", ns] + execute_wrapper(full_args) + +@nova.privsep.sys_admin_pctxt.entrypoint +def add_port_ip_addresses(ns, ovs_port, ip_addresses): + for address in ip_addresses: + full_args = ["ip", "netns", "exec", ns, + "ip", "addr", "add", address, "dev", ovs_port] + execute_wrapper(full_args) + +@nova.privsep.sys_admin_pctxt.entrypoint +def add_port(ns, bridge, ovs_port, port_id, mac_address): + full_args = ["ovs-vsctl", "--may-exist", "add-port", bridge, ovs_port, + "--", "set", "Interface", ovs_port, "type=internal", + "--", "set", "Interface", ovs_port, + "external_ids:iface-id=%s" % port_id, + "--", "set", "Interface", ovs_port, + "external-ids:iface-status=active", + "--", "set", "Interface", ovs_port, + "external-ids:attached-mac=%s" % mac_address] + execute_wrapper(full_args) + full_args = ["ip", "link", "set", ovs_port, "netns", ns] + execute_wrapper(full_args) + namespace = ["ip", "netns", "exec", ns] + full_args = namespace + ["ip", "link", "set", ovs_port, "up"] + execute_wrapper(full_args) + namespace = ["ip", "netns", "exec", ns] + full_args = namespace + ["ip", "link", "set", ovs_port, "address", mac_address] + execute_wrapper(full_args) + +@nova.privsep.sys_admin_pctxt.entrypoint +def delete_port(ns, bridge, ovs_port): + full_args = ["ovs-vsctl", "--if-exists", "del-port", bridge, ovs_port] + execute_wrapper(full_args) + +@nova.privsep.sys_admin_pctxt.entrypoint +def add_route(ns, ip_addresses): + namespace = ["ip", "netns", "exec", ns] + for address in ip_addresses: + m=re.search(r'(\d+).(\d+).(\d+).\d+/(\d+)',address) + if m.group(4)=="8": + ip=m.group(1)+".0.0.1" + elif m.group(4)=="16": + ip=m.group(1)+"."+m.group(2)+".0.1" + elif m.group(4)=="24": + ip=m.group(1)+"."+m.group(2)+"."+m.group(3)+".1" + full_args=namespace+["route","add","default","gw",ip] + execute_wrapper(full_args) + +class 
FakeInstance(object): + + def __init__(self, name, state, uuid): + LOG.info("---fake---instance_init---") + self.name = name + self.state = state + self.uuid = uuid + + def __getitem__(self, key): + return getattr(self, key) + + +class Resources(object): + vcpus = 0 + memory_mb = 0 + local_gb = 0 + vcpus_used = 0 + memory_mb_used = 0 + local_gb_used = 0 + + def __init__(self, vcpus=8, memory_mb=8000, local_gb=500): + LOG.info("---fake---resource_init----") + self.vcpus = vcpus + self.memory_mb = memory_mb + self.local_gb = local_gb + + def claim(self, vcpus=0, mem=0, disk=0): + self.vcpus_used += vcpus + self.memory_mb_used += mem + self.local_gb_used += disk + + def release(self, vcpus=0, mem=0, disk=0): + self.vcpus_used -= vcpus + self.memory_mb_used -= mem + self.local_gb_used -= disk + + def dump(self): + return { + 'vcpus': self.vcpus, + 'memory_mb': self.memory_mb, + 'local_gb': self.local_gb, + 'vcpus_used': self.vcpus_used, + 'memory_mb_used': self.memory_mb_used, + 'local_gb_used': self.local_gb_used + } + + +class FakeDriver(driver.ComputeDriver): + # These must match the traits in + # nova.tests.functional.integrated_helpers.ProviderUsageBaseTestCase + capabilities = { + "has_imagecache": True, + "supports_evacuate": True, + "supports_migrate_to_same_host": False, + "supports_attach_interface": True, + "supports_device_tagging": True, + "supports_tagged_attach_interface": True, + "supports_tagged_attach_volume": True, + "supports_extend_volume": True, + "supports_multiattach": True, + "supports_trusted_certs": True, + "supports_pcpus": False, + "supports_accelerators": True, + "supports_remote_managed_ports": True, + + # Supported image types + "supports_image_type_raw": True, + "supports_image_type_vhd": False, + } + + # Since we don't have a real hypervisor, pretend we have lots of + # disk and ram so this driver can be used to test large instances. + vcpus = 1000 + memory_mb = 800000 + local_gb = 600000 + + """Fake hypervisor driver.""" + + def __init__(self, virtapi, read_only=False): + LOG.info("------fakedriver_init------") + super(FakeDriver, self).__init__(virtapi) + self.instances = {} + self.resources = Resources( + vcpus=self.vcpus, + memory_mb=self.memory_mb, + local_gb=self.local_gb) + self.host_status_base = { + 'hypervisor_type': 'fake', + 'hypervisor_version': versionutils.convert_version_to_int('1.0'), + 'hypervisor_hostname': CONF.host, + 'cpu_info': {}, + 'disk_available_least': 0, + 'supported_instances': [( + obj_fields.Architecture.X86_64, + obj_fields.HVType.FAKE, + obj_fields.VMMode.HVM)], + 'numa_topology': None, + } + self._mounts = {} + self._interfaces = {} + self.active_migrations = {} + self._host = None + self._nodes = None + + def init_host(self, host): + self._host = host + # NOTE(gibi): this is unnecessary complex and fragile but this is + # how many current functional sample tests expect the node name. + self._nodes = (['fake-mini'] if self._host == 'compute' + else [self._host]) + + def _set_nodes(self, nodes): + # NOTE(gibi): this is not part of the driver interface but used + # by our tests to customize the discovered nodes by the fake + # driver. 
+ self._nodes = nodes + + def list_instances(self): + return [self.instances[uuid].name for uuid in self.instances.keys()] + + def list_instance_uuids(self): + return list(self.instances.keys()) + + def get_ip_addresses(self, vif): + addresses = [] + network = vif.get("network", {}) + for subnet in network.get("subnets", []): + if subnet and subnet.get("version", "") == 4: + cidr = subnet.get("cidr", None) + for ip in subnet.get("ips", []): + ip_address = ip.get("address", None) + if cidr and ip_address: + prefixlen = netaddr.IPNetwork(cidr).prefixlen + ip_address = "%s/%s" % (ip_address, prefixlen) + addresses = addresses + [ip_address] + return addresses + + def plug_vif(self, instance, vif): + bridge = "br-int" + dev = vif.get("devname") + port = vif.get("id") + mac_address = vif.get("address") + if not dev or not port or not mac_address: + return + ns = "fake-%s" % instance.uuid + add_port(ns, bridge, dev, port, mac_address) + ip_addresses = self.get_ip_addresses(vif) + add_port_ip_addresses(ns, dev, ip_addresses) + add_route(ns,ip_addresses) + + def plug_vifs(self, instance, network_info): + """Plug VIFs into networks.""" + LOG.info("---fake-plug_vifs---TAG0") + ns = "fake-%s" % instance.uuid + add_namespace(ns) + if network_info == None: + LOG.info("---fake-plug_vifs---TAG1") + else: + LOG.info("---fake-plug_vifs---TAG2") + for vif in network_info: + LOG.info("---fake-plug_vifs---TAG3") + self.plug_vif(instance, vif) + + def unplug_vif(self, instance, vif): + bridge = "br-int" + dev = vif.get("devname") + port = vif.get("id") + if not dev: + if not port: + return + dev = "tap" + str(port[0:11]) + ns = "fake-%s" % instance.uuid + delete_port(ns, bridge, dev) + + def unplug_vifs(self, instance, network_info): + """Unplug VIFs from networks.""" + for vif in network_info: + self.unplug_vif(instance, vif) + # delete namespace after removing ovs ports + ns = "fake-%s" % instance.uuid + delete_namespace(ns) + + def spawn(self, context, instance, image_meta, injected_files, + admin_password, allocations, network_info=None, + block_device_info=None, power_on=True, accel_info=None): + LOG.info("---fake-spawn---TAG0") + self.plug_vifs(instance, network_info) + if network_info: + LOG.info("---fake-spawn---TAG2 {}".format(network_info)) + for vif in network_info: + # simulate a real driver triggering the async network + # allocation as it might cause an error + vif.fixed_ips() + # store the vif as attached so we can allow detaching it later + # with a detach_interface() call. + self._interfaces[vif['id']] = vif + + uuid = instance.uuid + state = power_state.RUNNING if power_on else power_state.SHUTDOWN + flavor = instance.flavor + self.resources.claim( + vcpus=flavor.vcpus, + mem=flavor.memory_mb, + disk=flavor.root_gb) + fake_instance = FakeInstance(instance.name, state, uuid) + self.instances[uuid] = fake_instance + + def snapshot(self, context, instance, image_id, update_task_state): + if instance.uuid not in self.instances: + raise exception.InstanceNotRunning(instance_id=instance.uuid) + update_task_state(task_state=task_states.IMAGE_UPLOADING) + + def reboot(self, context, instance, network_info, reboot_type, + block_device_info=None, bad_volumes_callback=None, + accel_info=None): + # If the guest is not on the hypervisor and we're doing a hard reboot + # then mimic the libvirt driver by spawning the guest. 
+ if (instance.uuid not in self.instances and + reboot_type.lower() == 'hard'): + injected_files = admin_password = allocations = None + self.spawn(context, instance, instance.image_meta, injected_files, + admin_password, allocations, + block_device_info=block_device_info) + else: + # Just try to power on the guest. + self.power_on(context, instance, network_info, + block_device_info=block_device_info) + + def get_host_ip_addr(self): + return '192.168.0.1' + + def set_admin_password(self, instance, new_pass): + pass + + def resume_state_on_host_boot(self, context, instance, network_info, + block_device_info=None): + pass + + def rescue(self, context, instance, network_info, image_meta, + rescue_password, block_device_info): + pass + + def unrescue( + self, + context: nova_context.RequestContext, + instance: 'objects.Instance', + ): + self.instances[instance.uuid].state = power_state.RUNNING + + def poll_rebooting_instances(self, timeout, instances): + pass + + def migrate_disk_and_power_off(self, context, instance, dest, + flavor, network_info, + block_device_info=None, + timeout=0, retry_interval=0): + pass + + def finish_revert_migration(self, context, instance, network_info, + migration, block_device_info=None, + power_on=True): + state = power_state.RUNNING if power_on else power_state.SHUTDOWN + self.instances[instance.uuid] = FakeInstance( + instance.name, state, instance.uuid) + + def post_live_migration_at_destination(self, context, instance, + network_info, + block_migration=False, + block_device_info=None): + # Called from the destination host after a successful live migration + # so spawn the instance on this host to track it properly. + image_meta = injected_files = admin_password = allocations = None + self.spawn(context, instance, image_meta, injected_files, + admin_password, allocations) + + def power_off(self, instance, timeout=0, retry_interval=0): + if instance.uuid in self.instances: + self.instances[instance.uuid].state = power_state.SHUTDOWN + else: + raise exception.InstanceNotFound(instance_id=instance.uuid) + + def power_on(self, context, instance, network_info, + block_device_info=None, accel_info=None): + if instance.uuid in self.instances: + self.instances[instance.uuid].state = power_state.RUNNING + else: + raise exception.InstanceNotFound(instance_id=instance.uuid) + + def trigger_crash_dump(self, instance): + pass + + def soft_delete(self, instance): + pass + + def restore(self, instance): + pass + + def pause(self, instance): + pass + + def unpause(self, instance): + pass + + def suspend(self, context, instance): + pass + + def resume(self, context, instance, network_info, block_device_info=None): + pass + + def destroy(self, context, instance, network_info, block_device_info=None, + destroy_disks=True, destroy_secrets=True): + self.unplug_vifs(instance, network_info) + key = instance.uuid + if key in self.instances: + flavor = instance.flavor + self.resources.release( + vcpus=flavor.vcpus, + mem=flavor.memory_mb, + disk=flavor.root_gb) + del self.instances[key] + else: + LOG.warning("fake Key '%(key)s' not in instances '%(inst)s'", + {'key': key, + 'inst': self.instances}, instance=instance) + + def cleanup(self, context, instance, network_info, block_device_info=None, + destroy_disks=True, migrate_data=None, destroy_vifs=True, + destroy_secrets=True): + # cleanup() should not be called when the guest has not been destroyed. + if instance.uuid in self.instances: + raise exception.InstanceExists( + "Instance %s has not been destroyed." 
% instance.uuid) + + def attach_volume(self, context, connection_info, instance, mountpoint, + disk_bus=None, device_type=None, encryption=None): + """Attach the disk to the instance at mountpoint using info.""" + instance_name = instance.name + if instance_name not in self._mounts: + self._mounts[instance_name] = {} + self._mounts[instance_name][mountpoint] = connection_info + + def detach_volume(self, context, connection_info, instance, mountpoint, + encryption=None): + """Detach the disk attached to the instance.""" + try: + del self._mounts[instance.name][mountpoint] + except KeyError: + pass + + def swap_volume(self, context, old_connection_info, new_connection_info, + instance, mountpoint, resize_to): + """Replace the disk attached to the instance.""" + instance_name = instance.name + if instance_name not in self._mounts: + self._mounts[instance_name] = {} + self._mounts[instance_name][mountpoint] = new_connection_info + + def extend_volume(self, context, connection_info, instance, + requested_size): + """Extend the disk attached to the instance.""" + pass + + def attach_interface(self, context, instance, image_meta, vif): + if vif['id'] in self._interfaces: + raise exception.InterfaceAttachFailed( + instance_uuid=instance.uuid) + self._interfaces[vif['id']] = vif + + def detach_interface(self, context, instance, vif): + try: + del self._interfaces[vif['id']] + except KeyError: + raise exception.InterfaceDetachFailed( + instance_uuid=instance.uuid) + + def get_info(self, instance, use_cache=True): + if instance.uuid not in self.instances: + raise exception.InstanceNotFound(instance_id=instance.uuid) + i = self.instances[instance.uuid] + return hardware.InstanceInfo(state=i.state) + + def get_diagnostics(self, instance): + return {'cpu0_time': 17300000000, + 'memory': 524288, + 'vda_errors': -1, + 'vda_read': 262144, + 'vda_read_req': 112, + 'vda_write': 5778432, + 'vda_write_req': 488, + 'vnet1_rx': 2070139, + 'vnet1_rx_drop': 0, + 'vnet1_rx_errors': 0, + 'vnet1_rx_packets': 26701, + 'vnet1_tx': 140208, + 'vnet1_tx_drop': 0, + 'vnet1_tx_errors': 0, + 'vnet1_tx_packets': 662, + } + + def get_instance_diagnostics(self, instance): + diags = diagnostics_obj.Diagnostics( + state='running', driver='libvirt', hypervisor='kvm', + hypervisor_os='ubuntu', uptime=46664, config_drive=True) + diags.add_cpu(id=0, time=17300000000, utilisation=15) + diags.add_nic(mac_address='01:23:45:67:89:ab', + rx_octets=2070139, + rx_errors=100, + rx_drop=200, + rx_packets=26701, + rx_rate=300, + tx_octets=140208, + tx_errors=400, + tx_drop=500, + tx_packets = 662, + tx_rate=600) + diags.add_disk(read_bytes=262144, + read_requests=112, + write_bytes=5778432, + write_requests=488, + errors_count=1) + diags.memory_details = diagnostics_obj.MemoryDiagnostics( + maximum=524288, used=0) + return diags + + def get_all_volume_usage(self, context, compute_host_bdms): + """Return usage info for volumes attached to vms on + a given host. 
+ """ + volusage = [] + if compute_host_bdms: + volusage = [{'volume': compute_host_bdms[0][ + 'instance_bdms'][0]['volume_id'], + 'instance': compute_host_bdms[0]['instance'], + 'rd_bytes': 0, + 'rd_req': 0, + 'wr_bytes': 0, + 'wr_req': 0}] + + return volusage + + def get_host_cpu_stats(self): + stats = {'kernel': 5664160000000, + 'idle': 1592705190000000, + 'user': 26728850000000, + 'iowait': 6121490000000} + stats['frequency'] = 800 + return stats + + def block_stats(self, instance, disk_id): + return [0, 0, 0, 0, None] + + def get_console_output(self, context, instance): + return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE' + + def get_vnc_console(self, context, instance): + return ctype.ConsoleVNC(internal_access_path='FAKE', + host='fakevncconsole.com', + port=6969) + + def get_spice_console(self, context, instance): + return ctype.ConsoleSpice(internal_access_path='FAKE', + host='fakespiceconsole.com', + port=6969, + tlsPort=6970) + + def get_rdp_console(self, context, instance): + return ctype.ConsoleRDP(internal_access_path='FAKE', + host='fakerdpconsole.com', + port=6969) + + def get_serial_console(self, context, instance): + return ctype.ConsoleSerial(internal_access_path='FAKE', + host='fakerdpconsole.com', + port=6969) + + def get_mks_console(self, context, instance): + return ctype.ConsoleMKS(internal_access_path='FAKE', + host='fakemksconsole.com', + port=6969) + + def get_available_resource(self, nodename): + """Updates compute manager resource info on ComputeNode table. + + Since we don't have a real hypervisor, pretend we have lots of + disk and ram. + """ + cpu_info = collections.OrderedDict([ + ('arch', 'x86_64'), + ('model', 'Nehalem'), + ('vendor', 'Intel'), + ('features', ['pge', 'clflush']), + ('topology', { + 'cores': 1, + 'threads': 1, + 'sockets': 4, + }), + ]) + if nodename not in self.get_available_nodes(): + return {} + + host_status = self.host_status_base.copy() + host_status.update(self.resources.dump()) + host_status['hypervisor_hostname'] = nodename + host_status['host_hostname'] = nodename + host_status['host_name_label'] = nodename + host_status['cpu_info'] = jsonutils.dumps(cpu_info) + return host_status + + def update_provider_tree(self, provider_tree, nodename, allocations=None): + # NOTE(yikun): If the inv record does not exists, the allocation_ratio + # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio + # is set, and fallback to use the initial_xxx_allocation_ratio + # otherwise. 
+ inv = provider_tree.data(nodename).inventory + ratios = self._get_allocation_ratios(inv) + inventory = { + 'VCPU': { + 'total': self.vcpus, + 'min_unit': 1, + 'max_unit': self.vcpus, + 'step_size': 1, + 'allocation_ratio': ratios[orc.VCPU], + 'reserved': CONF.reserved_host_cpus, + }, + 'MEMORY_MB': { + 'total': self.memory_mb, + 'min_unit': 1, + 'max_unit': self.memory_mb, + 'step_size': 1, + 'allocation_ratio': ratios[orc.MEMORY_MB], + 'reserved': CONF.reserved_host_memory_mb, + }, + 'DISK_GB': { + 'total': self.local_gb, + 'min_unit': 1, + 'max_unit': self.local_gb, + 'step_size': 1, + 'allocation_ratio': ratios[orc.DISK_GB], + 'reserved': self._get_reserved_host_disk_gb_from_config(), + }, + } + provider_tree.update_inventory(nodename, inventory) + + def get_instance_disk_info(self, instance, block_device_info=None): + return + + def live_migration(self, context, instance, dest, + post_method, recover_method, block_migration=False, + migrate_data=None): + post_method(context, instance, dest, block_migration, + migrate_data) + return + + def live_migration_force_complete(self, instance): + return + + def live_migration_abort(self, instance): + return + + def cleanup_live_migration_destination_check(self, context, + dest_check_data): + return + + def check_can_live_migrate_destination(self, context, instance, + src_compute_info, dst_compute_info, + block_migration=False, + disk_over_commit=False): + data = migrate_data.LibvirtLiveMigrateData() + data.filename = 'fake' + data.image_type = CONF.libvirt.images_type + data.graphics_listen_addr_vnc = CONF.vnc.server_listen + data.graphics_listen_addr_spice = CONF.spice.server_listen + data.serial_listen_addr = None + # Notes(eliqiao): block_migration and disk_over_commit are not + # nullable, so just don't set them if they are None + if block_migration is not None: + data.block_migration = block_migration + if disk_over_commit is not None: + data.disk_over_commit = disk_over_commit + data.disk_available_mb = 100000 + data.is_shared_block_storage = True + data.is_shared_instance_path = True + + return data + + def check_can_live_migrate_source(self, context, instance, + dest_check_data, block_device_info=None): + return dest_check_data + + def finish_migration(self, context, migration, instance, disk_info, + network_info, image_meta, resize_instance, + allocations, block_device_info=None, power_on=True): + injected_files = admin_password = None + # Finish migration is just like spawning the guest on a destination + # host during resize/cold migrate, so re-use the spawn() fake to + # claim resources and track the instance on this "hypervisor". + self.spawn(context, instance, image_meta, injected_files, + admin_password, allocations, + block_device_info=block_device_info, power_on=power_on) + + def confirm_migration(self, context, migration, instance, network_info): + # Confirm migration cleans up the guest from the source host so just + # destroy the guest to remove it from the list of tracked instances + # unless it is a same-host resize. + if migration.source_compute != migration.dest_compute: + self.destroy(context, instance, network_info) + + def pre_live_migration(self, context, instance, block_device_info, + network_info, disk_info, migrate_data): + return migrate_data + + def rollback_live_migration_at_destination(self, context, instance, + network_info, + block_device_info, + destroy_disks=True, + migrate_data=None): + return + + def _test_remove_vm(self, instance_uuid): + """Removes the named VM, as if it crashed. 
For testing.""" + self.instances.pop(instance_uuid) + + def host_power_action(self, action): + """Reboots, shuts down or powers up the host.""" + return action + + def host_maintenance_mode(self, host, mode): + """Start/Stop host maintenance window. On start, it triggers + guest VMs evacuation. + """ + if not mode: + return 'off_maintenance' + return 'on_maintenance' + + def set_host_enabled(self, enabled): + """Sets the specified host's ability to accept new instances.""" + if enabled: + return 'enabled' + return 'disabled' + + def get_volume_connector(self, instance): + return {'ip': CONF.my_block_storage_ip, + 'initiator': 'fake', + 'host': self._host} + + def get_available_nodes(self, refresh=False): + return self._nodes + + def instance_on_disk(self, instance): + return False + + def quiesce(self, context, instance, image_meta): + pass + + def unquiesce(self, context, instance, image_meta): + pass + + +class FakeVirtAPI(virtapi.VirtAPI): + @contextlib.contextmanager + def wait_for_instance_event(self, instance, event_names, deadline=300, + error_callback=None): + # NOTE(danms): Don't actually wait for any events, just + # fall through + yield + + def exit_wait_early(self, events): + # We never wait, so there is nothing to exit early + pass + + def update_compute_provider_status(self, context, rp_uuid, enabled): + pass + + +class SmallFakeDriver(FakeDriver): + # The api samples expect specific cpu memory and disk sizes. In order to + # allow the FakeVirt driver to be used outside of the unit tests, provide + # a separate class that has the values expected by the api samples. So + # instead of requiring new samples every time those + # values are adjusted allow them to be overwritten here. + + vcpus = 2 + memory_mb = 8192 + local_gb = 1028 + + +class MediumFakeDriver(FakeDriver): + # Fake driver that has enough resources to host more than one instance + # but not that much that cannot be exhausted easily + + vcpus = 10 + memory_mb = 8192 + local_gb = 1028 + + +class SameHostColdMigrateDriver(MediumFakeDriver): + """MediumFakeDriver variant that supports same-host cold migrate.""" + capabilities = dict(FakeDriver.capabilities, + supports_migrate_to_same_host=True) + + +class RescueBFVDriver(MediumFakeDriver): + capabilities = dict(FakeDriver.capabilities, supports_bfv_rescue=True) + + +class PowerUpdateFakeDriver(SmallFakeDriver): + # A specific fake driver for the power-update external event testing. 
+ + def __init__(self, virtapi): + super(PowerUpdateFakeDriver, self).__init__(virtapi=None) + self.driver = ironic.IronicDriver(virtapi=virtapi) + + def power_update_event(self, instance, target_power_state): + """Update power state of the specified instance in the nova DB.""" + self.driver.power_update_event(instance, target_power_state) + + +class MediumFakeDriverWithNestedCustomResources(MediumFakeDriver): + # A MediumFakeDriver variant that also reports CUSTOM_MAGIC resources on + # a nested resource provider + vcpus = 10 + memory_mb = 8192 + local_gb = 1028 + child_resources = { + 'CUSTOM_MAGIC': { + 'total': 10, + 'reserved': 0, + 'min_unit': 1, + 'max_unit': 10, + 'step_size': 1, + 'allocation_ratio': 1, + } + } + + def update_provider_tree(self, provider_tree, nodename, allocations=None): + super( + MediumFakeDriverWithNestedCustomResources, + self).update_provider_tree( + provider_tree, nodename, + allocations=allocations) + + if not provider_tree.exists(nodename + '-child'): + provider_tree.new_child(name=nodename + '-child', + parent=nodename) + + provider_tree.update_inventory(nodename + '-child', + self.child_resources) + + +class FakeFinishMigrationFailDriver(FakeDriver): + """FakeDriver variant that will raise an exception from finish_migration""" + + def finish_migration(self, *args, **kwargs): + raise exception.VirtualInterfaceCreateException() + + +class PredictableNodeUUIDDriver(SmallFakeDriver): + """SmallFakeDriver variant that reports a predictable node uuid in + get_available_resource, like IronicDriver. + """ + + def get_available_resource(self, nodename): + resources = super( + PredictableNodeUUIDDriver, self).get_available_resource(nodename) + # This is used in ComputeNode.update_from_virt_driver which is called + # from the ResourceTracker when creating a ComputeNode. + resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename) + return resources + + +class FakeRescheduleDriver(FakeDriver): + """FakeDriver derivative that triggers a reschedule on the first spawn + attempt. This is expected to only be used in tests that have more than + one compute service. + """ + # dict, keyed by instance uuid, mapped to a boolean telling us if the + # instance has been rescheduled or not + rescheduled = {} + + def spawn(self, context, instance, image_meta, injected_files, + admin_password, allocations, network_info=None, + block_device_info=None, power_on=True, accel_info=None): + if not self.rescheduled.get(instance.uuid, False): + # We only reschedule on the first time something hits spawn(). + self.rescheduled[instance.uuid] = True + raise exception.ComputeResourcesUnavailable( + reason='FakeRescheduleDriver') + super(FakeRescheduleDriver, self).spawn( + context, instance, image_meta, injected_files, + admin_password, allocations, network_info, block_device_info, + power_on) + + +class FakeRescheduleDriverWithNestedCustomResources( + FakeRescheduleDriver, MediumFakeDriverWithNestedCustomResources): + pass + + +class FakeBuildAbortDriver(FakeDriver): + """FakeDriver derivative that always fails on spawn() with a + BuildAbortException so no reschedule is attempted. 
+ """ + + def spawn(self, context, instance, image_meta, injected_files, + admin_password, allocations, network_info=None, + block_device_info=None, power_on=True, accel_info=None): + raise exception.BuildAbortException( + instance_uuid=instance.uuid, reason='FakeBuildAbortDriver') + + +class FakeBuildAbortDriverWithNestedCustomResources( + FakeBuildAbortDriver, MediumFakeDriverWithNestedCustomResources): + pass + + +class FakeUnshelveSpawnFailDriver(FakeDriver): + """FakeDriver derivative that always fails on spawn() with a + VirtualInterfaceCreateException when unshelving an offloaded instance. + """ + + def spawn(self, context, instance, image_meta, injected_files, + admin_password, allocations, network_info=None, + block_device_info=None, power_on=True, accel_info=None): + if instance.vm_state == vm_states.SHELVED_OFFLOADED: + raise exception.VirtualInterfaceCreateException( + 'FakeUnshelveSpawnFailDriver') + # Otherwise spawn normally during the initial build. + super(FakeUnshelveSpawnFailDriver, self).spawn( + context, instance, image_meta, injected_files, + admin_password, allocations, network_info, block_device_info, + power_on) + + +class FakeUnshelveSpawnFailDriverWithNestedCustomResources( + FakeUnshelveSpawnFailDriver, MediumFakeDriverWithNestedCustomResources): + pass + + +class FakeLiveMigrateDriver(FakeDriver): + """FakeDriver derivative to handle force_complete and abort calls. + + This module serves those tests that need to abort or force-complete + the live migration, thus the live migration will never be finished + without the force_complete_migration or delete_migration API calls. + + """ + + def __init__(self, virtapi, read_only=False): + super(FakeLiveMigrateDriver, self).__init__(virtapi, read_only) + self._migrating = True + self._abort_migration = True + + def live_migration(self, context, instance, dest, + post_method, recover_method, block_migration=False, + migrate_data=None): + self._abort_migration = False + self._migrating = True + count = 0 + while self._migrating and count < 50: + time.sleep(0.1) + count = count + 1 + + if self._abort_migration: + recover_method(context, instance, dest, migrate_data, + migration_status='cancelled') + else: + post_method(context, instance, dest, block_migration, + migrate_data) + + def live_migration_force_complete(self, instance): + self._migrating = False + if instance.uuid in self.instances: + del self.instances[instance.uuid] + + def live_migration_abort(self, instance): + self._abort_migration = True + self._migrating = False + + def post_live_migration(self, context, instance, block_device_info, + migrate_data=None): + # Runs on the source host, called from + # ComputeManager._post_live_migration so just delete the instance + # from being tracked on the source host. + self.destroy(context, instance, network_info=None, + block_device_info=block_device_info) + + +class FakeLiveMigrateDriverWithNestedCustomResources( + FakeLiveMigrateDriver, MediumFakeDriverWithNestedCustomResources): + pass + + +class FakeDriverWithPciResources(SmallFakeDriver): + + PCI_ADDR_PF1 = '0000:01:00.0' + PCI_ADDR_PF1_VF1 = '0000:01:00.1' + PCI_ADDR_PF2 = '0000:02:00.0' + PCI_ADDR_PF2_VF1 = '0000:02:00.1' + PCI_ADDR_PF3 = '0000:03:00.0' + PCI_ADDR_PF3_VF1 = '0000:03:00.1' + + # NOTE(gibi): Always use this fixture along with the + # FakeDriverWithPciResources to make the necessary configuration for the + # driver. + class FakeDriverWithPciResourcesConfigFixture(fixtures.Fixture): + def setUp(self): + super(FakeDriverWithPciResources. 
+ FakeDriverWithPciResourcesConfigFixture, self).setUp() + # Set passthrough_whitelist before the compute node starts to match + # with the PCI devices reported by this fake driver. + + # NOTE(gibi): 0000:01:00 is tagged to physnet1 and therefore not a + # match based on physnet to our sriov port + # 'port_with_sriov_resource_request' as the network of that port + # points to physnet2 with the attribute + # 'provider:physical_network'. Nova pci handling already enforces + # this rule. + # + # 0000:02:00 and 0000:03:00 are both tagged to physnet2 and + # therefore a good match for our sriov port based on physnet. + # Having two PFs on the same physnet will allow us to test the + # placement allocation - physical allocation matching based on the + # bandwidth allocation in the future. + CONF.set_override('passthrough_whitelist', override=[ + jsonutils.dumps( + { + "address": { + "domain": "0000", + "bus": "01", + "slot": "00", + "function": ".*"}, + "physical_network": "physnet1", + } + ), + jsonutils.dumps( + { + "address": { + "domain": "0000", + "bus": "02", + "slot": "00", + "function": ".*"}, + "physical_network": "physnet2", + } + ), + jsonutils.dumps( + { + "address": { + "domain": "0000", + "bus": "03", + "slot": "00", + "function": ".*"}, + "physical_network": "physnet2", + } + ), + ], + group='pci') + + self.useFixture(fixtures.MockPatch( + 'nova.pci.utils.get_mac_by_pci_address', + return_value='52:54:00:1e:59:c6')) + + self.useFixture(fixtures.MockPatch( + 'nova.pci.utils.get_vf_num_by_pci_address', + return_value=1)) + + def get_available_resource(self, nodename): + host_status = super( + FakeDriverWithPciResources, self).get_available_resource(nodename) + # 01:00.0 - PF - ens1 + # |---- 01:00.1 - VF + # + # 02:00.0 - PF - ens2 + # |---- 02:00.1 - VF + # + # 03:00.0 - PF - ens3 + # |---- 03:00.1 - VF + host_status['pci_passthrough_devices'] = jsonutils.dumps([ + { + 'address': self.PCI_ADDR_PF1, + 'product_id': 'fake-product_id', + 'vendor_id': 'fake-vendor_id', + 'status': 'available', + 'dev_type': 'type-PF', + 'parent_addr': None, + 'numa_node': 0, + 'label': 'fake-label', + }, + { + 'address': self.PCI_ADDR_PF1_VF1, + 'product_id': 'fake-product_id', + 'vendor_id': 'fake-vendor_id', + 'status': 'available', + 'dev_type': 'type-VF', + 'parent_addr': self.PCI_ADDR_PF1, + 'numa_node': 0, + 'label': 'fake-label', + "parent_ifname": self._host + "-ens1", + }, + { + 'address': self.PCI_ADDR_PF2, + 'product_id': 'fake-product_id', + 'vendor_id': 'fake-vendor_id', + 'status': 'available', + 'dev_type': 'type-PF', + 'parent_addr': None, + 'numa_node': 0, + 'label': 'fake-label', + }, + { + 'address': self.PCI_ADDR_PF2_VF1, + 'product_id': 'fake-product_id', + 'vendor_id': 'fake-vendor_id', + 'status': 'available', + 'dev_type': 'type-VF', + 'parent_addr': self.PCI_ADDR_PF2, + 'numa_node': 0, + 'label': 'fake-label', + "parent_ifname": self._host + "-ens2", + }, + { + 'address': self.PCI_ADDR_PF3, + 'product_id': 'fake-product_id', + 'vendor_id': 'fake-vendor_id', + 'status': 'available', + 'dev_type': 'type-PF', + 'parent_addr': None, + 'numa_node': 0, + 'label': 'fake-label', + }, + { + 'address': self.PCI_ADDR_PF3_VF1, + 'product_id': 'fake-product_id', + 'vendor_id': 'fake-vendor_id', + 'status': 'available', + 'dev_type': 'type-VF', + 'parent_addr': self.PCI_ADDR_PF3, + 'numa_node': 0, + 'label': 'fake-label', + "parent_ifname": self._host + "-ens3", + }, + ]) + return host_status + + +class FakeLiveMigrateDriverWithPciResources( + FakeLiveMigrateDriver, 
        FakeDriverWithPciResources):
+    """FakeDriver derivative to handle force_complete and abort calls.
+
+    This module serves those tests that need to abort or force-complete
+    the live migration, thus the live migration will never be finished
+    without the force_complete_migration or delete_migration API calls.
+
+    """
+
+
+class FakeDriverWithCaching(FakeDriver):
+    def __init__(self, *a, **k):
+        super(FakeDriverWithCaching, self).__init__(*a, **k)
+        self.cached_images = set()
+
+    def cache_image(self, context, image_id):
+        if image_id in self.cached_images:
+            return False
+        else:
+            self.cached_images.add(image_id)
+            return True
diff --git a/requirements.txt b/requirements.txt
index c13c31d2..b07778e2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,4 +20,4 @@ PyYAML==5.1.2
 #awscli>=1.16.224
 progressbar2==3.43.1
 testresources==2.0.1
-git+https://github.com/Giuseppe1992/mapping_distrinet-1.git
\ No newline at end of file
+git+https://github.com/Giuseppe1992/mapping_distrinet-1.git
diff --git a/setup.py b/setup.py
index 3ca9adf9..a04ece7f 100644
--- a/setup.py
+++ b/setup.py
@@ -38,3 +38,4 @@
 include_package_data = True,
 zip_safe = True
 )
+
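
Note on add_route() in openstack/fake.py above: the default gateway for each namespace is taken to be the first usable address of the instance's subnet, with only /8, /16 and /24 prefixes handled explicitly. Since fake.py already imports netaddr, the same convention can be computed for arbitrary prefix lengths; the sketch below is a minimal illustration of that idea under the same first-address-is-gateway assumption (derive_gateway is an illustrative name, not a helper defined in the patch).

```python
# Minimal sketch, not part of the patch: compute the "first address of the
# subnet" gateway used by add_route() for any prefix length via netaddr.
import netaddr


def derive_gateway(cidr):
    """Return the first usable address of the network containing cidr."""
    network = netaddr.IPNetwork(cidr)
    return str(netaddr.IPAddress(network.first + 1))


assert derive_gateway("10.20.30.5/24") == "10.20.30.1"  # matches the /24 branch
assert derive_gateway("172.17.0.6/16") == "172.17.0.1"  # matches the /16 branch
assert derive_gateway("10.0.0.5/8") == "10.0.0.1"       # matches the /8 branch
```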