  
===== Virtual Machines =====
Each VPSHOST is home to <del>8</del> 4 virtual machines.  Each VM has its own JBOD disk assigned to it with 4GB RAM and 1 vCPU, **except** the first VM on each VPSHOST, which runs off the root drive.
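
A quick way to eyeball this layout on a host (a sketch using standard libvirt tooling; the guest name is hypothetical and assumes guests are named after their vpm numbers):

<code>
# List all guests defined on this VPSHOST
virsh list --all

# Show which block device backs a given VM
virsh domblklist vpm051
</code>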
  
In June 2016, we marked down all the even-numbered VPSes and made a change to teuthology which creates the VMs with 4GB RAM, as 2GB per VM wasn't meeting the needs.  See http://tracker.ceph.com/issues/15052
==== Setting up a VPSHOST from scratch ====
**NOTE:** This has been adapted to be applicable for 4 VPSes and disks per host.

If setting up a host as a new or replacement VPSHOST, be sure to update your libvirt config.  See http://docs.ceph.com/teuthology/docs/downburst_vms.html#vps-hosts.
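
The ''virsh pool-refresh'' loop in the steps below implies one directory-backed libvirt storage pool per VM.  If a pool ever needs to be (re)created by hand, a minimal sketch (the pool name and path follow the vpm naming used below; the ''vmhost.yml'' play may already handle this):

<code>
# Define, build, start, and autostart a dir-backed pool for one VM
virsh pool-define-as vpm051 dir --target /srv/libvirtpool/vpm051
virsh pool-build vpm051
virsh pool-start vpm051
virsh pool-autostart vpm051
</code>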

If you install the machine using a ''-stock'' cobbler profile, you'll need to run the common role as well.
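
A sketch of that step, assuming the common role is applied via a ''common.yml'' playbook invoked the same way as ''vmhost.yml'' below:

<code>
ansible-playbook common.yml --limit="mira###.front.sepia.ceph.com"
</code>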
  
<code>
apt-get install xfsprogs vim
  
# Determine the first and last VPS number by reading the VPSHOST's description in the lock db
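# e.g. (assuming tl is an alias for teuthology-lock):
#   tl --list mira###.front.sepia.ceph.com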
for disk in sd{b..d}; do mkfs -t xfs -f /dev/$disk; done
  
## Not really needed if the next task and `mount -a` succeed
# $num should be the second VPM
num=51; for disk in sd{b..d}; do mount /dev/$disk /srv/libvirtpool/vpm0$num; let num=num+2; done
# OR if VPM$num is >= 100,
num=101; for disk in sd{b..d}; do mount /dev/$disk /srv/libvirtpool/vpm$num; let num=num+2; done
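
# Quick check that each pool directory is now its own mount
df -h /srv/libvirtpool/vpm*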
  
# $num should be the second VPM
num=51; for disk in sd{b..d}; do echo -e "UUID=$(blkid -s UUID -o value /dev/$disk)\t/srv/libvirtpool/vpm0$num\txfs\tdefaults,noatime,nodiratime,nobarrier,inode64,logbufs=8,logbsize=256k,largeio\t0\t0"; let num=num+2; done >> /etc/fstab
# OR if VPM$num is >= 100,
num=101; for disk in sd{b..d}; do echo -e "UUID=$(blkid -s UUID -o value /dev/$disk)\t/srv/libvirtpool/vpm$num\txfs\tdefaults,noatime,nodiratime,nobarrier,inode64,logbufs=8,logbsize=256k,largeio\t0\t0"; let num=num+2; done >> /etc/fstab
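
# Per the note above, mounting everything from fstab makes the manual mounts unnecessary
mount -a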

ansible-playbook vmhost.yml --limit="mira###.front.sepia.ceph.com"
  
# Make sure the first VPM is down
tl --update --status down vpm049

# Lock the first VPM on the host to download disk images
tl --lock --os-type ubuntu --os-version 14.04 ubuntu@vpm049
tl --unlock ubuntu@vpm049
tl --lock --os-type ubuntu --os-version 16.04 ubuntu@vpm049
tl --unlock ubuntu@vpm049
tl --lock --os-type centos --os-version 7.3 ubuntu@vpm049
tl --unlock ubuntu@vpm049
  
# Copy the disk images to the other libvirtpools
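# (head -n 1 is the first VM's pool on the root drive, which already holds the
#  freshly downloaded images; tail -n 3 are the other three VMs' JBOD pools)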
for dir in $(ls /srv/libvirtpool/ | tail -n 3); do cp /srv/libvirtpool/$(ls /srv/libvirtpool/ | head -n 1)/{ubuntu*,centos*} /srv/libvirtpool/$dir/; done
  
for pool in $(ls /srv/libvirtpool/); do virsh pool-refresh $pool; done
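
# Confirm all pools are active after the refresh
virsh pool-list --all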
  
# Lock then unlock all the VPSes to verify everything looks good
for sys in vpm{049,051,053,055}; do tl --lock ubuntu@$sys; done
for sys in vpm{049,051,053,055}; do tl --unlock ubuntu@$sys; done
</code>
  