==== Setting up a VPSHOST from scratch ====
**NOTE:** This has been adapted for hosts with 4 VPSes and 4 disks per host.

If setting up a host as a new or replacement VPSHOST, be sure to update your libvirt config. See http://docs.ceph.com/teuthology/docs/downburst_vms.html#vps-hosts.
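If the storage pools ever need to be (re)defined by hand, a minimal sketch using ''virsh'' looks like the following (this assumes one directory-backed pool per VPS, as described in the docs linked above; the VPS name is illustrative):

<code>
# Define, autostart, and start a directory-backed storage pool for one VPS
virsh pool-define-as vpm051 dir --target /srv/libvirtpool/vpm051
virsh pool-autostart vpm051
virsh pool-start vpm051
</code>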

If you install the machine using a ''-stock'' cobbler profile, you'll need to run the common role as well.
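The exact invocation depends on your local checkout, but running the common role presumably mirrors the ''vmhost.yml'' run shown below (this assumes a ''common.yml'' playbook alongside ''vmhost.yml''):

<code>
# Hypothetical example; adjust the hostname as appropriate
ansible-playbook common.yml --limit="mira###.front.sepia.ceph.com"
</code>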

<code>
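# xfsprogs provides mkfs.xfs for formatting the data disks below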
apt-get install xfsprogs vim

# Determine the first and last VPS number by reading the VPSHOST's description in the lock db
# In this example, the VPSes that live on the VPSHOST are vpm049, vpm051, vpm053, and vpm055
  
for sys in vpm{049,051,053,055}; do mkdir -p /srv/libvirtpool/$sys; done
for disk in sd{b..d}; do mkfs -t xfs -f /dev/$disk; done
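# Only the three data disks are formatted; sda is assumed to hold the OS, so the first VPS's pool lives on the root disk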
  
## Not strictly needed if the fstab step below and `mount -a` succeed
# $num should be the second VPM
num=51; for disk in sd{b..d}; do mount /dev/$disk /srv/libvirtpool/vpm0$num; let num=num+2; done
# OR if VPM$num is >= 100,
num=101; for disk in sd{b..d}; do mount /dev/$disk /srv/libvirtpool/vpm$num; let num=num+2; done
  
# $num should be the second VPM
num=51; for disk in sd{b..d}; do echo -e "UUID=$(blkid -s UUID -o value /dev/$disk)\t/srv/libvirtpool/vpm0$num\txfs\tdefaults,noatime,nodiratime,nobarrier,inode64,logbufs=8,logbsize=256k,largeio\t0\t0"; let num=num+2; done >> /etc/fstab
# OR if VPM$num is >= 100,
num=101; for disk in sd{b..d}; do echo -e "UUID=$(blkid -s UUID -o value /dev/$disk)\t/srv/libvirtpool/vpm$num\txfs\tdefaults,noatime,nodiratime,nobarrier,inode64,logbufs=8,logbsize=256k,largeio\t0\t0"; let num=num+2; done >> /etc/fstab
  
# Verify fstab, then
ansible-playbook vmhost.yml --limit="mira###.front.sepia.ceph.com"
  
# Make sure the first VPM is down
tl --update --status down vpm049
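# ('tl' is assumed to be a shell alias for teuthology-lock)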

# Lock the first VPM on the host to download disk images
tl --lock --os-type ubuntu --os-version 14.04 ubuntu@vpm049
tl --unlock ubuntu@vpm049
tl --lock --os-type ubuntu --os-version 16.04 ubuntu@vpm049
tl --unlock ubuntu@vpm049
tl --lock --os-type centos --os-version 7.3 ubuntu@vpm049
tl --unlock ubuntu@vpm049
  
# Copy the disk images to the other libvirtpools
for dir in $(ls /srv/libvirtpool/ | tail -n 3); do cp /srv/libvirtpool/$(ls /srv/libvirtpool/ | head -n 1)/{ubuntu*,centos*} /srv/libvirtpool/$dir/; done
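# head -n 1 = the first VPS's pool (which already has the images); tail -n 3 = the other three pools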
  
for pool in $(ls /srv/libvirtpool/); do virsh pool-refresh $pool; done
  
# Lock then unlock all the VPSes to verify everything looks good
for sys in vpm{049,051,053,055}; do tl --lock ubuntu@$sys; done
for sys in vpm{049,051,053,055}; do tl --unlock ubuntu@$sys; done
</code>
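For a quick sanity check after everything above, the pools and mounts can be inspected directly (a minimal sketch, assuming the pools were created during the ''vmhost.yml'' run):

<code>
# All four pools should be listed and active
virsh pool-list --all

# The three XFS data disks should be mounted under /srv/libvirtpool
df -h | grep libvirtpool
</code>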
  