diff --git a/Vagrantfile b/Vagrantfile
index e362d77..43b0821 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -57,16 +57,16 @@ Vagrant.configure("2") do |config|
       :libvirt__netmask => "255.255.255.0",
       :libvirt__network_name => "cluster-network"
     osd1.vm.provider :libvirt do |libvirt|
-      libvirt.storage :file,
-        :size => "30G",
-        :type => "raw",
-        :cache => "writeback"
       libvirt.storage :file,
         :size => "20G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
-        :size => "20G",
+        :size => "40G",
+        :type => "raw",
+        :cache => "writeback"
+      libvirt.storage :file,
+        :size => "40G",
         :type => "raw",
         :cache => "writeback"
     end
@@ -81,16 +81,16 @@ Vagrant.configure("2") do |config|
       :mac => "52:54:00:dc:51:7c",
       :libvirt__network_name => "cluster-network"
     osd2.vm.provider :libvirt do |libvirt|
-      libvirt.storage :file,
-        :size => "30G",
-        :type => "raw",
-        :cache => "writeback"
       libvirt.storage :file,
         :size => "20G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
-        :size => "20G",
+        :size => "40G",
+        :type => "raw",
+        :cache => "writeback"
+      libvirt.storage :file,
+        :size => "40G",
         :type => "raw",
         :cache => "writeback"
     end
diff --git a/scripts/ceph-install.sh b/scripts/ceph-install.sh
index 223c42d..3368b92 100755
--- a/scripts/ceph-install.sh
+++ b/scripts/ceph-install.sh
@@ -39,6 +39,7 @@
 IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
 PUBLIC_NETWORK=$(echo $IP_ADDRESS | awk -F '.' '{print $1"."$2"."$3".0/24"}')
 
 # Cluster configuration directory
+$OUTPUT_LOG "Create new cluster configuration"
 mkdir -p "$GUEST_USER_DIR/ceph-cluster"
 cd "$GUEST_USER_DIR/ceph-cluster"
@@ -46,18 +47,19 @@ cd "$GUEST_USER_DIR/ceph-cluster"
 ceph-deploy new $ADMIN_NODE
 
 # Initialize cluster configuration
-cat << CLUSTERCONFIG >> ceph.conf
+$OUTPUT_LOG "Initialize cluster configuration"
+cat << CLUSTERCONFIG | tee -a ceph.conf
 public network = $PUBLIC_NETWORK
 cluster network = $CLUSTER_NETWORK
 
 osd pool default size = 2
 osd pool default min size = 1
-osd pool default pg num = 256
-osd pool default pgp num = 256
+osd pool default pg num = 128
+osd pool default pgp num = 128
 CLUSTERCONFIG
 
 for NODE in $OSD_NODES; do
-cat << CLUSTERCONFIG >> ceph.conf
+cat << CLUSTERCONFIG | tee -a ceph.conf
 [mds.$NODE]
 mds standby replay = true
 
@@ -65,29 +67,39 @@ mds standby for rank = 0
 CLUSTERCONFIG
 done
 
-# Install ceph on all nodes
+# Deploy ceph on all nodes
+$OUTPUT_LOG "Deploy ceph on all nodes"
 ceph-deploy install --release luminous $NODES
 
 # Create initial monitor
+$OUTPUT_LOG "Create initial monitor"
 ceph-deploy --overwrite-conf mon create-initial
 
 # Deploy configuration file and client keys
+$OUTPUT_LOG "Deploy configuration file and client keys on all nodes"
 ceph-deploy admin $NODES
 
 # Add monitor on osd nodes
-ceph-deploy mon add $OSD_NODES
+for NODE in $OSD_NODES; do
+  $OUTPUT_LOG "Add monitor on $NODE"
+  ceph-deploy mon add $NODE
+done
 
 # Create manager on all nodes
+$OUTPUT_LOG "Create manager on all nodes"
 ceph-deploy mgr create $NODES
 
 # Create metadata server on osd nodes
+$OUTPUT_LOG "Create metadata server on osd nodes"
 ceph-deploy mds create $OSD_NODES
 
 # For each osd node, gather keys from admin node and create OSDs
 for NODE in $OSD_NODES; do
+  $OUTPUT_LOG "Gather keys on $NODE"
   ssh $NODE ceph-deploy gatherkeys $ADMIN_NODE
   ssh $NODE sudo cp /home/$CEPH_ADMIN_USER/ceph.bootstrap-osd.keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
   ssh $NODE sudo chown ceph:ceph /var/lib/ceph/bootstrap-osd/ceph.keyring
+  $OUTPUT_LOG "Create OSDs on $NODE"
   ssh $NODE sudo ceph-volume lvm create --filestore --data /dev/vdb --journal /dev/vda1
   ssh $NODE sudo ceph-volume lvm create --filestore --data /dev/vdc --journal /dev/vda2
 done;
@@ -95,3 +107,5 @@ done;
 # wait 10 seconds and get cluster status
 sleep 10
 sudo ceph -s
+
+$OUTPUT_LOG "Ceph cluster is now ready for pool creation and other activities.\nThanks for waiting. Enjoy!"
diff --git a/scripts/cephtest-utils.sh b/scripts/cephtest-utils.sh
index 7811363..ca9fd0d 100755
--- a/scripts/cephtest-utils.sh
+++ b/scripts/cephtest-utils.sh
@@ -24,6 +24,15 @@
 
 # Version
 VERSION=cephtest-utils-1.0-120417
 
+# Cluster name
+CLUSTER_NAME="cephtest"
+
+# Script name
+SCRIPT=$(basename --suffix=.sh "$0")
+
+# Define log output
+OUTPUT_LOG="echo -e \n{$CLUSTER_NAME} {$SCRIPT} "
+
 # Ceph user
 CEPH_ADMIN_USER="ceph-admin"
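A note on the new $OUTPUT_LOG helper defined above: it holds a complete "echo -e" command line, so the scripts invoke it unquoted and rely on word splitting. A minimal sketch of the expansion, assuming a hypothetical caller inside ceph-install.sh (so SCRIPT resolves to ceph-install):

    # $OUTPUT_LOG "Create initial monitor"
    # word-splits into the equivalent of:
    echo -e '\n{cephtest}' '{ceph-install}' 'Create initial monitor'
    # and prints an empty line followed by:
    # {cephtest} {ceph-install} Create initial monitor

The \n survives as a literal two-character sequence in the variable and is only turned into a newline by echo -e at call time; quoting the expansion ("$OUTPUT_LOG" ...) would instead try to run the whole string as a single command name and fail.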
diff --git a/scripts/pre-up.sh b/scripts/pre-up.sh
index 0d993a1..ab22127 100755
--- a/scripts/pre-up.sh
+++ b/scripts/pre-up.sh
@@ -29,6 +29,7 @@ VERSION=pre-up-2.0-121617
 source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
 
 # (re)Create ssh keys
+$OUTPUT_LOG "Create SSH keys and config for ceph admin user"
 rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER"-id_rsa*
 ssh-keygen -q -N "" -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa"
 chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa"
@@ -37,11 +38,12 @@ chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa.pub"
 # (re)Create ssh config file
 rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
 for NODE in $NODES; do
-  echo -e "Host $NODE\n\tHostname $NODE\n\tUser $CEPH_ADMIN_USER\n\tStrictHostKeyChecking no\n" >> "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
+  echo -e "Host $NODE\n\tHostname $NODE\n\tUser $CEPH_ADMIN_USER\n\tStrictHostKeyChecking no\n" | tee -a "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
 done
 chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
 
 # Clean up IP and PROVISION signals
+$OUTPUT_LOG "Clean up IP and PROVISION signals"
 for NODE in $NODES; do
   rm -f "$HOST_SIGNAL_DIR/$NODE-IP"
   rm -f "$HOST_SIGNAL_DIR/$NODE-PROVISION"
diff --git a/scripts/provision-admin.sh b/scripts/provision-admin.sh
old mode 100644
new mode 100755
index 77f9ceb..472b2d3
--- a/scripts/provision-admin.sh
+++ b/scripts/provision-admin.sh
@@ -34,6 +34,7 @@ if [[ $(hostname -s) != $ADMIN_NODE ]]; then
 fi
 
 # Wait for all nodes to be ready
+$OUTPUT_LOG "Wait for all nodes to be ready, with provision done"
 TIMER_MAX=300
 for NODE in $NODES; do
   TIMER=0
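For illustration, one pass of the pre-up.sh loop above, with the hypothetical value NODE=osd1 (one of the guests defined in the Vagrantfile), appends this stanza to $HOST_SSH_DIR/ceph-admin-config; switching from >> to | tee -a does not change the file contents, it only mirrors them to the provisioning output:

    Host osd1
    	Hostname osd1
    	User ceph-admin
    	StrictHostKeyChecking no

(the indentation is a literal tab produced by the \t escapes).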
"s/pool/server $ADMIN_NODE\n#pool/" /etc/chrony/chrony.conf +fi +# Restart chrony with new config +systemctl restart chrony + # Full update #apt-get -y dist-upgrade #apt-get -y autoremove # Create partitions on journal disk for osd nodes only +$OUTPUT_LOG "Create partitions on journal disk for osd nodes" for NODE in $OSD_NODES; do if [[ $NODE == $GUEST_NAME ]]; then + $OUTPUT_LOG "Create partitions on $NODE" sgdisk --zap-all sgdisk --new=0:0:10G /dev/vda > /dev/null 2>&1 - sgdisk --new=0:0:20G /dev/vda > /dev/null 2>&1 sgdisk --largest-new=0 /dev/vda > /dev/null 2>&1 sgdisk --print /dev/vda fi done -# Modify /etc/hosts to allow ceph-deploy to resolve the guests +# Modify /etc/hosts to allow ceph-deploy to resolve the guest IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1) # Need to replace the loopback address by the real address +$OUTPUT_LOG "Modify /etc/hosts to allow ceph-deploy to resolve the guest" sed -i "s/127.0.0.1\t$GUEST_NAME\t$GUEST_NAME/$IP_ADDRESS\t$GUEST_NAME\t$GUEST_NAME/g" /etc/hosts echo >> /etc/hosts # Signal that IP is ready -echo -e "$IP_ADDRESS\t$GUEST_NAME" > "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-IP" +$OUTPUT_LOG "Signal that IP is ready" +echo -e "$IP_ADDRESS\t$GUEST_NAME" | tee "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-IP" # Wait for all nodes IP and update /etc/hosts +$OUTPUT_LOG "Wait for all nodes IP and update /etc/hosts" TIMER_MAX=300 for NODE in $NODES; do if [[ $NODE != $GUEST_NAME ]]; then @@ -90,11 +109,13 @@ for NODE in $NODES; do # Remove record if existing sed -i "/$NODE/d" /etc/hosts # Add new record - cat "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" >> /etc/hosts + $OUTPUT_LOG "Add IP for $NODE" + cat "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" | tee -a /etc/hosts fi done # Create user ceph-admin if not existing +$OUTPUT_LOG "Create user ceph-admin if not existing and make it paswordless sudoer" cat /etc/passwd | grep $CEPH_ADMIN_USER || useradd -m -s /bin/bash $CEPH_ADMIN_USER # Make ceph-admin passwordless sudoer @@ -104,8 +125,9 @@ chmod 0440 "/etc/sudoers.d/$CEPH_ADMIN_USER" # Copy ceph-admin ssh keys and ssh config from Vagrant folder # Keys must be created by pre-up script # Executed in ceph admin context -sudo -i -u $CEPH_ADMIN_USER <