First version with ceph-install in provisioning. Node synchronization is working. Nodes' IP addresses are written to /etc/hosts to avoid confusion with dnsmasq.

This commit is contained in:
Denis Lambolez
2017-12-17 16:18:30 +01:00
parent 7d4af9b48b
commit 14893e7346
11 changed files with 171 additions and 84 deletions
+7 -5
View File
@@ -27,11 +27,7 @@ VERSION=ceph-install-1.0-120417
# This script is executed in guest context
source "/vagrant/scripts/cephtest-utils.sh"
# Directories (depending of the context)
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_SCRIPT_DIR="/vagrant/scripts"
GUEST_VAGRANT_SSH_DIR="/vagrant/.ssh"
# Network (dynamically defined by Vagrant)
IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
PUBLIC_NETWORK=$(echo $IP_ADDRESS | awk -F '.' '{print $1"."$2"."$3".0"}')
@@ -42,6 +38,12 @@ if [[ $(whoami) != $CEPH_ADMIN_USER ]]; then
exit 1
fi
# Make sure this script is run from the admin node
if [[ $(hostname -s) != $ADMIN_NODE ]]; then
echo "This script must be run from $ADMIN_NODE" 1>&2
exit 1
fi
# Cluster configuration directory
mkdir -p "$GUEST_USER_DIR/ceph-cluster"
cd "$GUEST_USER_DIR/ceph-cluster"
+16 -1
View File
@@ -28,7 +28,7 @@ VERSION=cephtest-utils-1.0-120417
CEPH_ADMIN_USER="ceph-admin"
CEPH_ADMIN_EXEC="sudo -i -u $CEPH_ADMIN_USER"
# Machines
# Nodes
ADMIN_NODE="node-admin"
OSD_NODES="node-osd1 node-osd2"
NODES="$ADMIN_NODE $OSD_NODES"
@@ -36,3 +36,18 @@ NODES="$ADMIN_NODE $OSD_NODES"
# Networks
CLUSTER_NETWORK="172.28.128.0"
# Guest name
GUEST_NAME=$(hostname -s)
# Guest directories
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_DIR="/vagrant"
GUEST_VAGRANT_SCRIPT_DIR="$GUEST_VAGRANT_DIR/scripts"
GUEST_VAGRANT_SSH_DIR="$GUEST_VAGRANT_DIR/.ssh"
GUEST_VAGRANT_SIGNAL_DIR="$GUEST_VAGRANT_DIR/.signals"
# Host directories
HOST_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
HOST_SSH_DIR="$(readlink -f "$HOST_SCRIPT_DIR/../.ssh")"
HOST_SIGNAL_DIR="$(readlink -f "$HOST_SCRIPT_DIR/../.signals")"
+39
View File
@@ -0,0 +1,39 @@
#!/bin/bash
# ========================================================================================
# Execute post-destroy cleaning
#
# Written by : Denis Lambolez
# Release : 1.0
# Creation date : 16 December 2017
# Description : Bash script
# This script has been designed and written on Ubuntu 16.04 platform.
# It must be executed in vagrant context
# Usage : ./post-destroy.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 1.0 | 12.16.17 | Denis Lambolez | Creation
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=post-destroy-1.0-121617
# This script is executed in host context
source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
# Clean up the vagrant libvirt networks so the next 'vagrant up' starts from a
# fresh configuration: first destroy (stop) the network if it is active, then
# undefine (remove) it if it is still defined.
for NETWORK in vagrant-libvirt vagrant-private-dhcp; do
    # 'grep -w active' matches the word "active" only, so a network listed as
    # "inactive" is not destroyed by mistake (plain 'grep active' would match it).
    if virsh net-list --all 2> /dev/null | grep -- "$NETWORK" | grep -qw active; then
        virsh net-destroy "$NETWORK" 2> /dev/null
    fi
    # Undefine the network whether it was active or not, as long as it exists.
    if virsh net-list --all 2> /dev/null | grep -q -- "$NETWORK"; then
        virsh net-undefine "$NETWORK" 2> /dev/null
    fi
done
@@ -1,35 +1,32 @@
#!/bin/bash
# ========================================================================================
# Execute preflight configuration needed to deploy vagrant cluster
# Execute pre-up configuration needed to deploy vagrant cluster
#
# Written by : Denis Lambolez
# Release : 1.1
# Creation date : 04 December 2017
# Release : 2.0
# Creation date : 16 December 2017
# Description : Bash script
# This script has been designed and written on Ubuntu 16.04 plateform.
# It must be executed in vagrant context
# Usage : ./vagrant-preflight.sh
# Usage : ./pre-up.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 2.0 | 12.16.17 | Denis Lambolez | Renamed pre-up.sh and linked to post-destroy
# 1.1 | 12.04.17 | Denis Lambolez | Sourcing parameters from cephtest-utils
# 1.0 | 12.02.17 | Denis Lambolez | Creation
# | | |
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=vagrant-preflight-1.0-120217
VERSION=pre-up-2.0-121617
# This script is executed in host context
source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
# Directories (depending of the context)
HOST_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
HOST_SSH_DIR="$(readlink -f "$HOST_SCRIPT_DIR/../.ssh")"
# (re)Create ssh keys
rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER"-id_rsa*
@@ -43,3 +40,9 @@ for NODE in $NODES; do
echo -e "Host $NODE\n\tHostname $NODE\n\tUser $CEPH_ADMIN_USER\n\tStrictHostKeyChecking no\n" >> "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
done
chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
# Clean up IP and PROVISION signals
for NODE in $NODES; do
rm -f "$HOST_SIGNAL_DIR/$NODE-IP"
rm -f "$HOST_SIGNAL_DIR/$NODE-PROVISION"
done
@@ -3,35 +3,32 @@
# Execute preflight configuration needed to deploy ceph distributed storage
#
# Written by : Denis Lambolez
# Release : 1.1
# Creation date : 04 December 2017
# Release : 2.0
# Creation date : 17 December 2017
# Description : Bash script
# This script has been designed and written on Ubuntu 16.04 plateform.
# It must be executed in privileged mode
# Usage : ./ceph-preflight.sh
# Usage : ./provision.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 2.0 | 12.17.17 | Denis Lambolez | Adding /etc/hosts modification and
# | | | synchronization with other nodes. Renamed
# | | | to provision.sh
# 1.1 | 12.04.17 | Denis Lambolez | Sourcing parameters from cephtest-utils
# 1.0 | 12.02.17 | Denis Lambolez | Creation
# | | |
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=ceph-preflight-1.0-120217
VERSION=provision-2.0-121717
# This script is executed in guest context
source "/vagrant/scripts/cephtest-utils.sh"
# Directories (depending of the context)
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_SCRIPT_DIR="/vagrant/scripts"
GUEST_VAGRANT_SSH_DIR="/vagrant/.ssh"
# Make sure only root can run the script
if [[ $EUID -ne 0 ]]; then
@@ -39,8 +36,11 @@ if [[ $EUID -ne 0 ]]; then
exit 1
fi
# Create user ceph-admin
useradd -m -s /bin/bash $CEPH_ADMIN_USER
# Create user ceph-admin if not existing
cat /etc/passwd | grep $CEPH_ADMIN_USER
if [[ $? -ne 0 ]]; then
useradd -m -s /bin/bash $CEPH_ADMIN_USER
fi
# Make ceph-admin passwordless sudoer
echo "$CEPH_ADMIN_USER ALL = (root) NOPASSWD:ALL" | tee "/etc/sudoers.d/$CEPH_ADMIN_USER"
@@ -72,15 +72,15 @@ apt-get update
# vnstat for network stats, htop for system monitor and ceph-deploy
apt-get -y install chrony gdisk vnstat htop ceph-deploy
# Modify /etc/hosts to allow ceph-deploy to resolve the guest
# Need to replace the loopback address by the real address
GUEST_NAME=$(hostname -s)
# Modify /etc/hosts to allow ceph-deploy to resolve the guests
IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
# Need to replace the loopback address by the real address
sed -i "s/127.0.0.1\t$GUEST_NAME\t$GUEST_NAME/$IP_ADDRESS\t$GUEST_NAME\t$GUEST_NAME/g" /etc/hosts
# Create partitions on journal disk for osd nodes only
for NODE in $OSD_NODES; do
if [[ NODE == $GUEST_NAME ]]; then
if [[ $NODE == $GUEST_NAME ]]; then
sgdisk --zap-all
sgdisk --new=0:0:10G /dev/vda > /dev/null 2>&1
sgdisk --new=0:0:20G /dev/vda > /dev/null 2>&1
sgdisk --largest-new=0 /dev/vda > /dev/null 2>&1
@@ -91,3 +91,50 @@ done
# Full update
#apt-get -y dist-upgrade
#apt-get -y autoremove
# Signal that IP is ready
echo -e "$IP_ADDRESS\t$GUEST_NAME" > "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-IP"
# Wait for all nodes IP and update /etc/hosts
TIMER_MAX=300
echo >> /etc/hosts
for NODE in $NODES; do
if [[ $NODE != $GUEST_NAME ]]; then
TIMER=0
until [[ -r "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" ]]; do
sleep 1
TIMER=$(($TIMER + 1))
if [[ $TIMER -gt $TIMER_MAX ]]; then
echo "Can't get IP from $NODE" >&2
exit 1
fi
done
# Remove record if existing
sed -i "/$NODE/d" /etc/hosts
# Add new record
cat "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" >> /etc/hosts
fi
done
# Signal that provision is done
echo "$(date --rfc-3339=ns) - Done!" | tee "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-PROVISION"
# Continue provisioning on admin node only
[[ $GUEST_NAME != $ADMIN_NODE ]] && exit 0
# Wait for all nodes to be ready
TIMER_MAX=300
for NODE in $NODES; do
TIMER=0
until [[ -r "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-PROVISION" ]]; do
sleep 1
TIMER=$(($TIMER + 1))
if [[ $TIMER -gt $TIMER_MAX ]]; then
echo "Waited too long for $NODE!" >&2
exit 1
fi
done
done
# Install ceph in ceph-admin context
$CEPH_ADMIN_EXEC $GUEST_VAGRANT_SCRIPT_DIR/ceph-install.sh