First version with ceph-install in provisioning. Node synchronization is working. Nodes' IPs are written to /etc/hosts to avoid confusion with dnsmasq.

Denis Lambolez 2017-12-17 16:18:30 +01:00
parent 7d4af9b48b
commit 14893e7346
11 changed files with 171 additions and 84 deletions

.signals/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
*
!.gitignore
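This ignore-everything-but-itself pattern keeps the otherwise empty .signals directory in the repository while the signal files written during provisioning stay untracked. A quick sanity check (the signal file name is hypothetical; git check-ignore is standard git):

    touch .signals/node-admin-IP
    git check-ignore -v .signals/node-admin-IP   # matched by '*', ignored
    git check-ignore -v .signals/.gitignore     # no match: not ignored, stays in the repo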

.ssh/.gitignore vendored

@@ -1 +1,2 @@
id_rsa
*
!.gitignore


@@ -1,15 +0,0 @@
Host node-admin
Hostname node-admin
User ceph-admin
StrictHostKeyChecking no
Host node-osd1
Hostname node-osd1
User ceph-admin
StrictHostKeyChecking no
Host node-osd2
Hostname node-osd2
User ceph-admin
StrictHostKeyChecking no
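This hand-written per-node SSH client configuration goes away because pre-up.sh (below) now regenerates an equivalent file as .ssh/ceph-admin-config on every bring-up; presumably it is consumed through ssh's -F option, e.g.:

    ssh -F .ssh/ceph-admin-config node-osd1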


@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAq5y73H3NbLPGsS/JRbNNHlR/DiwBilcXygw3F+1o82J5FiWR
gDB85z4M0DpndvzMvqEtKJo2trCaxHrkfqFcmp8PKzlzkcKJLcY4UDJEJSeU0KKF
pJhSFTD8ZVZaIB1vLXvwcfmcyly3OEX0HWhep/GQeQiIzxPG+eLQ3wIV7Y08Q9ze
uREd4pbpEi5G2XDJWmb6+tF234ZoyS+A7j56ItfskzWVHn4sOdMScCvomfcymVaX
B6UUJuhJsbosEPtoliF6XoIPL1eCFzqeo/nU0qk9OXT+53KVBwiSmPUYCzG4J4Br
0J6QieGWCvPRZhxfsEIFUbR52PoJFmYHHATeEQIDAQABAoIBAEG5yYls9RxIzjRW
ZPO49DRFkFPRLtXY/Vd4zjDv0F5GvLFqugsqhuvZq/akJ7Q+pfvspusCBSp7AQt3
NcP58QmXGawoWMbehsNtQ2wTlF3WhvxRAnZDkExlcoUtSV87ZrU43qIIQ7CNSe7X
e+M3celIb1irmBoXLQHPiMegeN19tUUEeHjXB2AlPrCZaco4mJ+PQhuh1uzLi4QD
blGuzNwSOYHk21xvEXm7xoe+Q30utF0xYpOjDDUAlFCJRy/UNqC+2VYzBLtgqSZA
dU20BuMpzgFokuywfjm7ftUoHtYiiUJZXDhXot9gBEN+WdC83XdT225pZfaW862J
3azDoAECgYEA4Hg4yzi3A47s82xKXiJUM5DAXcAx1jd9leePhOjhl2NEfTlVPMDZ
45BFBYhumOcgN/nrM4OqMi/hoCjcC2aDs4jG+12k7Y4MZijyrWmqc7PzJzLzG1yk
7hlsSsMH454eZ5kjfAyzHumN9/Iz5hWwrF9Fesau5mpbgHR5/ekB1pECgYEAw7fL
wZvapsIDUQOZeKZxNaDGDS/QG+BuZ5cZk3KGEIisOSv0sKMNmgs1rowPmzgZv2MF
ubVzMa5wwuYc6q4vrQFijXm9uo2IlaOGXa84XSvCWpzmH4qa8+OZAtdyXZucUQlh
JztK6plSJw57pxx1go/sJ01UGx9Xe7xXtO1RT4ECgYEAntBwXlgiJEJbI17avv9K
07D2aV4x5H4ePJE42bBq69EwA+kshW9subS5INEGbugXw4LvBeOB50A5Qjemd+pw
A/Yh0nbO2cRGwYD3MfowvuvyfSlvF0zU5CDYnYyfaEvW9zVCBeJ2WMilyWdzkLaH
6Wz+m382/48AbkH6jcrtogECgYAJO96tH8FNJNIrPFQ9UPz+Xrt5W8whxJDw6jwW
rMyOcyRM/jnpK4g0wFqkBY+m7xqn4PAgNuNccLJ5P4oVuGofa3VO33OIHa1YAgMQ
4Hd0TeA5jjV9P3jYEODjNW7745+YYyzmQPHj55/sn85NsBYLNucZY5iYPv5NpDEv
JBM2AQKBgHS5+HaCNSIziYWl7uowwxkJR4crg1KKzWqFbtR21F6cswP96k2+nXj2
PymRCMWxRugssIe+GOlcCXsopdkvGvb/mFNug9NEmtbgNKQ/2bZRXlRNz2aEOEpt
ryZn67PmOCV+ttcqna6xfBllsr2wyRrcTMfZQWZZn+gZ9bBYR4WU
-----END RSA PRIVATE KEY-----


@@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrnLvcfc1ss8axL8lFs00eVH8OLAGKVxfKDDcX7WjzYnkWJZGAMHznPgzQOmd2/My+oS0omja2sJrEeuR+oVyanw8rOXORwoktxjhQMkQlJ5TQooWkmFIVMPxlVlogHW8te/Bx+ZzKXLc4RfQdaF6n8ZB5CIjPE8b54tDfAhXtjTxD3N65ER3ilukSLkbZcMlaZvr60XbfhmjJL4DuPnoi1+yTNZUefiw50xJwK+iZ9zKZVpcHpRQm6EmxuiwQ+2iWIXpegg8vV4IXOp6j+dTSqT05dP7ncpUHCJKY9RgLMbgngGvQnpCJ4ZYK89FmHF+wQgVRtHnY+gkWZgccBN4R matou@catsserver-3
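The committed key pair is removed for the same reason: pre-up.sh deletes and recreates ceph-admin-id_rsa* under .ssh/ before each run. The actual ssh-keygen call sits outside the hunk shown below, so this is only an assumed equivalent:

    ssh-keygen -t rsa -b 2048 -N "" \
        -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa" -C "$CEPH_ADMIN_USER"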

Vagrantfile vendored

@@ -5,17 +5,36 @@ Vagrant.configure("2") do |config|
# Get the VagrantFile directory
vagrant_root = File.dirname(__FILE__)
# Trigger the Vagrant preflight script before bringing up the first VM only
# Trigger the Vagrant pre-up script before bringing up the first VM only
config.trigger.before :up, :vm => ["node-admin"], :append_to_path => ["#{vagrant_root}/scripts"] do
run "vagrant-preflight.sh"
run "pre-up.sh"
end
# Trigger the Vagrant post-destroy script after destroying the last VM only
# config.trigger.after :destroy, :vm => ["node-admin"], :append_to_path => ["#{vagrant_root}/scripts"] do
run "post-destroy.sh"
# end
# Shell provisioner for all VMs
config.vm.provision "shell", path: "scripts/ceph-preflight.sh"
config.vm.provision "shell", path: "scripts/provision.sh"
# All VMs are based on the same box
config.vm.box = "bento/ubuntu-16.04"
# Use nfs for shared folder
config.vm.synced_folder ".", "/vagrant",
nfs: true,
linux__nfs_options: ['rw','no_subtree_check','all_squash','async'],
nfs_version: 4,
nfs_udp: false
# Public host bridge
# config.vm.network "public_network",
# network_name: "public-network",
# dev: "br0",
# type: "bridge",
# mode: "bridge"
# Standard configuration for all VMs
config.vm.provider :libvirt do |libvirt|
libvirt.memory = 1024
@@ -30,16 +49,17 @@ Vagrant.configure("2") do |config|
end
# osd1 VM with private cluster network
# 5 additional disks: 1 for ceph journal and 4 for osd
# 3 additional disks: 1 for journals and 2 for osd
config.vm.define "node-osd1" do |osd1|
osd1.vm.hostname = "node-osd1"
osd1.vm.network :private_network,
:type => "dhcp",
:type => "dhcp",
:mac => "52:54:00:79:1e:b0",
:libvirt__dhcp_start => "172.28.128.10",
:libvirt__dhcp_stop => "172.28.128.250",
:libvirt__network_address => "172.28.128.0",
:libvirt__netmask => "255.255.255.0",
:libvirt__network_name => "cluster-net"
:libvirt__network_name => "cluster-network"
osd1.vm.provider :libvirt do |libvirt|
libvirt.storage :file,
:size => "30G",
@@ -57,12 +77,13 @@ Vagrant.configure("2") do |config|
end
# osd2 VM with private cluster network
# 5 additional disks: 1 for ceph journal and 4 for osd
# 3 additional disks: 1 for journals and 2 for osd
config.vm.define "node-osd2" do |osd2|
osd2.vm.hostname = "node-osd2"
osd2.vm.network :private_network,
:type => "dhcp",
:libvirt__network_name => "cluster-net"
:type => "dhcp",
:mac => "52:54:00:dc:51:7c",
:libvirt__network_name => "cluster-network"
osd2.vm.provider :libvirt do |libvirt|
libvirt.storage :file,
:size => "30G",
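Two quick checks that the new pieces behave as intended once node-osd1 is up (virsh and vagrant ssh are standard tooling; cluster-network is the libvirt network name defined above):

    # Host: the private cluster network should be listed and active
    virsh net-list --all | grep cluster-network
    # Guest: /vagrant should now be an NFSv4 mount
    vagrant ssh node-osd1 -c 'mount -t nfs4 | grep /vagrant'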


@@ -27,11 +27,7 @@ VERSION=ceph-install-1.0-120417
# This script is executed in guest context
source "/vagrant/scripts/cephtest-utils.sh"
# Directories (depending on the context)
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_SCRIPT_DIR="/vagrant/scripts"
GUEST_VAGRANT_SSH_DIR="/vagrant/.ssh"
# Network (dynamically defined by Vagrant)
IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
PUBLIC_NETWORK=$(echo $IP_ADDRESS | awk -F '.' '{print $1"."$2"."$3".0"}')
@@ -42,6 +38,12 @@ if [[ $(whoami) != $CEPH_ADMIN_USER ]]; then
exit 1
fi
# Make sure this script is run from the admin node
if [[ $(hostname -s) != $ADMIN_NODE ]]; then
echo "This script must be run from $ADMIN_NODE" 1>&2
exit 1
fi
# Cluster configuration directory
mkdir -p "$GUEST_USER_DIR/ceph-cluster"
cd "$GUEST_USER_DIR/ceph-cluster"


@@ -28,7 +28,7 @@ VERSION=cephtest-utils-1.0-120417
CEPH_ADMIN_USER="ceph-admin"
CEPH_ADMIN_EXEC="sudo -i -u $CEPH_ADMIN_USER"
# Machines
# Nodes
ADMIN_NODE="node-admin"
OSD_NODES="node-osd1 node-osd2"
NODES="$ADMIN_NODE $OSD_NODES"
@@ -36,3 +36,18 @@ NODES="$ADMIN_NODE $OSD_NODES"
# Networks
CLUSTER_NETWORK="172.28.128.0"
# Guest name
GUEST_NAME=$(hostname -s)
# Guest directories
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_DIR="/vagrant"
GUEST_VAGRANT_SCRIPT_DIR="$GUEST_VAGRANT_DIR/scripts"
GUEST_VAGRANT_SSH_DIR="$GUEST_VAGRANT_DIR/.ssh"
GUEST_VAGRANT_SIGNAL_DIR="$GUEST_VAGRANT_DIR/.signals"
# Host directories
HOST_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
HOST_SSH_DIR="$(readlink -f "$HOST_SCRIPT_DIR/../.ssh")"
HOST_SIGNAL_DIR="$(readlink -f "$HOST_SCRIPT_DIR/../.signals")"
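Note that the HOST_* variables are derived from $0 through readlink -f, so they only resolve correctly when cephtest-utils.sh is sourced by a script living in scripts/ on the host, as pre-up.sh and post-destroy.sh do:

    # $0 is the *calling* script when this file is sourced, so HOST_SCRIPT_DIR
    # becomes the caller's directory and .ssh/.signals are found next to it
    source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
    echo "signals: $HOST_SIGNAL_DIR, ssh material: $HOST_SSH_DIR"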

scripts/post-destroy.sh Executable file

@@ -0,0 +1,39 @@
#!/bin/bash
# ========================================================================================
# Execute post-destroy cleaning
#
# Written by : Denis Lambolez
# Release : 1.0
# Creation date : 16 December 2017
# Description : Bash script
# This script has been designed and written on the Ubuntu 16.04 platform.
# It must be executed in vagrant context
# Usage : ./post-destroy.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 1.0 | 12.16.17 | Denis Lambolez | Creation
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=post-destroy-1.0-121617
# This script is executed in host context
source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
# clean-up networks to start with fresh configuration
for NETWORK in vagrant-libvirt vagrant-private-dhcp; do
virsh net-list --all 2> /dev/null | grep $NETWORK | grep active
if [[ $? -eq 0 ]]; then
virsh net-destroy $NETWORK 2> /dev/null
fi
virsh net-list --all 2> /dev/null | grep $NETWORK
if [[ $? -eq 0 ]]; then
virsh net-undefine $NETWORK 2> /dev/null
fi
done
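Since the matching after-destroy trigger is still commented out in the Vagrantfile above, this cleanup presumably has to be run by hand once the cluster is torn down:

    vagrant destroy -f
    ./scripts/post-destroy.sh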


@@ -1,35 +1,32 @@
#!/bin/bash
# ========================================================================================
# Execute preflight configuration needed to deploy vagrant cluster
# Execute pre-up configuration needed to deploy vagrant cluster
#
# Written by : Denis Lambolez
# Release : 1.1
# Creation date : 04 December 2017
# Release : 2.0
# Creation date : 16 December 2017
# Description : Bash script
# This script has been designed and written on the Ubuntu 16.04 platform.
# It must be executed in vagrant context
# Usage : ./vagrant-preflight.sh
# Usage : ./pre-up.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 2.0 | 12.16.17 | Denis Lambolez | Renamed pre-up.sh and linked to post-destroy
# 1.1 | 12.04.17 | Denis Lambolez | Sourcing parameters from cephtest-utils
# 1.0 | 12.02.17 | Denis Lambolez | Creation
# | | |
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=vagrant-preflight-1.0-120217
VERSION=pre-up-2.0-121617
# This script is executed in host context
source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
# Directories (depending on the context)
HOST_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
HOST_SSH_DIR="$(readlink -f "$HOST_SCRIPT_DIR/../.ssh")"
# (re)Create ssh keys
rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER"-id_rsa*
@@ -43,3 +40,9 @@ for NODE in $NODES; do
echo -e "Host $NODE\n\tHostname $NODE\n\tUser $CEPH_ADMIN_USER\n\tStrictHostKeyChecking no\n" >> "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
done
chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
# Clean up IP and PROVISION signals
for NODE in $NODES; do
rm -f "$HOST_SIGNAL_DIR/$NODE-IP"
rm -f "$HOST_SIGNAL_DIR/$NODE-PROVISION"
done
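Each loop iteration appends a block in exactly the format of the hand-written config removed above, reconstructed here from the echo format string (tabs rendered as indentation):

    Host node-admin
        Hostname node-admin
        User ceph-admin
        StrictHostKeyChecking no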


@@ -3,35 +3,32 @@
# Execute preflight configuration needed to deploy ceph distributed storage
#
# Written by : Denis Lambolez
# Release : 1.1
# Creation date : 04 December 2017
# Release : 2.0
# Creation date : 17 December 2017
# Description : Bash script
# This script has been designed and written on the Ubuntu 16.04 platform.
# It must be executed in privileged mode
# Usage : ./ceph-preflight.sh
# Usage : ./provision.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 2.0 | 12.17.17 | Denis Lambolez | Adding /etc/hosts modification and
# | | | synchronization with other nodes. Renamed
# | | | to provision.sh
# 1.1 | 12.04.17 | Denis Lambolez | Sourcing parameters from cephtest-utils
# 1.0 | 12.02.17 | Denis Lambolez | Creation
# | | |
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=ceph-preflight-1.0-120217
VERSION=provision-2.0-121717
# This script is executed in guest context
source "/vagrant/scripts/cephtest-utils.sh"
# Directories (depending on the context)
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_SCRIPT_DIR="/vagrant/scripts"
GUEST_VAGRANT_SSH_DIR="/vagrant/.ssh"
# Make sure only root can run the script
if [[ $EUID -ne 0 ]]; then
@@ -39,8 +36,11 @@ if [[ $EUID -ne 0 ]]; then
exit 1
fi
# Create user ceph-admin
useradd -m -s /bin/bash $CEPH_ADMIN_USER
# Create user ceph-admin if it does not already exist
cat /etc/passwd | grep $CEPH_ADMIN_USER
if [[ $? -ne 0 ]]; then
useradd -m -s /bin/bash $CEPH_ADMIN_USER
fi
# Make ceph-admin passwordless sudoer
echo "$CEPH_ADMIN_USER ALL = (root) NOPASSWD:ALL" | tee "/etc/sudoers.d/$CEPH_ADMIN_USER"
@@ -72,15 +72,15 @@ apt-get update
# vnstat for network stats, htop for system monitor and ceph-deploy
apt-get -y install chrony gdisk vnstat htop ceph-deploy
# Modify /etc/hosts to allow ceph-deploy to resolve the guest
# Need to replace the loopback address by the real address
GUEST_NAME=$(hostname -s)
# Modify /etc/hosts to allow ceph-deploy to resolve the guests
IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
# Need to replace the loopback address by the real address
sed -i "s/127.0.0.1\t$GUEST_NAME\t$GUEST_NAME/$IP_ADDRESS\t$GUEST_NAME\t$GUEST_NAME/g" /etc/hosts
# Create partitions on journal disk for osd nodes only
for NODE in $OSD_NODES; do
if [[ NODE == $GUEST_NAME ]]; then
if [[ $NODE == $GUEST_NAME ]]; then
sgdisk --zap-all
sgdisk --new=0:0:10G /dev/vda > /dev/null 2>&1
sgdisk --new=0:0:20G /dev/vda > /dev/null 2>&1
sgdisk --largest-new=0 /dev/vda > /dev/null 2>&1
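For reference, sgdisk's --new argument is partnum:start:end, where 0 means "use the default" (next free partition number, next free sector), and --largest-new=0 fills the largest remaining free block; the journal disk therefore ends up with a roughly 20G partition plus one covering the rest. The resulting layout can be inspected with:

    sgdisk --print /dev/vda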
@@ -91,3 +91,50 @@ done
# Full update
#apt-get -y dist-upgrade
#apt-get -y autoremove
# Signal that IP is ready
echo -e "$IP_ADDRESS\t$GUEST_NAME" > "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-IP"
# Wait for all nodes IP and update /etc/hosts
TIMER_MAX=300
echo >> /etc/hosts
for NODE in $NODES; do
if [[ $NODE != $GUEST_NAME ]]; then
TIMER=0
until [[ -r "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" ]]; do
sleep 1
TIMER=$(($TIMER + 1))
if [[ $TIMER -gt $TIMER_MAX ]]; then
echo "Can't get IP from $NODE" >&2
exit 1
fi
done
# Remove record if existing
sed -i "/$NODE/d" /etc/hosts
# Add new record
cat "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" >> /etc/hosts
fi
done
# Signal that provision is done
echo "$(date --rfc-3339=ns) - Done!" | tee "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-PROVISION"
# Continue provisioning on admin node only
[[ $GUEST_NAME != $ADMIN_NODE ]] && exit 0
# Wait for all nodes to be ready
TIMER_MAX=300
for NODE in $NODES; do
TIMER=0
until [[ -r "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-PROVISION" ]]; do
sleep 1
TIMER=$(($TIMER + 1))
if [[ $TIMER -gt $TIMER_MAX ]]; then
echo "Waited too long for $NODE!" >&2
exit 1
fi
done
done
# Install ceph in ceph-admin context
$CEPH_ADMIN_EXEC $GUEST_VAGRANT_SCRIPT_DIR/ceph-install.sh
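While the cluster converges, the handshake can be watched from the host, since the NFS share makes .signals/ the host-side view of $GUEST_VAGRANT_SIGNAL_DIR (file names follow the <node>-IP and <node>-PROVISION convention used above):

    watch -n 5 'ls -l .signals/'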