Provisioning OK - Start install

Denis Lambolez 2017-12-03 20:19:25 +01:00
parent a90e859390
commit aec00ed62f
11 changed files with 297 additions and 42 deletions

1
.ssh/.gitignore vendored Normal file

@@ -0,0 +1 @@
id_rsa

15
.ssh/ceph-admin-config Normal file

@@ -0,0 +1,15 @@
Host node-admin
Hostname node-admin
User ceph-admin
StrictHostKeyChecking no
Host node-osd1
Hostname node-osd1
User ceph-admin
StrictHostKeyChecking no
Host node-osd2
Hostname node-osd2
User ceph-admin
StrictHostKeyChecking no

27
.ssh/ceph-admin-id_rsa Normal file

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAqS/4Caje6o3Bxekv0bZM22oqtefn7hXWLLxOQJKfgHEHC24q
uimcZmFhw+ZbWvKyVC9qJbIFbdb5Lpok4RDwrbPx8JGS1nXCVgcmSbk4ssddLeHr
e7v7SP/foc2QmC4mKPYp85HNYDCXBMaMMRiwDlg4LgTIWARRQXeD1LDbnInPC+8t
Xo7PRBlkUysBxzBrQTVa9mWf5Fwix2ND4aoh7S1p4EDbcqvIb62L0iJDYsVWs8dt
2veUetNkw7ZXRb+Iz+tkxXG/I/7KZDxdcDYmtTmG1pVyQ0PAgfIfQKUYnCdAv0Pm
o+u1/UQc1+Rsj6CaM2T2ZLsn8cx3Wv8ra7UjyQIDAQABAoIBAHbeyykOS0EFRGYn
Rn/T1AA9gbC5NY1kLkzUKS7ZVdPKliaDyuVtowOT3DrluL4X0w+vGKdPgtz/s6f4
iXzQiCmjSpO51C92IYqMD2yxBJMn2+IG34raMt3BWYC3i6ZAlnMlMah+govY1+J1
Fb7RF7Gcbix3E8QzlFuo+ykO8d3zF/Qn304A5M0E1s6Za+RlQXc2NRMCI9DnmS86
hNEpr0GrotapbleTIoR+ORBOm/vfwsoB4v/IQoBphE5gU+XzNjsU9IzdryfWK5hG
pWtgA+r1NwHOIr0VCcDZgO18Z+QqVkP7v5fEtHugK2HIZnfWGjo4A+hmwlkbsGyx
PMdmC/kCgYEA4Vzi497fakHqGb4eDIHVpBo/CvihvMfJA+aay9hh+B2/0+t/08H4
l+67Cbgq1lNTVgvw0V+jn09f73f5KKJyu1puisot0NHQJTbCj5MMELkETdsk4Dqc
aOlxV5DRE/9fFi2dNbfkAkRrMJhWaghxRRqprdtur5D5KzATkewxhisCgYEAwDAO
JaPBTaW38AS+LM1CdfTTtSMNeF8a1NR262l7wS5cHU9RwONRZ9uTpAMseYLjMYmU
M3vgfj6yzQn50HSrqRMs3HZMnXEY/8OkDFItFLZRE6BxqxFWz7q7gJtOPrgDB5BF
sx18tY4OfXd/SVh94P+AILzsBzqTIWxCYK+cl9sCgYEA02+hU6dzty6WY4aSjKt3
dJpHUZTJseNDPkyqKNwftni61Cnb6SHQiUMq6kiGsETHKDDVxKpVSg9fc7383tni
iNKkacScTZG8Fl4VFj0hEcN0PVOAenui6W8zrrMo5fkvDhDHsXGRE2t7ocEbOion
k20biwkYBNIgZBucEMkoAbcCgYAZwNbEeHMYwpIpWZlaS48SW2JbLX1KmgPwbSAa
X6IzutChJNXhsRRhhGz+V2aISd/D7ZmsZHcqs6AmFaYiJuAx4d3rFPcjSdp9u2uq
MwDTe7LVnHRe5g3p/C3QB/+uK95MDysn21Wwxn5ulaq6217S3an/uREvzsuUePtU
FXQ4bQKBgCMevjftLyQ1xslHb7CiRDGreRbsuQHXUkJg/mrdZoUFmCDC+Ffj7MJK
B8rpVQ2wUFKD4fgqfZAneM8HMAKKi6Owr9BNYZag/GfQ+NvmdUlwt09fYG7+01+x
I5fE2c2u1loOxUHtGyP+r9FFRhGpFFClJoQa3gakUtn8Z2ORX7ei
-----END RSA PRIVATE KEY-----

1
.ssh/ceph-admin-id_rsa.pub Normal file

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpL/gJqN7qjcHF6S/Rtkzbaiq15+fuFdYsvE5Akp+AcQcLbiq6KZxmYWHD5lta8rJUL2olsgVt1vkumiThEPCts/HwkZLWdcJWByZJuTiyx10t4et7u/tI/9+hzZCYLiYo9inzkc1gMJcExowxGLAOWDguBMhYBFFBd4PUsNucic8L7y1ejs9EGWRTKwHHMGtBNVr2ZZ/kXCLHY0PhqiHtLWngQNtyq8hvrYvSIkNixVazx23a95R602TDtldFv4jP62TFcb8j/spkPF1wNia1OYbWlXJDQ8CB8h9ApRicJ0C/Q+aj67X9RBzX5GyPoJozZPZkuyfxzHda/ytrtSPJ matou@catsserver-3

2
.vault/.gitignore vendored

@@ -1,2 +0,0 @@
*
!.gitignore

README.md

@@ -1,12 +1,19 @@
# cephtest
A Vagrant libvirt kvm cluster to test Ceph distributed storage
### Cluster Design
### Pre-flight
### References
A Vagrant libvirt kvm cluster to test Ceph distributed storage.
## Cluster Design
## Pre-flight
## References
* Ceph: http://ceph.com/
* Vagrant: https://www.vagrantup.com/
* Vagrant libvirt plugin: https://github.com/vagrant-libvirt/vagrant-libvirt
* Vagrant plugins:
* vagrant-libvirt: https://github.com/vagrant-libvirt/vagrant-libvirt
* vagrant-mutate: https://github.com/sciurus/vagrant-mutate
* vagrant-triggers: https://github.com/emyl/vagrant-triggers
* Bento boxes: https://app.vagrantup.com/bento
* Libvirt: https://libvirt.org/
* KVM: https://www.linux-kvm.org/page/Main_Page

64
Vagrantfile vendored

@@ -2,8 +2,21 @@
Vagrant.configure("2") do |config|
config.vm.box = "bento/ubuntu-17.04"
# Get the Vagrantfile directory
vagrant_root = File.dirname(__FILE__)
# Trigger the Vagrant preflight script before bringing up the first VM only
config.trigger.before :up, :vm => ["node-admin"], :append_to_path => ["#{vagrant_root}/scripts"] do
run "vagrant-preflight.sh"
end
# Shell provisioner for all VMs
config.vm.provision "shell", path: "scripts/ceph-preflight.sh"
# All VMs are based on the same box
config.vm.box = "bento/ubuntu-16.04"
# Standard configuration for all VMs
config.vm.provider :libvirt do |libvirt|
libvirt.memory = 1024
libvirt.volume_cache = "writeback"
@@ -11,30 +24,25 @@ Vagrant.configure("2") do |config|
libvirt.video_type = "qxl"
end
config.vm.define "admin", primary: true do |admin|
admin.vm.hostname = "admin.local"
# admin VM
config.vm.define "node-admin", primary: true do |admin|
admin.vm.hostname = "node-admin"
end
config.vm.define "osd1" do |osd1|
osd1.vm.hostname = "osd1.local"
# osd1 VM with private cluster network
# 5 additional disks: 1 for ceph journal and 4 for osd
config.vm.define "node-osd1" do |osd1|
osd1.vm.hostname = "node-osd1"
osd1.vm.network :private_network,
:type => "dhcp",
:libvirt__dhcp_start => "172.28.128.10",
:libvirt__dhcp_stop => "172.28.128.250",
:libvirt__network_address => "172.28.128.0",
:libvirt__domain_name => "osd.private",
:libvirt__network_name => "osd-private"
:libvirt__network_address => "172.28.128.0",
:libvirt__netmask => "255.255.255.0",
:libvirt__network_name => "cluster-net"
osd1.vm.provider :libvirt do |libvirt|
libvirt.storage :file,
:size => "40G",
:type => "raw",
:cache => "writeback"
libvirt.storage :file,
:size => "20G",
:type => "raw",
:cache => "writeback"
libvirt.storage :file,
:size => "20G",
:size => "30G",
:type => "raw",
:cache => "writeback"
libvirt.storage :file,
@@ -48,26 +56,16 @@ Vagrant.configure("2") do |config|
end
end
config.vm.define "osd2" do |osd2|
osd2.vm.hostname = "osd2.local"
# osd2 VM with private cluster network
# 5 additional disks: 1 for ceph journal and 4 for osd
config.vm.define "node-osd2" do |osd2|
osd2.vm.hostname = "node-osd2"
osd2.vm.network :private_network,
:type => "dhcp",
:libvirt__dhcp_start => "172.28.128.10",
:libvirt__dhcp_stop => "172.28.128.250",
:libvirt__network_address => "172.28.128.0",
:libvirt__domain_name => "osd.private",
:libvirt__network_name => "osd-private"
:libvirt__network_name => "cluster-net"
osd2.vm.provider :libvirt do |libvirt|
libvirt.storage :file,
:size => "40G",
:type => "raw",
:cache => "writeback"
libvirt.storage :file,
:size => "20G",
:type => "raw",
:cache => "writeback"
libvirt.storage :file,
:size => "20G",
:size => "30G",
:type => "raw",
:cache => "writeback"
libvirt.storage :file,

46
scripts/ceph-install.sh Normal file

@@ -0,0 +1,46 @@
#!/bin/bash
# ========================================================================================
# Execute ceph distributed storage installation steps from the admin node via ceph-deploy
#
# Written by : Denis Lambolez
# Release : 1.0
# Creation date : 04 December 2017
# Description : Bash script
# This script has been designed and written on the Ubuntu 16.04 platform.
# It must be executed in the ceph-admin context, on the admin node
# Usage : ./ceph-install.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 1.0 | 12.04.17 | Denis Lambolez | Creation
# | | |
# | | |
# | | |
# =========================================================================================
set -xv
# Version
VERSION=ceph-install-1.0-120417
# This script is executed in guest context
source "/vagrant/scripts/cephtest-utils.sh"
# Directories (depending on the context)
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_SCRIPT_DIR="/vagrant/scripts"
GUEST_VAGRANT_SSH_DIR="/vagrant/.ssh"
GUEST_IP=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
PUBLIC_NETWORK=$(echo $GUEST_IP | awk -F '.' '{print $1"."$2"."$3".0/24"}')
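# e.g. if GUEST_IP is 192.168.121.34 (illustrative), PUBLIC_NETWORK becomes 192.168.121.0/24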
mkdir -p "$GUEST_USER_DIR/ceph-cluster"
cd "$GUEST_USER_DIR/ceph-cluster"
#ceph-deploy new node-admin
#echo "public network = $PUBLIC_NETWORK" >> ceph.conf
#echo "cluster network = $CLUSTER_NETWORK" >> ceph.conf
#echo "" >> ceph.conf
#echo "osd pool default size = 2" >> ceph.conf
#echo "osd pool default min size = 1" >> ceph.conf

scripts/ceph-preflight.sh

@@ -1,2 +1,85 @@
#!/bin/bash
#!/bin/bash
# ========================================================================================
# Execute preflight configuration needed to deploy ceph distributed storage
#
# Written by : Denis Lambolez
# Release : 1.1
# Creation date : 04 December 2017
# Description : Bash script
# This script has been designed and written on the Ubuntu 16.04 platform.
# It must be executed in privileged mode
# Usage : ./ceph-preflight.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 1.1 | 12.04.17 | Denis Lambolez | Sourcing parameters from cephtest-utils
# 1.0 | 12.02.17 | Denis Lambolez | Creation
# | | |
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=ceph-preflight-1.1-120417
# This script is executed in guest context
source "/vagrant/scripts/cephtest-utils.sh"
# Directories (depending on the context)
GUEST_USER_DIR="/home/$CEPH_ADMIN_USER"
GUEST_USER_SSH_DIR="$GUEST_USER_DIR/.ssh"
GUEST_VAGRANT_SCRIPT_DIR="/vagrant/scripts"
GUEST_VAGRANT_SSH_DIR="/vagrant/.ssh"
# Make sure only root can run the script
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Create user ceph-admin
useradd -m -s /bin/bash $CEPH_ADMIN_USER
# Clean-up previous provisioning
$CEPH_ADMIN_EXEC rm -f "$GUEST_USER_DIR/provision-ok"
# Make ceph-admin passwordless sudoer
echo "$CEPH_ADMIN_USER ALL = (root) NOPASSWD:ALL" | tee "/etc/sudoers.d/$CEPH_ADMIN_USER"
chmod 0440 "/etc/sudoers.d/$CEPH_ADMIN_USER"
# Copy ceph-admin ssh keys and ssh config from the Vagrant synced folder (must be created by the vagrant-preflight script)
$CEPH_ADMIN_EXEC mkdir -p "$GUEST_USER_SSH_DIR"
$CEPH_ADMIN_EXEC chmod 700 "$GUEST_USER_SSH_DIR"
for FILE in id_rsa id_rsa.pub config; do
$CEPH_ADMIN_EXEC rm -f "$GUEST_USER_SSH_DIR/$FILE"
$CEPH_ADMIN_EXEC cp "$GUEST_VAGRANT_SSH_DIR/$CEPH_ADMIN_USER-$FILE" "$GUEST_USER_SSH_DIR/$FILE"
$CEPH_ADMIN_EXEC chmod 644 "$GUEST_USER_SSH_DIR/$FILE"
done
$CEPH_ADMIN_EXEC chmod 600 "$GUEST_USER_SSH_DIR/id_rsa"
# Copy ceph-admin public key in authorized_keys
$CEPH_ADMIN_EXEC rm -f "$GUEST_USER_SSH_DIR/authorized_keys"
$CEPH_ADMIN_EXEC cp "$GUEST_VAGRANT_SSH_DIR/$CEPH_ADMIN_USER-id_rsa.pub" "$GUEST_USER_SSH_DIR/authorized_keys"
$CEPH_ADMIN_EXEC chmod 644 "$GUEST_USER_SSH_DIR/authorized_keys"
# Make debconf non-interactive
export DEBIAN_FRONTEND=noninteractive
# Install ceph repository
wget -q -O- 'https://download.ceph.com/keys/release.asc' | apt-key add -
echo deb https://download.ceph.com/debian/ $(lsb_release -sc) main | tee /etc/apt/sources.list.d/ceph.list
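# On Ubuntu 16.04, $(lsb_release -sc) expands to "xenial", so ceph.list reads:
#   deb https://download.ceph.com/debian/ xenial main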
apt-get update
# Install chrony (time synchronization) and ceph-deploy
apt-get -y install chrony ceph-deploy
# Modify /etc/hosts to allow ceph-deploy to resolve the guest
# Need to replace the loopback address with the real address
GUEST_NAME=$(hostname -s)
GUEST_IP=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
sed -i "s/127.0.0.1\t$GUEST_NAME\t$GUEST_NAME/$GUEST_IP\t$GUEST_NAME\t$GUEST_NAME/g" /etc/hosts
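# e.g. the line "127.0.0.1\tnode-admin\tnode-admin" becomes "$GUEST_IP\tnode-admin\tnode-admin"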
# Full update
#apt-get -y dist-upgrade
#apt-get -y autoremove

34
scripts/cephtest-utils.sh Normal file

@@ -0,0 +1,34 @@
#!/bin/bash
# ========================================================================================
# Define parameters for creation of the cephtest vagrant cluster
#
# Written by : Denis Lambolez
# Release : 1.0
# Creation date : 04 December 2017
# Description : Bash script
# This script has been designed and written on the Ubuntu 16.04 platform.
# It's expected to be sourced by other scripts
# Usage : source cephtest-utils.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 1.0 | 12.04.17 | Denis Lambolez | Creation
# | | |
# | | |
# | | |
# =========================================================================================
# Version
VERSION=cephtest-utils-1.0-120417
# Ceph user
CEPH_ADMIN_USER="ceph-admin"
CEPH_ADMIN_EXEC="sudo -i -u $CEPH_ADMIN_USER"
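# Usage example: "$CEPH_ADMIN_EXEC whoami" prints ceph-admin (sudo -i runs the command in a login shell as that user)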
# Machines
NODES="node-admin node-osd1 node-osd2"
ADMIN_NODE="node-admin"

45
scripts/vagrant-preflight.sh Normal file

@@ -0,0 +1,45 @@
#!/bin/bash
# ========================================================================================
# Execute preflight configuration needed to deploy vagrant cluster
#
# Written by : Denis Lambolez
# Release : 1.1
# Creation date : 04 December 2017
# Description : Bash script
# This script has been designed and written on the Ubuntu 16.04 platform.
# It must be executed in vagrant context
# Usage : ./vagrant-preflight.sh
# ----------------------------------------------------------------------------------------
# ========================================================================================
#
# HISTORY :
# Release | Date | Authors | Description
# --------------+---------------+--------------- +------------------------------------------
# 1.1 | 12.04.17 | Denis Lambolez | Sourcing parameters from cephtest-utils
# 1.0 | 12.02.17 | Denis Lambolez | Creation
# | | |
# | | |
# =========================================================================================
#set -xev
# Version
VERSION=vagrant-preflight-1.1-120417
# This script is executed in host context
source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
# Directories (depending on the context)
HOST_SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
HOST_SSH_DIR="$(readlink -f "$HOST_SCRIPT_DIR/../.ssh")"
# (re)Create ssh keys
rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER"-id_rsa*
ssh-keygen -q -N "" -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa"
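# -q: quiet, -N "": empty passphrase; the matching public key is written alongside with a .pub suffix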
chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa"
chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa.pub"
# (re)Create ssh config file
rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
for NODE in $NODES; do
echo -e "Host $NODE\n\tHostname $NODE\n\tUser $CEPH_ADMIN_USER\n\tStrictHostKeyChecking no\n" >> "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
done
chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"