First version for production

parent 4fa6d90a6b
commit a6bd8a9c55

@@ -57,16 +57,16 @@ Vagrant.configure("2") do |config|
       :libvirt__netmask => "255.255.255.0",
       :libvirt__network_name => "cluster-network"
     osd1.vm.provider :libvirt do |libvirt|
       libvirt.storage :file,
         :size => "30G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
         :size => "20G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
-        :size => "20G",
+        :size => "40G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
         :size => "40G",
         :type => "raw",
         :cache => "writeback"
     end

@@ -81,16 +81,16 @@ Vagrant.configure("2") do |config|
       :mac => "52:54:00:dc:51:7c",
       :libvirt__network_name => "cluster-network"
     osd2.vm.provider :libvirt do |libvirt|
       libvirt.storage :file,
         :size => "30G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
         :size => "20G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
-        :size => "20G",
+        :size => "40G",
         :type => "raw",
         :cache => "writeback"
       libvirt.storage :file,
         :size => "40G",
         :type => "raw",
         :cache => "writeback"
     end

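With this change each OSD guest carries four extra raw file-backed volumes (30G, 20G, 40G, 40G). How they map to /dev/vd* names inside the guest depends on the box image and bus ordering; the provision scripts assume /dev/vda for journals and /dev/vdb, /dev/vdc for data. A hedged way to confirm the layout from inside a guest:

    # List block devices and sizes; exact names vary by box image
    lsblk -o NAME,SIZE,TYPE
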
@@ -39,6 +39,7 @@ IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
 PUBLIC_NETWORK=$(echo $IP_ADDRESS | awk -F '.' '{print $1"."$2"."$3".0/24"}')
 
 # Cluster configuration directory
+$OUTPUT_LOG "Create new cluster configuration"
 mkdir -p "$GUEST_USER_DIR/ceph-cluster"
 cd "$GUEST_USER_DIR/ceph-cluster"
 

@@ -46,18 +47,19 @@ cd "$GUEST_USER_DIR/ceph-cluster"
 ceph-deploy new $ADMIN_NODE
 
 # Initialize cluster configuration
-cat << CLUSTERCONFIG >> ceph.conf
+$OUTPUT_LOG "Initialize cluster configuration"
+cat << CLUSTERCONFIG | tee -a ceph.conf
 public network = $PUBLIC_NETWORK
 cluster network = $CLUSTER_NETWORK
 
 osd pool default size = 2
 osd pool default min size = 1
-osd pool default pg num = 256
-osd pool default pgp num = 256
+osd pool default pg num = 128
+osd pool default pgp num = 128
 CLUSTERCONFIG
 
 for NODE in $OSD_NODES; do
-    cat << CLUSTERCONFIG >> ceph.conf
+    cat << CLUSTERCONFIG | tee -a ceph.conf
 
 [mds.$NODE]
 mds standby replay = true

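Replacing `>> ceph.conf` with `| tee -a ceph.conf` keeps the append semantics but also copies each block to stdout, so the vagrant provision log shows exactly what was written. The pattern in isolation:

    # Append a config block and echo it to the provision log at the same time
    cat << EOF | tee -a ceph.conf
    osd pool default pg num = 128
    EOF

Halving pg num and pgp num from 256 to 128 also fits a cluster this small: with only a handful of OSDs, 256 placement groups per pool would push the per-OSD PG count well past Ceph's recommended range.
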
@@ -65,29 +67,39 @@ mds standby for rank = 0
 CLUSTERCONFIG
 done
 
-# Install ceph on all nodes
+# Deploy ceph on all nodes
+$OUTPUT_LOG "Deploy ceph on all nodes"
 ceph-deploy install --release luminous $NODES
 
 # Create initial monitor
+$OUTPUT_LOG "Create initial monitor"
 ceph-deploy --overwrite-conf mon create-initial
 
 # Deploy configuration file and client keys
+$OUTPUT_LOG "Deploy configuration file and client keys on all nodes"
 ceph-deploy admin $NODES
 
 # Add monitor on osd nodes
-ceph-deploy mon add $OSD_NODES
+for NODE in $OSD_NODES; do
+    $OUTPUT_LOG "Add monitor on $NODE"
+    ceph-deploy mon add $NODE
+done
 
 # Create manager on all nodes
+$OUTPUT_LOG "Create manager on all nodes"
 ceph-deploy mgr create $NODES
 
 # Create metadata server on osd nodes
+$OUTPUT_LOG "Create metadata server on osd nodes"
 ceph-deploy mds create $OSD_NODES
 
 # For each osd node, gather keys from admin node and create OSDs
 for NODE in $OSD_NODES; do
+    $OUTPUT_LOG "Gather keys on $NODE"
     ssh $NODE ceph-deploy gatherkeys $ADMIN_NODE
     ssh $NODE sudo cp /home/$CEPH_ADMIN_USER/ceph.bootstrap-osd.keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
     ssh $NODE sudo chown ceph:ceph /var/lib/ceph/bootstrap-osd/ceph.keyring
+    $OUTPUT_LOG "Create OSDs on $NODE"
     ssh $NODE sudo ceph-volume lvm create --filestore --data /dev/vdb --journal /dev/vda1
     ssh $NODE sudo ceph-volume lvm create --filestore --data /dev/vdc --journal /dev/vda2
 done;

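Each loop pass turns a node's two data disks into filestore OSDs, with journals on the vda1 and vda2 partitions created during provisioning. A hedged sanity check once the loop finishes, run from any node holding the admin keyring (node name osd1 taken from the Vagrantfile):

    sudo ceph osd tree                  # expect two OSDs per OSD node
    ssh osd1 sudo ceph-volume lvm list  # inspect the LVM-backed OSDs on one node
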
@@ -95,3 +107,5 @@ done;
 # wait 10 seconds and get cluster status
 sleep 10
 sudo ceph -s
+
+$OUTPUT_LOG "Ceph cluster is now ready for pool creation and other activities.\nThanks for waiting. Enjoy!"

@@ -24,6 +24,15 @@
 # Version
 VERSION=cephtest-utils-1.0-120417
 
+# Cluster name
+CLUSTER_NAME="cephtest"
+
+# Script name
+SCRIPT=$(basename --suffix=.sh "$0")
+
+# Define log output
+OUTPUT_LOG="echo -e \n{$CLUSTER_NAME} {$SCRIPT} "
+
 # Ceph user
 CEPH_ADMIN_USER="ceph-admin"
 

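OUTPUT_LOG holds a command string rather than a function: CLUSTER_NAME and SCRIPT are expanded once at assignment, and call sites expand the variable unquoted so that echo -e turns the literal \n into a leading blank line. The call pattern used throughout these scripts:

    # Expands to roughly: echo -e \n{cephtest} {<script>} "message"
    $OUTPUT_LOG "Create new cluster configuration"
    # output: a blank line, then: {cephtest} {<script>} Create new cluster configuration
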
@@ -29,6 +29,7 @@ VERSION=pre-up-2.0-121617
 source "$(dirname "$(readlink -f "$0")")/cephtest-utils.sh"
 
 # (re)Create ssh keys
+$OUTPUT_LOG "Create SSH keys and config for ceph admin user"
 rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER"-id_rsa*
 ssh-keygen -q -N "" -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa"
 chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa"

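The keys land in $HOST_SSH_DIR, presumably a folder shared with the guests, which would explain the unusually permissive 644 on the private key instead of the usual 600. A hedged example of using the generated material from the host once the guests are up (paths and the osd1 node name come from these scripts):

    ssh -F "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config" -i "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa" osd1
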
@@ -37,11 +38,12 @@ chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-id_rsa.pub"
 # (re)Create ssh config file
 rm -f "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
 for NODE in $NODES; do
-    echo -e "Host $NODE\n\tHostname $NODE\n\tUser $CEPH_ADMIN_USER\n\tStrictHostKeyChecking no\n" >> "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
+    echo -e "Host $NODE\n\tHostname $NODE\n\tUser $CEPH_ADMIN_USER\n\tStrictHostKeyChecking no\n" | tee -a "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
 done
 chmod 644 "$HOST_SSH_DIR/$CEPH_ADMIN_USER-config"
 
 # Clean up IP and PROVISION signals
+$OUTPUT_LOG "Clean up IP and PROVISION signals"
 for NODE in $NODES; do
     rm -f "$HOST_SIGNAL_DIR/$NODE-IP"
     rm -f "$HOST_SIGNAL_DIR/$NODE-PROVISION"

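Expanded for a node named osd1, each generated stanza lets ssh (and therefore ceph-deploy) reach the guest as ceph-admin without a host-key prompt:

    Host osd1
        Hostname osd1
        User ceph-admin
        StrictHostKeyChecking no
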
@@ -34,6 +34,7 @@ if [[ $(hostname -s) != $ADMIN_NODE ]]; then
 fi
 
 # Wait for all nodes to be ready
+$OUTPUT_LOG "Wait for all nodes to be ready, with provision done"
 TIMER_MAX=300
 for NODE in $NODES; do
     TIMER=0

@@ -42,39 +42,58 @@ export DEBIAN_FRONTEND=noninteractive
 locale-gen fr_FR.UTF-8
 
 # Install ceph repository (luminous version)
+$OUTPUT_LOG "Install ceph repository (luminous version)"
 wget -q -O- 'https://download.ceph.com/keys/release.asc' | apt-key add -
 echo deb https://download.ceph.com/debian-luminous/ $(lsb_release -sc) main | tee /etc/apt/sources.list.d/ceph.list
 apt-get update
 
 # Install chrony for time synchronization, gdisk for GPT partitioning,
 # vnstat for network stats, htop for system monitor and ceph-deploy
+$OUTPUT_LOG "Install chrony, gdisk, vnstat, htop, ceph-deploy"
 apt-get -y install chrony gdisk vnstat htop ceph-deploy
 
+# Configure chrony with admin node as server and osd nodes as clients
+# Update chronyc password
+$OUTPUT_LOG "Configure chrony with admin node as server and osd nodes as clients"
+echo "1 chrony" > /etc/chrony/chrony.keys
+if [[ $GUEST_NAME == $ADMIN_NODE ]]; then
+    sed -i "s/#local stratum/local stratum/g" /etc/chrony/chrony.conf
+    sed -i "s/#allow 10\/8/allow 192.168\/16/g" /etc/chrony/chrony.conf
+else
+    sed -i "s/pool/server $ADMIN_NODE\n#pool/" /etc/chrony/chrony.conf
+fi
+# Restart chrony with new config
+systemctl restart chrony
 
 # Full update
 #apt-get -y dist-upgrade
 #apt-get -y autoremove
 
 # Create partitions on journal disk for osd nodes only
+$OUTPUT_LOG "Create partitions on journal disk for osd nodes"
 for NODE in $OSD_NODES; do
     if [[ $NODE == $GUEST_NAME ]]; then
+        $OUTPUT_LOG "Create partitions on $NODE"
+        sgdisk --zap-all /dev/vda
         sgdisk --new=0:0:10G /dev/vda > /dev/null 2>&1
         sgdisk --new=0:0:20G /dev/vda > /dev/null 2>&1
         sgdisk --largest-new=0 /dev/vda > /dev/null 2>&1
         sgdisk --print /dev/vda
     fi
 done
 
-# Modify /etc/hosts to allow ceph-deploy to resolve the guests
+# Modify /etc/hosts to allow ceph-deploy to resolve the guest
 IP_ADDRESS=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)
 # Need to replace the loopback address by the real address
+$OUTPUT_LOG "Modify /etc/hosts to allow ceph-deploy to resolve the guest"
 sed -i "s/127.0.0.1\t$GUEST_NAME\t$GUEST_NAME/$IP_ADDRESS\t$GUEST_NAME\t$GUEST_NAME/g" /etc/hosts
 echo >> /etc/hosts
 
 # Signal that IP is ready
-echo -e "$IP_ADDRESS\t$GUEST_NAME" > "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-IP"
+$OUTPUT_LOG "Signal that IP is ready"
+echo -e "$IP_ADDRESS\t$GUEST_NAME" | tee "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-IP"
 
 # Wait for all nodes IP and update /etc/hosts
+$OUTPUT_LOG "Wait for all nodes IP and update /etc/hosts"
 TIMER_MAX=300
 for NODE in $NODES; do
     if [[ $NODE != $GUEST_NAME ]]; then

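The chrony edits make the admin node a time server for the private range (local stratum lets it serve without upstream sync, allow 192.168/16 opens it to the guests), while each OSD node swaps its public pool entries for the admin node. A hedged post-restart check on an OSD node:

    chronyc sources   # the admin node should be listed as the sync source

Note that `sgdisk --zap-all` needs a target device; /dev/vda is assumed above to match the other sgdisk calls.
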
@@ -90,11 +109,13 @@ for NODE in $NODES; do
         # Remove record if existing
         sed -i "/$NODE/d" /etc/hosts
         # Add new record
-        cat "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" >> /etc/hosts
+        $OUTPUT_LOG "Add IP for $NODE"
+        cat "$GUEST_VAGRANT_SIGNAL_DIR/$NODE-IP" | tee -a /etc/hosts
     fi
 done
 
 # Create user ceph-admin if not existing
+$OUTPUT_LOG "Create user ceph-admin if not existing and make it passwordless sudoer"
 cat /etc/passwd | grep $CEPH_ADMIN_USER || useradd -m -s /bin/bash $CEPH_ADMIN_USER
 
 # Make ceph-admin passwordless sudoer

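The sudoers entry itself sits outside this hunk; the next hunk's context only shows it being locked down to mode 0440. The standard passwordless form that ceph-deploy expects would look like this (content assumed, not shown in the diff):

    echo "$CEPH_ADMIN_USER ALL=(ALL) NOPASSWD:ALL" > "/etc/sudoers.d/$CEPH_ADMIN_USER"
    chmod 0440 "/etc/sudoers.d/$CEPH_ADMIN_USER"
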
@@ -104,8 +125,9 @@ chmod 0440 "/etc/sudoers.d/$CEPH_ADMIN_USER"
 # Copy ceph-admin ssh keys and ssh config from Vagrant folder
 # Keys must be created by pre-up script
 # Executed in ceph admin context
+$OUTPUT_LOG "Copy ssh keys, config and authorized keys"
+$OUTPUT_LOG "Switch to $CEPH_ADMIN_USER context"
 sudo -i -u $CEPH_ADMIN_USER << CEPHADMINBLOCK
-echo "Switch to $CEPH_ADMIN_USER context"
 mkdir -p $GUEST_USER_SSH_DIR
 chmod 700 $GUEST_USER_SSH_DIR
 cd $GUEST_USER_SSH_DIR

@@ -117,7 +139,8 @@ sudo -i -u $CEPH_ADMIN_USER <<CEPHADMINBLOCK
 chmod 644 id_rsa.pub config authorized_keys
 chmod 600 id_rsa
 CEPHADMINBLOCK
-echo "Switch to $(whoami) context"
+$OUTPUT_LOG "Switch to $(whoami) context"
 
 # Signal that provision is done
+$OUTPUT_LOG "Signal that provision is done"
 echo "$(date --rfc-3339=ns) - Done!" | tee "$GUEST_VAGRANT_SIGNAL_DIR/$GUEST_NAME-PROVISION"

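One subtlety here: CEPHADMINBLOCK is an unquoted heredoc delimiter, so $GUEST_USER_SSH_DIR and any command substitution in the block are expanded by the calling root shell before the lines ever reach the ceph-admin login shell. A minimal illustration:

    sudo -i -u ceph-admin << BLOCK
    echo "expanded by the caller: $(whoami)"
    BLOCK
    # run as root, this prints: expanded by the caller: root
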
@@ -0,0 +1,23 @@
+	Command being timed: "vagrant up"
+	User time (seconds): 4.97
+	System time (seconds): 0.68
+	Percent of CPU this job got: 0%
+	Elapsed (wall clock) time (h:mm:ss or m:ss): 26:11.21
+	Average shared text size (kbytes): 0
+	Average unshared data size (kbytes): 0
+	Average stack size (kbytes): 0
+	Average total size (kbytes): 0
+	Maximum resident set size (kbytes): 123304
+	Average resident set size (kbytes): 0
+	Major (requiring I/O) page faults: 6
+	Minor (reclaiming a frame) page faults: 168409
+	Voluntary context switches: 15839
+	Involuntary context switches: 9104
+	Swaps: 0
+	File system inputs: 184
+	File system outputs: 15888
+	Socket messages sent: 0
+	Socket messages received: 0
+	Signals delivered: 0
+	Page size (bytes): 4096
+	Exit status: 0
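
Output in this shape comes from GNU time's verbose mode, presumably captured as:

    /usr/bin/time -v vagrant up

Under six seconds of CPU against a 26-minute wall clock (0% CPU) shows the run is dominated by waiting: VM boots, apt downloads and the node synchronization loops.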