Commit 176b7420 authored by Rafael Monnerat

Added first official ansible playbooks for slapos.

parent 39e07b73
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  roles:
    - erp5-standalone
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  vars_files:
    - settings/gnet.yml
  vars_prompt:
    - name: "re6sttoken"
      prompt: "Enter your re6st token if you have one (ignore if you already have a configured re6st):"
      private: no
      default: "notoken"
    - name: "computer_name"
      prompt: "What is this computer's name? (ignore if you already have a configured re6st):"
      private: no
      default: "noname"
  roles:
    - re6stnet
    - { role: package, package_name: ntp, package_state: present }
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  vars_files:
    - settings/gnet.yml
  vars_prompt:
    - name: "re6sttoken"
      prompt: "Enter your re6st token if you have one (ignore if you already have a configured re6st):"
      private: no
      default: "notoken"
    - name: "computer_name"
      prompt: "What is this computer's name? (ignore if you already have a configured re6st and slapos):"
      private: no
      default: "noname"
    - name: "slapostoken"
      prompt: "Enter your slapos token if you have one (ignore if you already have a configured slapos):"
      private: no
      default: "notoken"
  roles:
    - re6stnet
    - slapos
    - { role: package, package_name: ntp, package_state: present }
---
base_open_build_url: http://download.opensuse.org/repositories/home:/VIFIBnexedi/
is_ubuntu: "'{{ ansible_distribution }}' == 'Ubuntu'"
is_debian: "'{{ ansible_distribution }}' == 'Debian'"
is_debian_or_ubuntu: "'{{ ansible_distribution }}' in ['Ubuntu', 'Debian']"
is_centos: "'{{ ansible_distribution }}' == 'CentOS'"
is_ubuntu_precise: "{{ is_ubuntu }} and '{{ ansible_distribution_release }}' == 'precise'"
is_ubuntu_raring: "{{ is_ubuntu }} and '{{ ansible_distribution_release }}' == 'raring'"
is_ubuntu_trusty: "{{ is_ubuntu }} and '{{ ansible_distribution_release }}' == 'trusty'"
is_redhat: "'{{ ansible_os_family }}' == 'RedHat'"
[targets]
localhost ansible_connection=local
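# Usage sketch (assumed invocation, mirroring the erp5-startup wrapper further
# below): run one of the playbooks from this directory against this local inventory.
#   ansible-playbook erp5-standalone.yml -i hosts --connection=local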
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  vars_files:
    - settings/imt.yml
  vars_prompt:
    - name: "re6sttoken"
      prompt: "Enter your re6st token if you have one (ignore if you already have a configured re6st running):"
      private: no
      default: "notoken"
    - name: "computer_name"
      prompt: "What is this computer's name? (ignore if you already have a configured re6st running):"
      private: no
      default: "noname"
  roles:
    - re6stnet
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  roles:
    - { role: package, package_name: ntp, package_state: latest }
    - { role: package, package_name: re6st-node, package_state: latest }
    - { role: package, package_name: slapos-node, package_state: latest }
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  vars_files:
    - settings/imt.yml
  vars_prompt:
    - name: "re6sttoken"
      prompt: "Enter your re6st token if you have one (ignore if you already have a configured re6st):"
      private: no
      default: "notoken"
    - name: "computer_name"
      prompt: "What is this computer's name? (ignore if you already have a configured re6st and slapos):"
      private: no
      default: "noname"
    - name: "slapostoken"
      prompt: "Enter your slapos token if you have one (ignore if you already have a configured slapos):"
      private: no
      default: "notoken"
  roles:
    - re6stnet
    - slapos
    - { role: package, package_name: ntp, package_state: present }
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  roles:
    - vm-bootstrap
    - ntp
    - { role: vm-disks, vd_disk: b, data_n: 1 }
    - { role: vm-disks, vd_disk: c, data_n: 2 }
    - { role: vm-disks, vd_disk: d, data_n: 3 }
    - { role: vm-disks, vd_disk: e, data_n: 4 }
    - { role: vm-disks, vd_disk: f, data_n: 5 }
    - { role: vm-disks, vd_disk: g, data_n: 6 }
    - { role: vm-disks, vd_disk: h, data_n: 7 }
    - { role: vm-disks, vd_disk: i, data_n: 8 }
    - { role: vm-disks, vd_disk: j, data_n: 9 }
    - { role: vm-disks, vd_disk: k, data_n: 10 }
    - { role: vm-disks, vd_disk: l, data_n: 11 }
    - { role: vm-disks, vd_disk: m, data_n: 12 }
    - { role: vm-disks, vd_disk: n, data_n: 13 }
    - { role: vm-disks, vd_disk: o, data_n: 14 }
    - { role: vm-disks, vd_disk: p, data_n: 15 }
    - { role: vm-disks, vd_disk: q, data_n: 16 }
    - { role: vm-disks, vd_disk: r, data_n: 17 }
    - { role: vm-disks, vd_disk: s, data_n: 18 }
    - { role: vm-disks, vd_disk: t, data_n: 19 }
    - { role: vm-disks, vd_disk: u, data_n: 20 }
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  roles:
    - vm-bootstrap
    - ntp
    - vm-cloudera-manager
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  vars:
    - playbook_file: https://lab.nexedi.cn/rafael/slapos.playbook/repository/archive.tar.gz?ref=master
    - base_path: .
  pre_tasks:
    - name: Create script structure
      command: mkdir -p {{ base_path }}/{{ item }}
      with_items:
        - install/gnet/
        - install/imt/
    - name: generate scripts
      template: src=roles/install-script/templates/base_setup.j2 dest={{ base_path }}/install/base-setup mode=0666
    - name: Register base-setup md5
      stat: path={{ base_path }}/install/base-setup
      register: base_setup
    - name: generate scripts
      template: src=roles/install-script/templates/dev.j2 dest={{ base_path }}/install/devbook mode=0666
  roles:
    - { role: "install-script", playbook_yml: "slapos.yml", script_path: "install/slapos" }
    - { role: "install-script", playbook_yml: "re6stnet.yml", script_path: "install/re6st" }
    - { role: "install-script", playbook_yml: "vifib.yml", script_path: "install/vifib" }
    - { role: "install-script", playbook_yml: "erp5-standalone.yml", script_path: "install/erp5-standalone" }
    - { role: "install-script", playbook_yml: "slapos-test-node.yml", script_path: "install/slapos-test-node" }
    - { role: "install-script", playbook_yml: "gnet-re6stnet.yml", script_path: "install/gnet/re6st" }
    - { role: "install-script", playbook_yml: "gnet-server.yml", script_path: "install/gnet/slapos" }
    - { role: "install-script", playbook_yml: "imt-server-update.yml", script_path: "install/imt/slapos-update" }
    - { role: "install-script", playbook_yml: "imt-server.yml", script_path: "install/imt/slapos" }
    - { role: "install-script", playbook_yml: "imt-vm-bootstrap.yml", script_path: "install/imt/vm-bootstrap" }
    - { role: "install-script", playbook_yml: "imt-vm-cloudera-manager.yml", script_path: "install/imt/vm-cloudera-manager" }
    - { role: "install-script", playbook_yml: "imt-re6stnet.yml", script_path: "install/imt/re6st" }
- name: a play that runs entirely on the ansible host
  hosts: 127.0.0.1
  connection: local
  vars_files:
    - settings/vifib.yml
  vars_prompt:
    - name: "computer_name"
      prompt: "What is this computer's name? (ignore if you already have a configured re6st and slapos):"
      private: no
      default: "noname"
  roles:
    - re6stnet-legacy
    - { role: package, package_name: ntp, package_state: present }
#!/usr/bin/python2.7
import os
import sys
import subprocess
import glob
import time
import getopt
import sqlite3
from xml.dom import minidom
#from slapos.proxy.db_version import DB_VERSION
import json


def fmt_date():
    return time.strftime("%Y%m%d")


def get_connection_information():
    conn = sqlite3.connect("/opt/slapos/slapproxy.db")
    cur = conn.cursor()
    qry = cur.execute("SELECT connection_xml FROM partition11 WHERE connection_xml IS NOT NULL AND software_type='create-erp5-site'")
    for row in qry:
        xml = str(row[0])
        break
    instance = minidom.parseString(xml)
    try:
        el = instance.getElementsByTagName('parameter')[0]
        value = el.childNodes[0].nodeValue
        json_text = json.loads(value)
        return (json_text['family-admin'], json_text['inituser-password'])
    except Exception, e:
        print e
        print "empty"
        return (None, None)


def check_tables():
    conn = sqlite3.connect("/opt/slapos/slapproxy.db")
    cur = conn.cursor()
    cur.execute("SELECT CASE WHEN tbl_name = 'partition11' THEN 1 ELSE 0 END FROM sqlite_master WHERE tbl_name = 'partition11' AND type = 'table'")
    if cur.fetchone():
        pass
    else:
        print "tables aren't ready yet, your build may have failed, check logs in /opt/slapos/log/"
        sys.exit(0)


def get_build_status():
    # We want to open today's log, as it is the most up to date.
    try:
        f = open("/opt/slapos/log/slapos-node-software-" + fmt_date() + ".log")
    except:
        f = open("/opt/slapos/log/slapos-node-software.log")
    lines = f.readlines()
    # Check if the last lines show the software finished building.
    # If an error came just before this, report failure.
    # Otherwise it passed and we can move on.
    if "Finished software releases" not in lines[-1]:
        return False
    if "ERROR" in lines[-3]:
        return "err"
    return True


def status():
    build = get_build_status()
    if build == "err":
        print "An error occurred while building, check /opt/slapos/log/slapos-node-software-" + \
            fmt_date() + ".log for details"
    elif build:
        zope_ip, pw = get_connection_information()
        print ("Build successful, connect to:\n"
               "  " + zope_ip + " with\n"
               "  username: zope password: " + pw)
    else:
        print "Your software is still building, be patient it can take awhile"


def info():
    if get_build_status():
        print get_connection_information()
    else:
        print "Information unavailable at this time, run " + sys.argv[0] + " -s for details"


def usage():
    print ("Get the status and information of your ERP5 build\n"
           "Usage:")
    print ("  --help (-h): Print this message and exit\n"
           "  --status (-s): Print the status of the build\n"
           "  --info (-i): Print the partition tables\n"
           "  --dump (-d): Dump the entire database (alias for slapos proxy show)\n")


def dump():
    subprocess.call(["slapos", "proxy", "show", "-u", "/opt/slapos/slapproxy.db"])


def main(argv):
    # parse command line options
    try:
        opts, args = getopt.getopt(argv, "sihd", ["status", "info", "help", "dump"])
    except getopt.error, msg:
        usage()
        sys.exit(2)
    # process arguments
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-s", "--status"):
            check_tables()
            status()
        elif opt in ("-i", "--info"):
            check_tables()
            info()
        elif opt in ("-d", "--dump"):
            dump()


if __name__ == "__main__":
    main(sys.argv[1:])
#!/bin/bash
# Reruns the ansible playbook, does nothing else
PLAYBOOK_ROOT=/opt/slapos.playbook/playbook/
PLAYBOOK_FILE=erp5-standalone.yml
cd $PLAYBOOK_ROOT # cd into the playbook directory
echo "Starting Ansible playbook:"
ansible-playbook $PLAYBOOK_FILE -i hosts --connection=local
import json

software_url = 'http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/heads/erp5-cluster:/software/erp5/software.cfg'

# Choose a SlapOS Node
# If you are deploying SlapOS Master with Webrunner, then computer_id is 'slaprunner'
computer_id = 'local_computer'

parameter_dict = {
    "timezone": "UTC",
    "site-id": "erp5",
    "bt5": "erp5_full_text_myisam_catalog erp5_configurator_standard",
    "zope-partition-dict": {
        "admin": {
            "family": "admin",
            "thread-amount": 4,
            "port-base": 2220,
            "instance-count": 1
        },
        "activities-node": {
            "family": "activities",
            "thread-amount": 4,
            "instance-count": 1,
            "timerserver-interval": 1,
            "port-base": 2230
        },
        "distribution-node": {
            "family": "distribution",
            "thread-amount": 1,
            "instance-count": 1,
            "port-base": 2210,
            "timerserver-interval": 1
        }
    }
}

# Choose a title
title = "instance-of-erp5-cluster"

request(software_url,
        title,
        filter_kw={'computer_guid': computer_id},
        software_type='create-erp5-site',
        partition_parameter_kw={
            '_': json.dumps(parameter_dict, sort_keys=True, indent=2),
        }
)
---
dependencies:
- slapos-proxy
---
- name: Add ipv6 to lo interface
  shell: ip -6 addr add 2001::1/64 dev lo
  ignore_errors: True

- name: Supply erp5 software release
  shell: slapos supply http://git.erp5.org/gitweb/slapos.git/blob_plain/refs/heads/erp5-cluster:/software/erp5/software.cfg local_computer

- name: create partition script
  copy: src=request-erp5-cluster dest=/tmp/playbook-request-erp5-cluster mode=700

- name: create erp5-show
  copy: src=erp5-show dest=/usr/local/bin/erp5-show mode=755

- name: Request ERP5 Cluster
  shell: cat /tmp/playbook-request-erp5-cluster | slapos console

- name: Add startup script
  copy: src=erp5-startup dest=/usr/local/bin/erp5-startup mode=755

- name: Add to rc.local
  lineinfile:
    dest=/etc/rc.local insertbefore=BOF
    line='bash /usr/local/bin/erp5-startup &'
    state=present

- name: Get slapos.playbook directory name
  shell: cd /tmp/tmpplaybookerp5-standalone.*/slapos.playbook.git/playbook/; echo $(pwd)/
  register: tmp_dir

- name: Check if /opt/slapos.playbook already exists
  stat: path=/opt/slapos.playbook/
  register: playbook_state

- name: Copy slapos.playbook
  copy: src={{ tmp_dir.stdout }} dest=/opt/slapos.playbook/
  when: playbook_state.stat.exists == False
#!/bin/bash
#
# functions-common - Common functions used by DevStack components
#
# The canonical copy of this file is maintained in the DevStack repo.
# All modifications should be made there and then sync'ed to other repos
# as required.
#
# This file is sorted alphabetically within the function groups.
#
# - Config Functions
# - Control Functions
# - Distro Functions
# - Git Functions
# - OpenStack Functions
# - Package Functions
# - Process Functions
# - Service Functions
# - System Functions
#
# The following variables are assumed to be defined by certain functions:
#
# - ``ENABLED_SERVICES``
# - ``ERROR_ON_CLONE``
# - ``FILES``
# - ``OFFLINE``
# - ``RECLONE``
# - ``REQUIREMENTS_DIR``
# - ``STACK_USER``
# - ``TRACK_DEPENDS``
# - ``UNDO_REQUIREMENTS``
# - ``http_proxy``, ``https_proxy``, ``no_proxy``
#
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Global Config Variables
declare -A GITREPO
declare -A GITBRANCH
declare -A GITDIR
TRACK_DEPENDS=${TRACK_DEPENDS:-False}
# Normalize config values to True or False
# Accepts as False: 0 no No NO false False FALSE
# Accepts as True: 1 yes Yes YES true True TRUE
# VAR=$(trueorfalse default-value test-value)
function trueorfalse {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local default=$1
local literal=$2
local testval=${!literal:-}
[[ -z "$testval" ]] && { echo "$default"; return; }
[[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
[[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
echo "$default"
$xtrace
}
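# Illustrative usage (a sketch, not part of upstream DevStack): normalize a
# user-supplied boolean before testing it, as the process helpers below do
# with USE_SCREEN.
#   USE_SCREEN=$(trueorfalse True USE_SCREEN)
#   [[ "$USE_SCREEN" = "True" ]] && echo "screen enabled"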
function isset {
[[ -v "$1" ]]
}
# Control Functions
# =================
# Prints backtrace info
# filename:lineno:function
# backtrace level
function backtrace {
local level=$1
local deep=$((${#BASH_SOURCE[@]} - 1))
echo "[Call Trace]"
while [ $level -le $deep ]; do
echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
deep=$((deep - 1))
done
}
# Prints line number and "message" then exits
# die $LINENO "message"
function die {
local exitcode=$?
set +o xtrace
local line=$1; shift
if [ $exitcode == 0 ]; then
exitcode=1
fi
backtrace 2
err $line "$*"
# Give buffers a second to flush
sleep 1
exit $exitcode
}
# Checks an environment variable is not set or has length 0 OR if the
# exit code is non-zero and prints "message" and exits
# NOTE: env-var is the variable name without a '$'
# die_if_not_set $LINENO env-var "message"
function die_if_not_set {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
if ! is_set $evar || [ $exitcode != 0 ]; then
die $line "$*"
fi
$xtrace
}
# Prints line number and "message" in error format
# err $LINENO "message"
function err {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
echo $msg 1>&2;
if [[ -n ${LOGDIR} ]]; then
echo $msg >> "${LOGDIR}/error.log"
fi
$xtrace
return $exitcode
}
# Checks an environment variable is not set or has length 0 OR if the
# exit code is non-zero and prints "message"
# NOTE: env-var is the variable name without a '$'
# err_if_not_set $LINENO env-var "message"
function err_if_not_set {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local line=$1; shift
local evar=$1; shift
if ! is_set $evar || [ $exitcode != 0 ]; then
err $line "$*"
fi
$xtrace
return $exitcode
}
# Exit after outputting a message about the distribution not being supported.
# exit_distro_not_supported [optional-string-telling-what-is-missing]
function exit_distro_not_supported {
if [[ -z "$DISTRO" ]]; then
GetDistro
fi
if [ $# -gt 0 ]; then
die $LINENO "Support for $DISTRO is incomplete: no support for $@"
else
die $LINENO "Support for $DISTRO is incomplete."
fi
}
# Test if the named environment variable is set and not zero length
# is_set env-var
function is_set {
local var=\$"$1"
eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this
}
# Prints line number and "message" in warning format
# warn $LINENO "message"
function warn {
local exitcode=$?
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
echo $msg 1>&2;
if [[ -n ${LOGDIR} ]]; then
echo $msg >> "${LOGDIR}/error.log"
fi
$xtrace
return $exitcode
}
# Distro Functions
# ================
# Determine OS Vendor, Release and Update
# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
# Returns results in global variables:
# ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
# ``os_RELEASE`` - major release: ``14.04`` (Ubuntu), ``20`` (Fedora)
# ``os_UPDATE`` - update: ex. the ``5`` in ``RHEL6.5``
# ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
# ``os_CODENAME`` - vendor's codename for release: ``snow leopard``, ``trusty``
os_VENDOR=""
os_RELEASE=""
os_UPDATE=""
os_PACKAGE=""
os_CODENAME=""
# GetOSVersion
function GetOSVersion {
# Figure out which vendor we are
if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
# OS/X
os_VENDOR=`sw_vers -productName`
os_RELEASE=`sw_vers -productVersion`
os_UPDATE=${os_RELEASE##*.}
os_RELEASE=${os_RELEASE%.*}
os_PACKAGE=""
if [[ "$os_RELEASE" =~ "10.7" ]]; then
os_CODENAME="lion"
elif [[ "$os_RELEASE" =~ "10.6" ]]; then
os_CODENAME="snow leopard"
elif [[ "$os_RELEASE" =~ "10.5" ]]; then
os_CODENAME="leopard"
elif [[ "$os_RELEASE" =~ "10.4" ]]; then
os_CODENAME="tiger"
elif [[ "$os_RELEASE" =~ "10.3" ]]; then
os_CODENAME="panther"
else
os_CODENAME=""
fi
elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
os_VENDOR=$(lsb_release -i -s)
os_RELEASE=$(lsb_release -r -s)
os_UPDATE=""
os_PACKAGE="rpm"
if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
os_PACKAGE="deb"
elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
lsb_release -d -s | grep -q openSUSE
if [[ $? -eq 0 ]]; then
os_VENDOR="openSUSE"
fi
elif [[ $os_VENDOR == "openSUSE project" ]]; then
os_VENDOR="openSUSE"
elif [[ $os_VENDOR =~ Red.*Hat ]]; then
os_VENDOR="Red Hat"
fi
os_CODENAME=$(lsb_release -c -s)
elif [[ -r /etc/redhat-release ]]; then
# Red Hat Enterprise Linux Server release 5.5 (Tikanga)
# Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
# CentOS release 5.5 (Final)
# CentOS Linux release 6.0 (Final)
# Fedora release 16 (Verne)
# XenServer release 6.2.0-70446c (xenenterprise)
# Oracle Linux release 7
os_CODENAME=""
for r in "Red Hat" CentOS Fedora XenServer; do
os_VENDOR=$r
if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
os_CODENAME=${ver#*|}
os_RELEASE=${ver%|*}
os_UPDATE=${os_RELEASE##*.}
os_RELEASE=${os_RELEASE%.*}
break
fi
os_VENDOR=""
done
if [ "$os_VENDOR" = "Red Hat" ] && [[ -r /etc/oracle-release ]]; then
os_VENDOR=OracleLinux
fi
os_PACKAGE="rpm"
elif [[ -r /etc/SuSE-release ]]; then
for r in openSUSE "SUSE Linux"; do
if [[ "$r" = "SUSE Linux" ]]; then
os_VENDOR="SUSE LINUX"
else
os_VENDOR=$r
fi
if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
break
fi
os_VENDOR=""
done
os_PACKAGE="rpm"
# If lsb_release is not installed, we should be able to detect Debian OS
elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
os_VENDOR="Debian"
os_PACKAGE="deb"
os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
fi
export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
}
# Translate the OS version values into common nomenclature
# Sets global ``DISTRO`` from the ``os_*`` values
declare DISTRO
function GetDistro {
GetOSVersion
if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
# 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
DISTRO=$os_CODENAME
elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
# For Fedora, just use 'f' and the release
DISTRO="f$os_RELEASE"
elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
DISTRO="opensuse-$os_RELEASE"
elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
# For SLE, also use the service pack
if [[ -z "$os_UPDATE" ]]; then
DISTRO="sle${os_RELEASE}"
else
DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
fi
elif [[ "$os_VENDOR" =~ (Red Hat) || \
"$os_VENDOR" =~ (CentOS) || \
"$os_VENDOR" =~ (OracleLinux) ]]; then
# Drop the . release as we assume it's compatible
DISTRO="rhel${os_RELEASE::1}"
elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
DISTRO="xs$os_RELEASE"
else
# Catch-all for now is Vendor + Release + Update
DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
fi
export DISTRO
}
# Utility function for checking machine architecture
# is_arch arch-type
function is_arch {
[[ "$(uname -m)" == "$1" ]]
}
# Determine if current distribution is an Oracle distribution
# is_oraclelinux
function is_oraclelinux {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "OracleLinux" ]
}
# Determine if current distribution is a Fedora-based distribution
# (Fedora, RHEL, CentOS, etc).
# is_fedora
function is_fedora {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \
[ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ]
}
# Determine if current distribution is a SUSE-based distribution
# (openSUSE, SLE).
# is_suse
function is_suse {
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
[ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
}
# Determine if current distribution is an Ubuntu-based distribution
# It will also detect non-Ubuntu but Debian-based distros
# is_ubuntu
function is_ubuntu {
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
[ "$os_PACKAGE" = "deb" ]
}
# Git Functions
# =============
# Returns openstack release name for a given branch name
# ``get_release_name_from_branch branch-name``
function get_release_name_from_branch {
local branch=$1
if [[ $branch =~ "stable/" || $branch =~ "proposed/" ]]; then
echo ${branch#*/}
else
echo "master"
fi
}
# git clone only if directory doesn't exist already. Since ``DEST`` might not
# be owned by the installation user, we create the directory and change the
# ownership to the proper user.
# Set global ``RECLONE=yes`` to simulate a clone when dest-dir exists
# Set global ``ERROR_ON_CLONE=True`` to abort execution with an error if the git repo
# does not exist (default is False, meaning the repo will be cloned).
# Uses globals ``ERROR_ON_CLONE``, ``OFFLINE``, ``RECLONE``
# git_clone remote dest-dir branch
function git_clone {
local git_remote=$1
local git_dest=$2
local git_ref=$3
local orig_dir=$(pwd)
local git_clone_flags=""
RECLONE=$(trueorfalse False RECLONE)
if [[ "${GIT_DEPTH}" -gt 0 ]]; then
git_clone_flags="$git_clone_flags --depth $GIT_DEPTH"
fi
if [[ "$OFFLINE" = "True" ]]; then
echo "Running in offline mode, clones already exist"
# print out the results so we know what change was used in the logs
cd $git_dest
git show --oneline | head -1
cd $orig_dir
return
fi
if echo $git_ref | egrep -q "^refs"; then
# If our branch name is a gerrit style refs/changes/...
if [[ ! -d $git_dest ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
git_timed clone $git_clone_flags $git_remote $git_dest
fi
cd $git_dest
git_timed fetch $git_remote $git_ref && git checkout FETCH_HEAD
else
# do a full clone only if the directory doesn't exist
if [[ ! -d $git_dest ]]; then
[[ "$ERROR_ON_CLONE" = "True" ]] && \
die $LINENO "Cloning not allowed in this configuration"
git_timed clone $git_clone_flags $git_remote $git_dest
cd $git_dest
# This checkout syntax works for both branches and tags
git checkout $git_ref
elif [[ "$RECLONE" = "True" ]]; then
# if it does exist then simulate what clone does if asked to RECLONE
cd $git_dest
# set the url to pull from and fetch
git remote set-url origin $git_remote
git_timed fetch origin
# remove the existing ignored files (like pyc) as they cause breakage
# (due to the py files having older timestamps than our pyc, so python
# thinks the pyc files are correct using them)
find $git_dest -name '*.pyc' -delete
# handle git_ref accordingly to type (tag, branch)
if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then
git_update_tag $git_ref
elif [[ -n "`git show-ref refs/heads/$git_ref`" ]]; then
git_update_branch $git_ref
elif [[ -n "`git show-ref refs/remotes/origin/$git_ref`" ]]; then
git_update_remote_branch $git_ref
else
die $LINENO "$git_ref is neither branch nor tag"
fi
fi
fi
# print out the results so we know what change was used in the logs
cd $git_dest
git show --oneline | head -1
cd $orig_dir
}
# A variation on git clone that lets us specify a project by its
# actual name, like oslo.config. This is exceptionally useful in the
# library installation case
function git_clone_by_name {
local name=$1
local repo=${GITREPO[$name]}
local dir=${GITDIR[$name]}
local branch=${GITBRANCH[$name]}
git_clone $repo $dir $branch
}
# git can sometimes get itself infinitely stuck with transient network
# errors or other issues with the remote end. This wraps git in a
# timeout/retry loop and is intended to watch over non-local git
# processes that might hang. GIT_TIMEOUT, if set, is passed directly
# to timeout(1); otherwise the default value of 0 maintains the status
# quo of waiting forever.
# usage: git_timed <git-command>
function git_timed {
local count=0
local timeout=0
if [[ -n "${GIT_TIMEOUT}" ]]; then
timeout=${GIT_TIMEOUT}
fi
until timeout -s SIGINT ${timeout} git "$@"; do
# 124 is timeout(1)'s special return code when it reached the
# timeout; otherwise assume fatal failure
if [[ $? -ne 124 ]]; then
die $LINENO "git call failed: [git $@]"
fi
count=$(($count + 1))
warn "timeout ${count} for git call: [git $@]"
if [ $count -eq 3 ]; then
die $LINENO "Maximum of 3 git retries reached"
fi
sleep 5
done
}
# git update using reference as a branch.
# git_update_branch ref
function git_update_branch {
local git_branch=$1
git checkout -f origin/$git_branch
# a local branch might not exist
git branch -D $git_branch || true
git checkout -b $git_branch
}
# git update using reference as a branch.
# git_update_remote_branch ref
function git_update_remote_branch {
local git_branch=$1
git checkout -b $git_branch -t origin/$git_branch
}
# git update using reference as a tag. Be careful editing source at that repo
# as working copy will be in a detached mode
# git_update_tag ref
function git_update_tag {
local git_tag=$1
git tag -d $git_tag
# fetching given tag only
git_timed fetch origin tag $git_tag
git checkout -f $git_tag
}
# OpenStack Functions
# ===================
# Get the default value for HOST_IP
# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
function get_default_host_ip {
local fixed_range=$1
local floating_range=$2
local host_ip_iface=$3
local host_ip=$4
# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
host_ip=""
# Find the interface used for the default route
host_ip_iface=${host_ip_iface:-$(ip route | awk '/default/ {print $5}' | head -1)}
local host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}')
local ip
for ip in $host_ips; do
# Attempt to filter out IP addresses that are part of the fixed and
# floating range. Note that this method only works if the ``netaddr``
# python library is installed. If it is not installed, an error
# will be printed and the first IP from the interface will be used.
# If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
# address.
if ! (address_in_net $ip $fixed_range || address_in_net $ip $floating_range); then
host_ip=$ip
break;
fi
done
fi
echo $host_ip
}
# Generates hex string from ``size`` byte of pseudo random data
# generate_hex_string size
function generate_hex_string {
local size=$1
hexdump -n "$size" -v -e '/1 "%02x"' /dev/urandom
}
# Grab a numbered field from python prettytable output
# Fields are numbered starting with 1
# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
# get_field field-number
function get_field {
local data field
while read data; do
if [ "$1" -lt 0 ]; then
field="(\$(NF$1))"
else
field="\$$(($1 + 1))"
fi
echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
done
}
# install default policy
# copy over a default policy.json and policy.d for projects
function install_default_policy {
local project=$1
local project_uc=$(echo $1|tr a-z A-Z)
local conf_dir="${project_uc}_CONF_DIR"
# eval conf dir to get the variable
conf_dir="${!conf_dir}"
local project_dir="${project_uc}_DIR"
# eval project dir to get the variable
project_dir="${!project_dir}"
local sample_conf_dir="${project_dir}/etc/${project}"
local sample_policy_dir="${project_dir}/etc/${project}/policy.d"
# first copy any policy.json
cp -p $sample_conf_dir/policy.json $conf_dir
# then optionally copy over policy.d
if [[ -d $sample_policy_dir ]]; then
cp -r $sample_policy_dir $conf_dir/policy.d
fi
}
# Add a policy to a policy.json file
# Do nothing if the policy already exists
# ``policy_add policy_file policy_name policy_permissions``
function policy_add {
local policy_file=$1
local policy_name=$2
local policy_perm=$3
if grep -q ${policy_name} ${policy_file}; then
echo "Policy ${policy_name} already exists in ${policy_file}"
return
fi
# Add a terminating comma to policy lines without one
# Remove the closing '}' and all lines following to the end-of-file
local tmpfile=$(mktemp)
uniq ${policy_file} | sed -e '
s/]$/],/
/^[}]/,$d
' > ${tmpfile}
# Append policy and closing brace
echo " \"${policy_name}\": ${policy_perm}" >>${tmpfile}
echo "}" >>${tmpfile}
mv ${tmpfile} ${policy_file}
}
# Gets or creates a domain
# Usage: get_or_create_domain <name> <description>
function get_or_create_domain {
local os_url="$KEYSTONE_SERVICE_URI_V3"
# Gets domain id
local domain_id=$(
# Gets domain id
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 domain show $1 \
-f value -c id 2>/dev/null ||
# Creates new domain
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 domain create $1 \
--description "$2" \
-f value -c id
)
echo $domain_id
}
# Gets or creates group
# Usage: get_or_create_group <groupname> [<domain> <description>]
function get_or_create_group {
local domain=${2:+--domain ${2}}
local desc="${3:-}"
local os_url="$KEYSTONE_SERVICE_URI_V3"
# Gets group id
local group_id=$(
# Creates new group with --or-show
openstack --os-token=$OS_TOKEN --os-url=$os_url \
--os-identity-api-version=3 group create $1 \
$domain --description "$desc" --or-show \
-f value -c id
)
echo $group_id
}
# Gets or creates user
# Usage: get_or_create_user <username> <password> [<email> [<domain>]]
function get_or_create_user {
if [[ ! -z "$3" ]]; then
local email="--email=$3"
else
local email=""
fi
local os_cmd="openstack"
local domain=""
if [[ ! -z "$4" ]]; then
domain="--domain=$4"
os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
fi
# Gets user id
local user_id=$(
# Creates new user with --or-show
$os_cmd user create \
$1 \
--password "$2" \
$email \
$domain \
--or-show \
-f value -c id
)
echo $user_id
}
# Gets or creates project
# Usage: get_or_create_project <name> [<domain>]
function get_or_create_project {
# Gets project id
local os_cmd="openstack"
local domain=""
if [[ ! -z "$2" ]]; then
domain="--domain=$2"
os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3"
fi
local project_id=$(
# Creates new project with --or-show
$os_cmd project create $1 $domain --or-show -f value -c id
)
echo $project_id
}
# Gets or creates role
# Usage: get_or_create_role <name>
function get_or_create_role {
local role_id=$(
# Creates role with --or-show
openstack role create $1 --or-show -f value -c id
)
echo $role_id
}
# Gets or adds user role to project
# Usage: get_or_add_user_project_role <role> <user> <project>
function get_or_add_user_project_role {
# Gets user role id
local user_role_id=$(openstack role list \
--user $2 \
--project $3 \
--column "ID" \
--column "Name" \
| grep " $1 " | get_field 1)
if [[ -z "$user_role_id" ]]; then
# Adds role to user
user_role_id=$(openstack role add \
$1 \
--user $2 \
--project $3 \
| grep " id " | get_field 2)
fi
echo $user_role_id
}
# Gets or adds group role to project
# Usage: get_or_add_group_project_role <role> <group> <project>
function get_or_add_group_project_role {
# Gets group role id
local group_role_id=$(openstack role list \
--group $2 \
--project $3 \
--column "ID" \
--column "Name" \
| grep " $1 " | get_field 1)
if [[ -z "$group_role_id" ]]; then
# Adds role to group
group_role_id=$(openstack role add \
$1 \
--group $2 \
--project $3 \
| grep " id " | get_field 2)
fi
echo $group_role_id
}
# Gets or creates service
# Usage: get_or_create_service <name> <type> <description>
function get_or_create_service {
# Gets service id
local service_id=$(
# Gets service id
openstack service show $1 -f value -c id 2>/dev/null ||
# Creates new service if not exists
openstack service create \
$2 \
--name $1 \
--description="$3" \
-f value -c id
)
echo $service_id
}
# Gets or creates endpoint
# Usage: get_or_create_endpoint <service> <region> <publicurl> <adminurl> <internalurl>
function get_or_create_endpoint {
# Gets endpoint id
local endpoint_id=$(openstack endpoint list \
--column "ID" \
--column "Region" \
--column "Service Name" \
| grep " $2 " \
| grep " $1 " | get_field 1)
if [[ -z "$endpoint_id" ]]; then
# Creates new endpoint
endpoint_id=$(openstack endpoint create \
$1 \
--region $2 \
--publicurl $3 \
--adminurl $4 \
--internalurl $5 \
| grep " id " | get_field 2)
fi
echo $endpoint_id
}
# Package Functions
# =================
# _get_package_dir
function _get_package_dir {
local base_dir=$1
local pkg_dir
if [[ -z "$base_dir" ]]; then
base_dir=$FILES
fi
if is_ubuntu; then
pkg_dir=$base_dir/debs
elif is_fedora; then
pkg_dir=$base_dir/rpms
elif is_suse; then
pkg_dir=$base_dir/rpms-suse
else
exit_distro_not_supported "list of packages"
fi
echo "$pkg_dir"
}
# Wrapper for ``apt-get`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``*_proxy``
# apt_get operation package [package ...]
function apt_get {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
$xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
}
function _parse_package_files {
local files_to_parse=$@
if [[ -z "$DISTRO" ]]; then
GetDistro
fi
for fname in ${files_to_parse}; do
local OIFS line package distros distro
[[ -e $fname ]] || continue
OIFS=$IFS
IFS=$'\n'
for line in $(<${fname}); do
if [[ $line =~ "NOPRIME" ]]; then
continue
fi
# Assume we want this package
package=${line%#*}
inst_pkg=1
# Look for # dist:xxx in comment
if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
# We are using BASH regexp matching feature.
package=${BASH_REMATCH[1]}
distros=${BASH_REMATCH[2]}
# In bash ${VAR,,} will lowercase VAR
# Look for a match in the distro list
if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
# If no match then skip this package
inst_pkg=0
fi
fi
if [[ $inst_pkg = 1 ]]; then
echo $package
fi
done
IFS=$OIFS
done
}
# get_packages() collects a list of package names of any type from the
# prerequisite files in ``files/{debs|rpms}``. The list is intended
# to be passed to a package installer such as apt or yum.
#
# Only packages required for the services in 1st argument will be
# included. Two bits of metadata are recognized in the prerequisite files:
#
# - ``# NOPRIME`` defers installation to be performed later in `stack.sh`
# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
# of the package to the distros listed. The distro names are case insensitive.
function get_packages {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local services=$@
local package_dir=$(_get_package_dir)
local file_to_parse=""
local service=""
if [[ -z "$package_dir" ]]; then
echo "No package directory supplied"
return 1
fi
for service in ${services//,/ }; do
# Allow individual services to specify dependencies
if [[ -e ${package_dir}/${service} ]]; then
file_to_parse="${file_to_parse} ${package_dir}/${service}"
fi
# NOTE(sdague) n-api needs glance for now because that's where
# glance client is
if [[ $service == n-api ]]; then
if [[ ! $file_to_parse =~ $package_dir/nova ]]; then
file_to_parse="${file_to_parse} ${package_dir}/nova"
fi
if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
file_to_parse="${file_to_parse} ${package_dir}/glance"
fi
elif [[ $service == c-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then
file_to_parse="${file_to_parse} ${package_dir}/cinder"
fi
elif [[ $service == ceilometer-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/ceilometer ]]; then
file_to_parse="${file_to_parse} ${package_dir}/ceilometer"
fi
elif [[ $service == s-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/swift ]]; then
file_to_parse="${file_to_parse} ${package_dir}/swift"
fi
elif [[ $service == n-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/nova ]]; then
file_to_parse="${file_to_parse} ${package_dir}/nova"
fi
elif [[ $service == g-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/glance ]]; then
file_to_parse="${file_to_parse} ${package_dir}/glance"
fi
elif [[ $service == key* ]]; then
if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then
file_to_parse="${file_to_parse} ${package_dir}/keystone"
fi
elif [[ $service == q-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/neutron ]]; then
file_to_parse="${file_to_parse} ${package_dir}/neutron"
fi
elif [[ $service == ir-* ]]; then
if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then
file_to_parse="${file_to_parse} ${package_dir}/ironic"
fi
fi
done
echo "$(_parse_package_files $file_to_parse)"
$xtrace
}
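# Illustrative prerequisite-file entries (hypothetical package names) showing
# the metadata recognized by _parse_package_files()/get_packages():
#   libfoo-dev
#   libbar-dev # dist:trusty    <- only kept when DISTRO matches trusty
#   libbaz-dev # NOPRIME        <- skipped here, installed later by stack.sh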
# get_plugin_packages() collects a list of package names of any type from a
# plugin's prerequisite files in ``$PLUGIN/devstack/files/{debs|rpms}``. The
# list is intended to be passed to a package installer such as apt or yum.
#
# Only packages required for enabled and collected plugins will be included.
#
# The same metadata used in the main DevStack prerequisite files may be used
# in these prerequisite files, see get_packages() for more info.
function get_plugin_packages {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local files_to_parse=""
local package_dir=""
for plugin in ${DEVSTACK_PLUGINS//,/ }; do
local package_dir="$(_get_package_dir ${GITDIR[$plugin]}/devstack/files)"
files_to_parse+="$package_dir/$plugin"
done
echo "$(_parse_package_files $files_to_parse)"
$xtrace
}
# Distro-agnostic package installer
# Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE``
# install_package package [package ...]
function update_package_repo {
NO_UPDATE_REPOS=${NO_UPDATE_REPOS:-False}
REPOS_UPDATED=${REPOS_UPDATED:-False}
RETRY_UPDATE=${RETRY_UPDATE:-False}
if [[ "$NO_UPDATE_REPOS" = "True" ]]; then
return 0
fi
if is_ubuntu; then
local xtrace=$(set +o | grep xtrace)
set +o xtrace
if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then
# if there are transient errors pulling the updates, that's fine.
# It may be secondary repositories that we don't really care about.
apt_get update || /bin/true
REPOS_UPDATED=True
fi
$xtrace
fi
}
function real_install_package {
if is_ubuntu; then
apt_get install "$@"
elif is_fedora; then
yum_install "$@"
elif is_suse; then
zypper_install "$@"
else
exit_distro_not_supported "installing packages"
fi
}
# Distro-agnostic package installer
# install_package package [package ...]
function install_package {
update_package_repo
real_install_package $@ || RETRY_UPDATE=True update_package_repo && real_install_package $@
}
# Distro-agnostic function to tell if a package is installed
# is_package_installed package [package ...]
function is_package_installed {
if [[ -z "$@" ]]; then
return 1
fi
if [[ -z "$os_PACKAGE" ]]; then
GetOSVersion
fi
if [[ "$os_PACKAGE" = "deb" ]]; then
dpkg -s "$@" > /dev/null 2> /dev/null
elif [[ "$os_PACKAGE" = "rpm" ]]; then
rpm --quiet -q "$@"
else
exit_distro_not_supported "finding if a package is installed"
fi
}
# Distro-agnostic package uninstaller
# uninstall_package package [package ...]
function uninstall_package {
if is_ubuntu; then
apt_get purge "$@"
elif is_fedora; then
sudo ${YUM:-yum} remove -y "$@" ||:
elif is_suse; then
sudo zypper rm "$@"
else
exit_distro_not_supported "uninstalling packages"
fi
}
# Wrapper for ``yum`` to set proxy environment variables
# Uses globals ``OFFLINE``, ``*_proxy``, ``YUM``
# yum_install package [package ...]
function yum_install {
[[ "$OFFLINE" = "True" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# The manual check for missing packages is because yum -y assumes
# missing packages are OK. See
# https://bugzilla.redhat.com/show_bug.cgi?id=965567
$sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
${YUM:-yum} install -y "$@" 2>&1 | \
awk '
BEGIN { fail=0 }
/No package/ { fail=1 }
{ print }
END { exit fail }' || \
die $LINENO "Missing packages detected"
# also ensure we catch a yum failure
if [[ ${PIPESTATUS[0]} != 0 ]]; then
die $LINENO "${YUM:-yum} install failure"
fi
}
# zypper wrapper to set arguments correctly
# Uses globals ``OFFLINE``, ``*_proxy``
# zypper_install package [package ...]
function zypper_install {
[[ "$OFFLINE" = "True" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
$sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \
no_proxy="${no_proxy:-}" \
zypper --non-interactive install --auto-agree-with-licenses "$@"
}
# Process Functions
# =================
# _run_process() is designed to be backgrounded by run_process() to simulate a
# fork. It includes the dirty work of closing extra filehandles and preparing log
# files to produce the same logs as screen_it(). The log filename is derived
# from the service name.
# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
# If an optional group is provided sg will be used to set the group of
# the command.
# _run_process service "command-line" [group]
function _run_process {
local service=$1
local command="$2"
local group=$3
# Undo logging redirections and close the extra descriptors
exec 1>&3
exec 2>&3
exec 3>&-
exec 6>&-
local real_logfile="${LOGDIR}/${service}.log.${CURRENT_LOG_TIME}"
if [[ -n ${LOGDIR} ]]; then
exec 1>&"$real_logfile" 2>&1
ln -sf "$real_logfile" ${LOGDIR}/${service}.log
if [[ -n ${SCREEN_LOGDIR} ]]; then
# Drop the backward-compat symlink
ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
fi
# TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
export PYTHONUNBUFFERED=1
fi
# Run under ``setsid`` to force the process to become a session and group leader.
# The pid saved can be used with pkill -g to get the entire process group.
if [[ -n "$group" ]]; then
setsid sg $group "$command" & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
else
setsid $command & echo $! >$SERVICE_DIR/$SCREEN_NAME/$service.pid
fi
# Just silently exit this process
exit 0
}
# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
# This is used by ``service_check`` after all the ``screen_it`` calls have finished
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
# init_service_check
function init_service_check {
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
fi
rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
}
# Find out if a process exists by partial name.
# is_running name
function is_running {
local name=$1
ps auxw | grep -v grep | grep ${name} > /dev/null
local exitcode=$?
# sometimes I really hate bash reverse binary logic
return $exitcode
}
# Run a single service under screen or directly
# If the command includes shell metacharacters (;<>*) it must be run using a shell
# If an optional group is provided sg will be used to run the
# command as that group.
# run_process service "command-line" [group]
function run_process {
local service=$1
local command="$2"
local group=$3
if is_service_enabled $service; then
if [[ "$USE_SCREEN" = "True" ]]; then
screen_process "$service" "$command" "$group"
else
# Spawn directly without screen
_run_process "$service" "$command" "$group" &
fi
fi
}
# Helper to launch a process in a named screen
# Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``,
# ``SERVICE_DIR``, ``USE_SCREEN``
# screen_process name "command-line" [group]
# Run a command in a shell in a screen window, if an optional group
# is provided, use sg to set the group of the command.
function screen_process {
local name=$1
local command="$2"
local group=$3
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True USE_SCREEN)
screen -S $SCREEN_NAME -X screen -t $name
local real_logfile="${LOGDIR}/${name}.log.${CURRENT_LOG_TIME}"
echo "LOGDIR: $LOGDIR"
echo "SCREEN_LOGDIR: $SCREEN_LOGDIR"
echo "log: $real_logfile"
if [[ -n ${LOGDIR} ]]; then
screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
screen -S $SCREEN_NAME -p $name -X log on
ln -sf "$real_logfile" ${LOGDIR}/${name}.log
if [[ -n ${SCREEN_LOGDIR} ]]; then
# Drop the backward-compat symlink
ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log
fi
fi
# sleep to allow bash to be ready to be sent the command - we are
# creating a new window in screen and then sending characters, so if
# bash isn't running by the time we send the command, nothing
# happens. This sleep was added originally to handle gate runs
# where we needed this to be at least 3 seconds to pass
# consistently on slow clouds. Now this is configurable so that we
# can determine a reasonable value for the local case which should
# be much smaller.
sleep ${SCREEN_SLEEP:-3}
NL=`echo -ne '\015'`
# This fun command does the following:
# - the passed server command is backgrounded
# - the pid of the background process is saved in the usual place
# - the server process is brought back to the foreground
# - if the server process exits prematurely the fg command errors
# and a message is written to stdout and the process failure file
#
# The pid saved can be used in stop_process() as a process group
# id to kill off all child processes
if [[ -n "$group" ]]; then
command="sg $group '$command'"
fi
# Append the process to the screen rc file
screen_rc "$name" "$command"
screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! >$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL"
}
# Screen rc file builder
# Uses globals ``SCREEN_NAME``, ``SCREENRC``
# screen_rc service "command-line"
function screen_rc {
SCREEN_NAME=${SCREEN_NAME:-stack}
SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
if [[ ! -e $SCREENRC ]]; then
# Name the screen session
echo "sessionname $SCREEN_NAME" > $SCREENRC
# Set a reasonable statusbar
echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
# Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
echo "screen -t shell bash" >> $SCREENRC
fi
# If this service doesn't already exist in the screenrc file
if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
NL=`echo -ne '\015'`
echo "screen -t $1 bash" >> $SCREENRC
echo "stuff \"$2$NL\"" >> $SCREENRC
if [[ -n ${LOGDIR} ]]; then
echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC
echo "log on" >>$SCREENRC
fi
fi
}
# Stop a service in screen
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
# screen_stop_service service
function screen_stop_service {
local service=$1
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True USE_SCREEN)
if is_service_enabled $service; then
# Clean up the screen window
screen -S $SCREEN_NAME -p $service -X kill
fi
}
# Stop a service process
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# Uses globals ``SERVICE_DIR``, ``USE_SCREEN``
# stop_process service
function stop_process {
local service=$1
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
USE_SCREEN=$(trueorfalse True USE_SCREEN)
if is_service_enabled $service; then
# Kill via pid if we have one available
if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
fi
if [[ "$USE_SCREEN" = "True" ]]; then
# Clean up the screen window
screen_stop_service $service
fi
fi
}
# Helper to get the status of each running service
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
# service_check
function service_check {
local service
local failures
SCREEN_NAME=${SCREEN_NAME:-stack}
SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
echo "No service status directory found"
return
fi
# Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME
# make this -o errexit safe
failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true`
for service in $failures; do
service=`basename $service`
service=${service%.failure}
echo "Error: Service $service is not running"
done
if [ -n "$failures" ]; then
die $LINENO "More details about the above errors can be found with screen, with ./rejoin-stack.sh"
fi
}
# Tail a log file in a screen if USE_SCREEN is true.
function tail_log {
local name=$1
local logfile=$2
USE_SCREEN=$(trueorfalse True USE_SCREEN)
if [[ "$USE_SCREEN" = "True" ]]; then
screen_process "$name" "sudo tail -f $logfile"
fi
}
# Deprecated Functions
# --------------------
# _old_run_process() is designed to be backgrounded by old_run_process() to simulate a
# fork. It includes the dirty work of closing extra filehandles and preparing log
# files to produce the same logs as screen_it(). The log filename is derived
# from the service name and global-and-now-misnamed ``SCREEN_LOGDIR``
# Uses globals ``CURRENT_LOG_TIME``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``, ``SERVICE_DIR``
# _old_run_process service "command-line"
function _old_run_process {
local service=$1
local command="$2"
# Undo logging redirections and close the extra descriptors
exec 1>&3
exec 2>&3
exec 3>&-
exec 6>&-
if [[ -n ${SCREEN_LOGDIR} ]]; then
exec 1>&${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} 2>&1
ln -sf ${SCREEN_LOGDIR}/screen-${1}.log.${CURRENT_LOG_TIME} ${SCREEN_LOGDIR}/screen-${1}.log
# TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
export PYTHONUNBUFFERED=1
fi
exec /bin/bash -c "$command"
die "$service exec failure: $command"
}
# old_run_process() launches a child process that closes all file descriptors and
# then exec's the passed in command. This is meant to duplicate the semantics
# of screen_it() without screen. PIDs are written to
# ``$SERVICE_DIR/$SCREEN_NAME/$service.pid`` by the spawned child process.
# old_run_process service "command-line"
function old_run_process {
local service=$1
local command="$2"
# Spawn the child process
_old_run_process "$service" "$command" &
echo $!
}
# Compatibility for existing start_XXXX() functions
# Uses global ``USE_SCREEN``
# screen_it service "command-line"
function screen_it {
if is_service_enabled $1; then
# Append the service to the screen rc file
screen_rc "$1" "$2"
if [[ "$USE_SCREEN" = "True" ]]; then
screen_process "$1" "$2"
else
# Spawn directly without screen
old_run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
fi
fi
}
# Compatibility for existing stop_XXXX() functions
# Stop a service in screen
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# screen_stop service
function screen_stop {
# Clean up the screen window
stop_process $1
}
# Plugin Functions
# =================
DEVSTACK_PLUGINS=${DEVSTACK_PLUGINS:-""}
# enable_plugin <name> <url> [branch]
#
# ``name`` is an arbitrary name - (aka: glusterfs, nova-docker, zaqar)
# ``url`` is a git url
# ``branch`` is a gitref. If it's not set, defaults to master
function enable_plugin {
local name=$1
local url=$2
local branch=${3:-master}
DEVSTACK_PLUGINS+=",$name"
GITREPO[$name]=$url
GITDIR[$name]=$DEST/$name
GITBRANCH[$name]=$branch
}
# fetch_plugins
#
# clones all plugins
function fetch_plugins {
local plugins="${DEVSTACK_PLUGINS}"
local plugin
# short circuit if nothing to do
if [[ -z $plugins ]]; then
return
fi
echo "Fetching DevStack plugins"
for plugin in ${plugins//,/ }; do
git_clone_by_name $plugin
done
}
# load_plugin_settings
#
# Load settings from plugins in the order that they were registered
function load_plugin_settings {
local plugins="${DEVSTACK_PLUGINS}"
local plugin
# short circuit if nothing to do
if [[ -z $plugins ]]; then
return
fi
echo "Loading plugin settings"
for plugin in ${plugins//,/ }; do
local dir=${GITDIR[$plugin]}
# source any known settings
if [[ -f $dir/devstack/settings ]]; then
source $dir/devstack/settings
fi
done
}
# plugin_override_defaults
#
# Run an extremely early setting phase for plugins that allows default
# overriding of services.
function plugin_override_defaults {
local plugins="${DEVSTACK_PLUGINS}"
local plugin
# short circuit if nothing to do
if [[ -z $plugins ]]; then
return
fi
echo "Overriding Configuration Defaults"
for plugin in ${plugins//,/ }; do
local dir=${GITDIR[$plugin]}
# source any overrides
if [[ -f $dir/devstack/override-defaults ]]; then
# be really verbose that an override is happening, as it
# may not be obvious if things fail later.
echo "$plugin has overriden the following defaults"
cat $dir/devstack/override-defaults
source $dir/devstack/override-defaults
fi
done
}
# run_plugins
#
# Run the devstack/plugin.sh in all the plugin directories. These are
# run in registration order.
function run_plugins {
local mode=$1
local phase=$2
local plugins="${DEVSTACK_PLUGINS}"
local plugin
for plugin in ${plugins//,/ }; do
local dir=${GITDIR[$plugin]}
if [[ -f $dir/devstack/plugin.sh ]]; then
source $dir/devstack/plugin.sh $mode $phase
fi
done
}
function run_phase {
local mode=$1
local phase=$2
if [[ -d $TOP_DIR/extras.d ]]; then
for i in $TOP_DIR/extras.d/*.sh; do
[[ -r $i ]] && source $i $mode $phase
done
fi
# the source phase corresponds to settings loading in plugins
if [[ "$mode" == "source" ]]; then
load_plugin_settings
elif [[ "$mode" == "override_defaults" ]]; then
plugin_override_defaults
else
run_plugins $mode $phase
fi
}
# Service Functions
# =================
# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
# _cleanup_service_list service-list
function _cleanup_service_list {
echo "$1" | sed -e '
s/,,/,/g;
s/^,//;
s/,$//
'
}
# disable_all_services() removes all current services
# from ``ENABLED_SERVICES`` to reset the configuration
# before a minimal installation
# Uses global ``ENABLED_SERVICES``
# disable_all_services
function disable_all_services {
ENABLED_SERVICES=""
}
# Remove all services starting with '-'. For example, to install all default
# services except rabbit (rabbit) set in ``localrc``:
# ENABLED_SERVICES+=",-rabbit"
# Uses global ``ENABLED_SERVICES``
# disable_negated_services
function disable_negated_services {
local tmpsvcs="${ENABLED_SERVICES}"
local service
for service in ${tmpsvcs//,/ }; do
if [[ ${service} == -* ]]; then
tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
fi
done
ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
}
# disable_service() removes the services passed as argument to the
# ``ENABLED_SERVICES`` list, if they are present.
#
# For example:
# disable_service rabbit
#
# This function does not know about the special cases
# for nova, glance, and neutron built into is_service_enabled().
# Uses global ``ENABLED_SERVICES``
# disable_service service [service ...]
function disable_service {
local tmpsvcs=",${ENABLED_SERVICES},"
local service
for service in $@; do
if is_service_enabled $service; then
tmpsvcs=${tmpsvcs//,$service,/,}
fi
done
ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
}
# enable_service() adds the services passed as argument to the
# ``ENABLED_SERVICES`` list, if they are not already present.
#
# For example:
# enable_service qpid
#
# This function does not know about the special cases
# for nova, glance, and neutron built into is_service_enabled().
# Uses global ``ENABLED_SERVICES``
# enable_service service [service ...]
function enable_service {
local tmpsvcs="${ENABLED_SERVICES}"
local service
for service in $@; do
if ! is_service_enabled $service; then
tmpsvcs+=",$service"
fi
done
ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
disable_negated_services
}
# is_service_enabled() checks if the service(s) specified as arguments are
# enabled by the user in ``ENABLED_SERVICES``.
#
# Multiple services specified as arguments are ``OR``'ed together; the test
# is a short-circuit boolean, i.e. it returns on the first match.
#
# There are special cases for some 'catch-all' services::
# **nova** returns true if any enabled service starts with **n-**
# **cinder** returns true if any enabled service starts with **c-**
# **ceilometer** returns true if any enabled service starts with **ceilometer**
# **glance** returns true if any enabled service starts with **g-**
# **neutron** returns true if any enabled service starts with **q-**
# **swift** returns true if any enabled service starts with **s-**
# **trove** returns true if any enabled service starts with **tr-**
# For backward compatibility, if we have **swift** in ENABLED_SERVICES all the
# **s-** services will be enabled. This will be deprecated in the future.
#
# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
# We also need to make sure to treat **n-cell-region** and **n-cell-child**
# as enabled in this case.
#
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local enabled=1
local services=$@
local service
for service in ${services}; do
[[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0
# Look for top-level 'enabled' function for this service
if type is_${service}_enabled >/dev/null 2>&1; then
# A function exists for this service, use it
is_${service}_enabled
enabled=$?
fi
# TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
# are implemented
[[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
[[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
[[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
[[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
[[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
[[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
[[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
[[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0
[[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
[[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
[[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
done
$xtrace
return $enabled
}
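# Illustrative example, assuming ENABLED_SERVICES="g-api,g-reg,n-cpu":
#   is_service_enabled glance   # returns 0: an enabled service starts with g-
#   is_service_enabled swift    # returns 1: no s- service and no swift entry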
# Toggle enable/disable_service for services that must run exclusive of each other
# $1 The name of a variable containing a space-separated list of services
# $2 The name of a variable in which to store the enabled service's name
# $3 The name of the service to enable
function use_exclusive_service {
local options=${!1}
local selection=$3
local out=$2
[ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1
local opt
for opt in $options;do
[[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt
done
eval "$out=$selection"
return 0
}
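# Illustrative example with hypothetical variables, choosing one database backend:
#   DATABASE_BACKENDS="mysql postgresql"
#   use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE mysql
# enables mysql, disables postgresql and sets DATABASE_TYPE=mysql.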
# System Functions
# ================
# Only run the command if the target file (the last arg) is not on an
# NFS filesystem.
function _safe_permission_operation {
local xtrace=$(set +o | grep xtrace)
set +o xtrace
local args=( $@ )
local last
local sudo_cmd
local dir_to_check
let last="${#args[*]} - 1"
local dir_to_check=${args[$last]}
if [ ! -d "$dir_to_check" ]; then
dir_to_check=`dirname "$dir_to_check"`
fi
if is_nfs_directory "$dir_to_check" ; then
$xtrace
return 0
fi
if [[ $TRACK_DEPENDS = True ]]; then
sudo_cmd="env"
else
sudo_cmd="sudo"
fi
$xtrace
$sudo_cmd $@
}
# Exit 0 if address is in network or 1 if address is not in network
# ip-range is in CIDR notation: 1.2.3.4/20
# address_in_net ip-address ip-range
function address_in_net {
local ip=$1
local range=$2
local masklen=${range#*/}
local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
local subnet=$(maskip $ip $(cidr2netmask $masklen))
[[ $network == $subnet ]]
}
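# Illustrative examples:
#   address_in_net 10.0.0.5 10.0.0.0/24      # exits 0 (address is in the range)
#   address_in_net 192.168.1.1 10.0.0.0/24   # exits 1 (address is outside)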
# Add a user to a group.
# add_user_to_group user group
function add_user_to_group {
local user=$1
local group=$2
if [[ -z "$os_VENDOR" ]]; then
GetOSVersion
fi
# SLE11 and openSUSE 12.2 don't have the usual usermod
if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then
sudo usermod -a -G "$group" "$user"
else
sudo usermod -A "$group" "$user"
fi
}
# Convert CIDR notation to an IPv4 netmask
# cidr2netmask cidr-bits
function cidr2netmask {
local maskpat="255 255 255 255"
local maskdgt="254 252 248 240 224 192 128"
set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3}
echo ${1-0}.${2-0}.${3-0}.${4-0}
}
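# Illustrative examples: cidr2netmask 24 prints 255.255.255.0,
# cidr2netmask 20 prints 255.255.240.0.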
# Gracefully cp only if source file/dir exists
# cp_it source destination
function cp_it {
if [ -e $1 ] || [ -d $1 ]; then
cp -pRL $1 $2
fi
}
# HTTP and HTTPS proxy servers are supported via the usual environment variables [1]
# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in
# ``localrc`` or on the command line if necessary::
#
# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html
#
# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
function export_proxy_variables {
if isset http_proxy ; then
export http_proxy=$http_proxy
fi
if isset https_proxy ; then
export https_proxy=$https_proxy
fi
if isset no_proxy ; then
export no_proxy=$no_proxy
fi
}
# Returns true if the directory is on a filesystem mounted via NFS.
function is_nfs_directory {
local mount_type=`stat -f -L -c %T $1`
test "$mount_type" == "nfs"
}
# Return the network portion of the given IP address using netmask
# netmask is in the traditional dotted-quad format
# maskip ip-address netmask
function maskip {
local ip=$1
local mask=$2
local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
echo $subnet
}
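# Illustrative example: maskip 10.0.2.15 255.255.255.0 prints 10.0.2.0.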
# Service wrapper to restart services
# restart_service service-name
function restart_service {
if is_ubuntu; then
sudo /usr/sbin/service $1 restart
else
sudo /sbin/service $1 restart
fi
}
# Only change permissions of a file or directory if it is not on an
# NFS filesystem.
function safe_chmod {
_safe_permission_operation chmod $@
}
# Only change ownership of a file or directory if it is not on an NFS
# filesystem.
function safe_chown {
_safe_permission_operation chown $@
}
# Service wrapper to start services
# start_service service-name
function start_service {
if is_ubuntu; then
sudo /usr/sbin/service $1 start
else
sudo /sbin/service $1 start
fi
}
# Service wrapper to stop services
# stop_service service-name
function stop_service {
if is_ubuntu; then
sudo /usr/sbin/service $1 stop
else
sudo /sbin/service $1 stop
fi
}
# Restore xtrace
$XTRACE
# Local variables:
# mode: shell-script
# End:
---
- name: generate scripts
template: src=install.j2 dest={{ base_path }}/{{ script_path }} mode=0666
- name: Append ansible-playbook command to the generated script
lineinfile: dest={{ base_path }}/{{ script_path }} line="ansible-playbook {{ playbook_yml }} -i hosts --connection=local" state=present
{{ lookup('file', 'roles/install-script/files/function-common') }}
# Include Additional Functions
function download_playbook {
if [ ! -f /etc/opt/slapcache.cfg ]; then
slapcache-conf
fi
DFILE="/tmp/tmpplaybook$(basename $0).$$/"
TFILE="archive.tar.gz"
mkdir -p $DFILE
cd $DFILE
slapcache-download --destination=$TFILE
tar -xzvf $TFILE
rm $TFILE
}
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
# and ``DISTRO``
GetDistro
if [[ ${DISTRO} =~ (-.) ]] && [[ -f /etc/debian_version ]]; then
apt_get install lsb-release
GetDistro
fi
if [[ $EUID -gt 0 ]]; then
echo "####################################################"
echo "# #"
echo "# ERROR: You must be root to run this script!!!! #"
echo "# #"
echo "####################################################"
exit 1
fi
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``export FORCE=yes``
if [[ ! ${DISTRO} =~ (wheezy|jessie|trusty|rhel7) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
fi
fi
# Make sure wheezy backports are available.
if [[ $DISTRO == "wheezy" ]]; then
echo "deb http://ftp.debian.org/debian wheezy-backports main contrib " > /etc/apt/sources.list.d/wheezy-backports.list
fi
if is_fedora && [[ $DISTRO == "rhel7" ]]; then
# RHEL requires EPEL for many OpenStack dependencies
# NOTE: We always remove and install latest -- some environments
# use snapshot images, and if EPEL version updates they break
# unless we update them to latest version.
if sudo yum repolist enabled epel | grep -q 'epel'; then
uninstall_package epel-release || true
fi
# This trick installs the latest epel-release from a bootstrap
# repo, then removes itself (as epel-release installed the
# "real" repo).
#
# You would think that rather than this, you could use
# $releasever directly in .repo file we create below. However
# RHEL gives a $releasever of "6Server" which breaks the path;
# see https://bugzilla.redhat.com/show_bug.cgi?id=1150759
cat <<EOF | sudo tee /etc/yum.repos.d/epel-bootstrap.repo
[epel-bootstrap]
name=Bootstrap EPEL
mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=\$basearch
failovermethod=priority
enabled=0
gpgcheck=0
EOF
# Enable a bootstrap repo. It is removed after finishing
# the epel-release installation.
yum-config-manager --enable epel-bootstrap
yum_install epel-release || \
die $LINENO "Error installing EPEL repo, cannot continue"
# The epel-release RPM has installed its own copy of the repo file
rm -f /etc/yum.repos.d/epel-bootstrap.repo
# ... and also make sure the optional RPMs repo is enabled
is_package_installed yum-utils || install_package yum-utils
yum-config-manager --enable rhel-7-server-optional-rpms
fi
is_package_installed ansible || install_package ansible
is_package_installed python-setuptools || install_package python-setuptools
if is_ubuntu && [[ $DISTRO == "trusty" ]]; then
is_package_installed python-apt || install_package python-apt
is_package_installed python-pycurl || install_package python-pycurl
fi
ansible localhost -m easy_install -a name=slapcache
#!/bin/bash
PLAYBOOK_REPOSITORY_URL=https://lab.nexedi.cn/rafael/slapos.playbook.git
#### Setup Ansible and load a few libraries #####
BASE_SETUP_SCRIPT_MD5={{ base_setup.stat.md5 }}
wget --no-check-certificate https://deploy.nexedi.cn/base-setup -O /tmp/base-setup
if [ "`md5sum /tmp/base-setup | cut -f1 -d\ `" != "$BASE_SETUP_SCRIPT_MD5" ]; then
echo "ERROR: base-setup has wrong md5 `md5sum /tmp/base-setup | cut -f1 -d\ ` != $BASE_SETUP_SCRIPT_MD5"
exit 1
fi
source /tmp/base-setup
is_package_installed git || install_package git
git clone $PLAYBOOK_REPOSITORY_URL
#!/bin/bash
set -e
#### Setup Ansible and load a few libraries #####
BASE_SETUP_SCRIPT_MD5={{ base_setup.stat.md5 }}
type wget >/dev/null 2>&1 || { echo >&2 "I require wget but it's not installed. Aborting."; exit 1; }
wget https://deploy.erp5.cn/base-setup -O /tmp/base-setup
if [ "`md5sum /tmp/base-setup | cut -f1 -d\ `" != "$BASE_SETUP_SCRIPT_MD5" ]; then
echo "ERROR: base-setup has wrong md5 `md5sum /tmp/base-setup | cut -f1 -d\ ` != $BASE_SETUP_SCRIPT_MD5"
exit 1
fi
source /tmp/base-setup
download_playbook
clear
echo "Starting Ansible playbook:"
---
- name: restart ntpd
service: name=ntpd state=restarted
- name: Install ntp
apt: name=ntp state=latest
when: ansible_os_family == "Debian"
- name: Install ntp
yum: name=ntp state=latest
when: ansible_os_family == "RedHat"
- name: ensure ntp is running
service: name=ntp state=started enabled=yes
---
dependencies:
- { role: repository }
- name: Install packages using apt
apt: name={{ package_name }} state={{ package_state }} update_cache=yes cache_valid_time=3600
when: ansible_os_family == "Debian"
- name: Install packages using yum
yum: name={{ package_name }} state={{ package_state }} update_cache=yes
when: ansible_os_family == "RedHat"
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHMFf/eh931HRskwFHRHg28Cajic6jdxVIH/gRMaxBFI+FgYOeb1ovehBa/C6vQGYqhlJf+Kuz9HsXyebVqH55yg/2BsSh6QQolgWwwwoWvTTFY2QRQwhkNSykWdHJhURNU2BGpPi0qTWnYj/jGN2hJFvOMbHtwzuMtKSLeFXh6d8A4cTWmme0FTKNqJfMYX2bOsnKWleLB25DDrs50IN2+bVThVt/70M24yervnyxhqnGpyTm9XJEI7nnLoWCK0UVr78kpQlikjSwhRggTjPRx1SrqRNNotHbbMEb5rCBTZc0s0klwKVzr77g3AOgAYRz9QM/l995Npm987AgbFuj rafael@localhost
- name: Add rafael SSH Key
authorized_key: user=root key="{{ lookup('file', 'rafael_key') }}"
- name: Add rafael SSH Key on slapos user
authorized_key: user=slapos key="{{ lookup('file', 'rafael_key') }}"
---
- name: restart re6stnet
service: name=re6stnet state=restarted
---
- name: Install GPG
command: gpg --keyserver keys.gnupg.net --recv-keys 0990BF79AC87AE42AE5329074C23BE591A716324
when: ansible_distribution == "Debian"
- name: Install Debian 7 repository key
shell: gpg --export 1A716324 | apt-key add -
when: ansible_distribution == "Debian"
- name: Install Debian 7 repository
apt_repository: repo='deb http://git.erp5.org/dist/deb ./' state=present
when: ansible_distribution == "Debian"
- name: Install pinned babeld package (re6stnet dependency)
apt: name=babeld=1.5.1-nxd2 state=present update_cache=yes
when: ansible_distribution == "Debian"
- name: Install re6stnet package
apt: name=re6stnet state=present update_cache=yes
when: ansible_distribution == "Debian"
- name: Check if configuration exists already
stat: path=/etc/re6stnet/re6stnet.conf
register: re6stnet_conf
- name: Configure Re6st with re6st-conf
shell: "re6st-conf --registry {{ re6st_registry_url }} -r title {{ computer_name }} -d /etc/re6stnet --anonymous"
when: ansible_distribution == "Debian" and re6stnet_conf.stat.exists == False and "{{ computer_name }}" != "noname"
- name: Start re6stnet service
service: name=re6stnet state=started enabled=yes
when: ansible_distribution == "Debian" and re6stnet_conf.stat.exists == True
- name: Add table 0
lineinfile: dest=/etc/re6stnet/re6stnet.conf line="table 0"
notify:
- restart re6stnet
---
- name: restart re6stnet
service: name=re6stnet state=restarted
---
dependencies:
- { role: package, package_name: re6st-node, package_state: present }
- name: Check if configuration exists already
stat: path=/etc/re6stnet/re6stnet.conf
register: re6stnet_conf
- name: Configure Re6st with re6st-conf
shell: "re6st-conf --registry {{ re6st_registry_url }} --token {{ re6sttoken }} -r title {{ computer_name }} -d /etc/re6stnet"
when: re6stnet_conf.stat.exists == False and "{{ re6sttoken }}" != "notoken" and "{{ computer_name }}" != "noname"
- name: Start re6st-node service
service: name=re6st-node state=started enabled=yes
when: re6stnet_conf.stat.exists == True
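# Illustrative example with hypothetical values: given
# re6st_registry_url=http://re6stnet.example.com/, re6sttoken=abc123 and
# computer_name=COMP-1234, the configure task above runs:
#   re6st-conf --registry http://re6stnet.example.com/ --token abc123 -r title COMP-1234 -d /etc/re6stnet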
- name: Install Debian 8 repository key
apt_key: url={{ base_open_build_url }}/Debian_8.0/Release.key state=present
when: ansible_distribution == "Debian" and ansible_distribution_major_version == "8"
- name: Install Debian 7 repository key
apt_key: url={{ base_open_build_url }}/Debian_7.0/Release.key state=present
when: ansible_distribution == "Debian" and ansible_distribution_major_version == "7"
- name: Install Debian 6 repository key
apt_key: url={{ base_open_build_url }}/Debian_6.0/Release.key state=present
when: ansible_distribution == "Debian" and ansible_distribution_major_version == "6"
- name: Install Debian 8 repository
apt_repository: repo='deb {{ base_open_build_url }}/Debian_8.0/ ./' state=present
when: ansible_distribution == "Debian" and ansible_distribution_major_version == "8"
- name: Install Debian 7 repository
apt_repository: repo='deb {{ base_open_build_url }}/Debian_7.0/ ./' state=present
when: ansible_distribution == "Debian" and ansible_distribution_major_version == "7"
- name: Install Debian 6 repository
apt_repository: repo='deb {{ base_open_build_url }}/Debian_6.0/ ./' state=present
when: ansible_distribution == "Debian" and ansible_distribution_major_version == "6"
- name: Install Ubuntu 14.04 repository key
apt_key: url={{ base_open_build_url }}/xUbuntu_14.04/Release.key state=present
when: ansible_distribution == "Ubuntu" and ansible_distribution_version == "14.04"
- name: Install Ubuntu 14.04 repository
apt_repository: repo='deb {{ base_open_build_url }}/xUbuntu_14.04/ ./' state=present
when: ansible_distribution == "Ubuntu" and ansible_distribution_version == "14.04"
- name: Install CentOS 7 Repository
get_url: url=http://download.opensuse.org/repositories/home:/VIFIBnexedi/CentOS_7/home:VIFIBnexedi.repo dest=/etc/yum.repos.d/slapos.repo mode=0440
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
- name: Install CentOS 7 RPM Key
rpm_key: state=present key=http://download.opensuse.org/repositories/home:/VIFIBnexedi/CentOS_CentOS-6/repodata/repomd.xml.key
when: ansible_distribution == "CentOS" and ansible_distribution_major_version == "7"
---
dependencies:
- { role: repository }
- name: Install packages using apt
apt: name=slapos-node state=present update_cache=yes cache_valid_time=3600
when: ansible_os_family == "Debian"
- name: Install slapos-node on CentOS
yum: name=slapos.node state=present update_cache=yes
when: ansible_os_family == "RedHat"
- name: Check if configuration exists already
stat: path=/etc/opt/slapos/slapos.cfg
register: slapos_cfg
- name: Configure SlapOS with slapos configure local
shell: "slapos configure local"
when: slapos_cfg.stat.exists == False
- name: Wait for proxy
wait_for: host=127.0.0.1 port=8080 delay=10
- name: Run slapos format for initial bootstrap
service: "slapos node format --now --alter_user=True"
when: slapos_cfg.stat.exists == False
---
dependencies:
- { role: repository }
- name: Install packages using apt
apt: name=slapos-node state=present update_cache=yes cache_valid_time=3600
when: ansible_os_family == "Debian"
- name: Install slapos-node on CentOS
yum: name=slapos.node state=present update_cache=yes
when: ansible_os_family == "RedHat"
- name: Check if configuration exists already
stat: path=/etc/opt/slapos/slapos.cfg
register: slapos_cfg
- name: Configure SlapOS with slapos node register
shell: "slapos node register --token {{ slapostoken }} --master-url {{ slapos_master_url }} --master-url-web {{ slapos_web_master_url }} --interface-name {{ interface_name }} {{ computer_name }}"
when: slapos_cfg.stat.exists == False and "{{ slapostoken }}" != "notoken" and "{{ computer_name }}" != "noname"
- name: Check if configuration exists already
stat: path=/etc/opt/slapos/slapos.cfg
register: xslapos_cfg
- name: Run slapos format for initial bootstrap
shell: "slapos node format --now --alter_user=True"
when: xslapos_cfg.stat.exists == True
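# Illustrative example with hypothetical values: given slapostoken=xyz789,
# computer_name=COMP-1234 and the vifib settings, the register task above runs:
#   slapos node register --token xyz789 --master-url https://slap.vifib.com/ --master-url-web https://slapos.vifib.com/ --interface-name lo COMP-1234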
---
- name: Include table 0 on re6st
shell: echo "TODO"
- name: interface on re6st
shell: echo "TODO"
- name: Load kvm_intel
shell: echo "TODO if machine has support."
- name: Set 666 on /dev/kvm
shell: echo "TODO if machine has support."
- name: Increase ulimit
shell: echo "TODO"
---
dependencies:
- { role: vifib-server }
---
- name: "Load acpi_cpufreq"
shell: modprobe acpi_cpufreq > /dev/null 2>&1
- name: "Load coretemp"
shell: modprobe coretemp > /dev/null 2>&1
- name: "Load f71882fg"
shell: modprobe f71882fg > /dev/null 2>&1
- name: "Do sysctl"
shell: sysctl -w net.ipv4.neigh.default.gc_thresh1=512 net.ipv4.neigh.default.gc_thresh2=1024 net.ipv4.neigh.default.gc_thresh3=2048 net.ipv6.neigh.default.gc_thresh1=512 net.ipv6.neigh.default.gc_thresh2=1024 net.ipv6.neigh.default.gc_thresh3=2048
- name: echo 1 > /sys/kernel/mm/ksm/run
shell: echo 1 > /sys/kernel/mm/ksm/run
- name: "Install firmware-realtek"
apt: name=firmware-realtek state=latest update_cache=yes cache_valid_time=3600
when: ansible_os_family == "Debian"
#!/usr/bin/python
import sys
print sys.argv[1:]
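# Illustrative example with hypothetical values: called as
#   format_hosts /tmp/hosts example.org
# a /tmp/hosts line "node1 10.0.2.11" is appended to /etc/hosts as
# "10.0.2.11 node1.example.org node1" (skipped if already present).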
host_content = open('/etc/hosts', 'r').read()
hpath = sys.argv[1]
domain = sys.argv[2]
with open('/etc/hosts', 'a') as hosts:
for content in open(hpath, 'r').readlines():
if content:
items = content.strip().split(' ')
entry = "%s %s.%s %s" % (items[1], items[0], domain, items[0])
print entry
if not entry in host_content:
hosts.write(entry)
hosts.write('\n')
---
- name: restart ssh
service: name=ssh state=restarted
---
- name: Download hostname
get_url: url=http://10.0.2.100/hostname dest=/etc/opt/hostname mode=666
ignore_errors: True
- name: Download ipv4
get_url: url=http://10.0.2.100/ipv4 dest=/etc/opt/ipv4 mode=666
ignore_errors: True
- name: Download cluster.hash
get_url: url=http://10.0.2.100/cluster.hash dest=/tmp/cluster.hash mode=666
ignore_errors: True
- name: stat /etc/opt/hostname
stat: path=/etc/opt/hostname
register: hostname_file
- name: stat /etc/opt/ipv4
stat: path=/etc/opt/ipv4
register: ipv4_file
- name: stat /tmp/cluster.hash
stat: path=/tmp/cluster.hash
register: cluster_hash
- name: replace /etc/hostname
copy: src=/etc/opt/hostname dest=/etc/hostname mode=666
when: hostname_file.stat.exists == True
- name: update hostname
command: hostname -F /etc/hostname
when: hostname_file.stat.exists == True
- name: managing hosts file
lineinfile: dest=/etc/hosts regexp="^127.0.1.1" line=""
when: hostname_file.stat.exists == True
- name: setting hosts special line
lineinfile: dest=/etc/hosts line="{{ lookup('file', '/etc/opt/ipv4') }} {{ lookup('file', '/etc/opt/hostname') }}.tl.teralab-datascience.fr {{ lookup('file', '/etc/opt/hostname') }}"
when: hostname_file.stat.exists == True
- name: setting cluster
get_url: url="{{ lookup('file', '/tmp/cluster.hash') }}/hosts" dest=/tmp/hosts mode=666 validate_certs=no
when: cluster_hash.stat.exists == True
- name: stat /tmp/hosts
stat: path=/tmp/hosts
register: hostname_file
- name: Format hosts
script: format_hosts /tmp/hosts tl.teralab-datascience.fr
when: cluster_hash.stat.exists == True
- name: managing resolv.conf file
lineinfile: dest=/etc/resolv.conf line="search tl.teralab-datascience.fr"
- name: adding entry from workspace
lineinfile: dest=/etc/resolv.conf line="nameserver 10.200.218.1"
- name: Create /etc/opt dir
file: dest=/etc/opt mode=775 state=directory
- name: Download configuration
get_url: url=http://10.0.2.100/netconfig.sh dest=/etc/opt/netconfig.sh mode=755
ignore_errors: True
- name: stat /etc/opt/netconfig.sh
stat: path=/etc/opt/netconfig.sh
register: netconfig_file
- name: replace /etc/rc.local
copy: src=/etc/opt/netconfig.sh dest=/etc/rc.local mode=755
when: netconfig_file.stat.exists == True
- name: call netconfig.sh
command: /etc/opt/netconfig.sh
when: netconfig_file.stat.exists == True
ignore_errors: True
- include: hostname.yml
- include: user.yml
- include: sudo.yml
- include: ssh.yml
---
- name: Create ~/.ssh
file: path=/root/.ssh state=directory mode=700
- name: Create /home/netadmin/.ssh
file: path=/home/netadmin/.ssh state=directory mode=700 owner=netadmin group=netadmin
- name: Download ssh authorized keys
get_url: url=http://10.0.2.100/authorized_keys dest=/root/.ssh/authorized_keys.download mode=755
ignore_errors: True
- name: stat /root/.ssh/authorized_keys.download
stat: path=/root/.ssh/authorized_keys.download
register: authorized_keys
- name: replace /root/.ssh/authorized_keys
copy: src=/root/.ssh/authorized_keys.download dest=/root/.ssh/authorized_keys mode=644
when: authorized_keys.stat.exists == True
- name: replace /home/netadmin/.ssh/authorized_keys
copy: src=/root/.ssh/authorized_keys.download dest=/home/netadmin/.ssh/authorized_keys mode=644 owner=netadmin group=netadmin
when: authorized_keys.stat.exists == True
- name: update /etc/ssh/sshd_config
lineinfile: dest=/etc/ssh/sshd_config regexp="^PermitRootLogin (?!no)" line="PermitRootLogin no"
notify:
- restart ssh
- name: update /etc/ssh/sshd_config
lineinfile: dest=/etc/ssh/sshd_config line="PermitRootLogin no"
notify:
- restart ssh
---
- name: Ensure sudo is installed
apt: name=sudo state=latest update_cache=yes cache_valid_time=3600
when: ansible_os_family == "Debian"
- name: Ensure sudo is installed (yum)
yum: name=sudo state=latest update_cache=yes
when: ansible_os_family == "RedHat"
- name: Ensure /etc/sudoers.d directory is present
file: path=/etc/sudoers.d state=directory
- name: Ensure /etc/sudoers.d is scanned by sudo
action: lineinfile dest=/etc/sudoers regexp="#includedir\s+/etc/sudoers.d" line="#includedir /etc/sudoers.d"
- name: Add netadmin user to the sudoers
lineinfile: dest=/etc/sudoers.d/netadmin state=present create=yes regexp="netadmin .*" line="netadmin ALL=(root) NOPASSWD:ALL"
- name: Ensure /etc/sudoers.d/netadmin file has correct permissions
action: file path=/etc/sudoers.d/netadmin mode=0440 state=file owner=root group=root
---
- name: create netadmin
user: name=netadmin shell=/bin/bash groups=root password=$6$rounds=100000$RcjiHRBJa/ORaqst$0L2hFmL5kwGpP6sY2r.hENK3DR5YLdlSDf.r57hwsLjKioYx36CdeLso.52av1UA61rhWIJWdoRYI5bFz.RAC1
- name: remove other unwanted users
user: name={{ item }} state=absent
with_items:
- slapos
- nexedi
---
- name: get CDH repository package
get_url: url=http://archive.cloudera.com/cdh5/one-click-install/wheezy/amd64/cdh5-repository_1.0_all.deb dest=~/cdh5-repository_1.0_all.deb
- name: Install CDH repository package
shell: dpkg -i ~/cdh5-repository_1.0_all.deb
- name: get ClouderaManager repo list
get_url: url=http://archive.cloudera.com/cm5/debian/wheezy/amd64/cm/cloudera.list dest=/etc/apt/sources.list.d/cloudera-manager.list
- name: update the repositories
shell: apt-get update
- name: update apt repositories
action: shell apt-get update ; true
- name: install python-apt
raw: "apt-get install -y python-apt"
- name: install sudo
apt: name=sudo state=latest
- name: install tree
apt: name=tree state=latest
- name: install curl
apt: name=curl state=latest
- name: install oracle-j2sdk1.7
apt: name=oracle-j2sdk1.7 state=latest
- name: managing hosts file
action: lineinfile dest=/etc/hosts regexp="^127.0.1.1" line=""
- name: install CM server
apt: name={{ item }} state=latest
with_items:
- cloudera-manager-daemons
- cloudera-manager-server-db
- cloudera-manager-server
- name: start cloudera-manager-server and -db
service: name={{ item }} state=started
with_items:
- cloudera-scm-server-db
- cloudera-scm-server
#!/bin/bash
DISK=$1
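# Feed fdisk its interactive answers non-interactively: n (new partition),
# p (primary), 1 (partition number), two empty lines to accept the default
# first/last sectors, w (write changes). Illustrative usage, assuming a
# hypothetical disk /dev/vdb that should become one partition:
#   /usr/local/playbook-autofdisk /dev/vdb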
echo "n
p
1
w
"|fdisk $DISK
---
- name: stat /dev/vd{{ vd_disk }}
stat: path=/dev/vd{{ vd_disk }}
register: disk
- name: stat partition /dev/vd{{ vd_disk }}1
stat: path=/dev/vd{{ vd_disk }}1
register: partition_vd
- name: create partition script
copy: src=autofdisk dest=/usr/local/playbook-autofdisk mode=700
when: disk.stat.exists == True and partition_vd.stat.exists == False
- name: Create Partition
shell: /usr/local/playbook-autofdisk /dev/vd{{ vd_disk }}
when: disk.stat.exists == True and partition_vd.stat.exists == False
- name: Create Filesystem
filesystem: fstype=ext4 dev=/dev/vd{{ vd_disk }}1
when: disk.stat.exists == True and partition_vd.stat.exists == False
- name: stat partition /dev/vd{{ vd_disk }}1
stat: path=/dev/vd{{ vd_disk }}1
register: partition_vd_created
- name: Create directory /data{{ data_n }}
file: path=/data{{ data_n }} state=directory
when: partition_vd_created.stat.exists == True
- name: Mount /data{{ data_n }}
mount: name=/data{{ data_n }} src=/dev/vd{{ vd_disk }}1 fstype=ext4 state=mounted
when: partition_vd_created.stat.exists == True
---
re6st_registry_url: http://re6stnet.gnet.erp5.cn/
slapos_master_url: https://slap.grandenet.cn/
slapos_web_master_url: https://slapos.grandenet.cn/
interface_name: lo
---
re6st_registry_url: http://re6stnet.imt.vifib.com/
slapos_master_url: https://slap.imt.vifib.com/
slapos_web_master_url: https://imt.vifib.com/
interface_name: lo
---
re6st_registry_url: http://re6stnet.nexedi.com/
slapos_master_url: https://slap.vifib.com/
slapos_web_master_url: https://slapos.vifib.com/
interface_name: lo
- name: a play that runs entirely on the ansible host
hosts: 127.0.0.1
connection: local
vars_files:
- settings/vifib.yml
vars:
interface_name: eth0
vars_prompt:
- name: "computer_name"
prompt: "What is this computer name? (ignore if you already have a configured re6st and slapos):"
private: no
default: "noname"
- name: "slapostoken"
prompt: "If you have slapos token if you have (ignore if you already have a configured slapos):"
private: no
default: "notoken"
roles:
- slapos
- { role: package, package_name: ntp, package_state: present }
- rafael_ssh_key
- name: a play that runs entirely on the ansible host
hosts: 127.0.0.1
connection: local
vars_files:
- settings/vifib.yml
vars_prompt:
- name: "computer_name"
prompt: "What is this computer name? (ignore if you already have a configured re6st and slapos):"
private: no
default: "noname"
- name: "slapostoken"
prompt: "If you have slapos token if you have (ignore if you already have a configured slapos):"
private: no
default: "notoken"
roles:
- slapos
- { role: package, package_name: ntp, package_state: present }
- name: a play that runs entirely on the ansible host
hosts: 127.0.0.1
connection: local
vars_files:
- settings/vifib.yml
vars_prompt:
- name: "computer_name"
prompt: "What is this computer name? (ignore if you already have a configured re6st and slapos):"
private: no
default: "noname"
- name: "slapostoken"
prompt: "If you have slapos token if you have (ignore if you already have a configured slapos):"
private: no
default: "notoken"
roles:
- re6stnet-legacy
- slapos
- { role: package, package_name: ntp, package_state: present }
- vifib-server-shuttle
- name: a play that runs entirely on the ansible host
hosts: 127.0.0.1
connection: local
vars_files:
- settings/vifib.yml
vars_prompt:
- name: "computer_name"
prompt: "What is this computer name? (ignore if you already have a configured re6st and slapos):"
private: no
default: "noname"
- name: "slapostoken"
prompt: "If you have slapos token if you have (ignore if you already have a configured slapos):"
private: no
default: "notoken"
roles:
- re6stnet-legacy
- slapos
- { role: package, package_name: ntp, package_state: present }
- vifib-server