Commit 5e03b689 authored by Alain Takoudjou

Repman: Replication Manager for SlapOS

Create a Software Release for Replication Manager 2.1

https://github.com/signal18/replication-manager

See merge request !719
parents 7a82aadc 494eb5e1
[buildout]
extends =
../ncurses/buildout.cfg
../openssl/buildout.cfg
parts =
cmake
@@ -11,5 +12,5 @@ shared = true
url = https://cmake.org/files/v3.7/cmake-3.7.2.tar.gz
md5sum = 79bd7e65cd81ea3aa2619484ad6ff25a
environment =
CMAKE_INCLUDE_PATH=${ncurses:location}/include:${openssl:location}/include
CMAKE_LIBRARY_PATH=${ncurses:location}/lib:${openssl:location}/lib
@@ -68,6 +68,14 @@ md5sum = 6132109d4050da349eadc9f7b0304ef4
environment-extra =
GOROOT_BOOTSTRAP=${golang14:location}
[golang1.13]
<= golang-common
url = https://dl.google.com/go/go1.13.9.src.tar.gz
md5sum = 4ad8b04f962be93a32f3021e6f35b3b9
# go1.13 needs go1.4 to bootstrap
environment-extra =
GOROOT_BOOTSTRAP=${golang14:location}
# ---- infrastructure to build Go workspaces / projects ----
[buildout]
extends =
../libiconv/buildout.cfg
../gnutls/buildout.cfg
../curl/buildout.cfg
parts =
libmicrohttpd
[libmicrohttpd]
recipe = slapos.recipe.cmmi
url = https://ftp.gnu.org/gnu/libmicrohttpd/libmicrohttpd-0.9.70.tar.gz
md5sum = dcd6045ecb4ea18c120afedccbd1da74
configure-options =
--with-libcurl=${curl:location}
--with-gnutls=${gnutls:location}
--with-libiconv-prefix=${libiconv:location}
--without-libintl-prefix
[buildout]
extends =
../autoconf/buildout.cfg
../automake/buildout.cfg
../libtool/buildout.cfg
../cmake/buildout.cfg
../openssl/buildout.cfg
../patch/buildout.cfg
../git/buildout.cfg
../bzip2/buildout.cfg
../perl/buildout.cfg
../gnutls/buildout.cfg
../curl/buildout.cfg
../libzip/buildout.cfg
../m4/buildout.cfg
../pcre/buildout.cfg
../jemalloc/buildout.cfg
../libmicrohttpd/buildout.cfg
parts =
proxysql
[proxysql]
recipe = slapos.recipe.cmmi
version = v2.0.12
url = https://github.com/sysown/proxysql/archive/${:version}.tar.gz
md5sum = 70ec17fe73703a25730fdd44b6bc3ef5
prefix = @@LOCATION@@
# Patch installation path for SlapOS
pre-configure =
mkdir -p ${:prefix}/bin ${:prefix}/etc/init.d ${:prefix}/lib/systemd/system
sed -ri "
s#(\s)/usr/bin#\1${:prefix}/bin#g
s#(\s)/etc#\1${:prefix}/etc#g
s#(\s)/usr/lib#\1${:prefix}/lib#
s#(\s)/var/lib#\1${:prefix}/lib#g
s#(\s)useradd#\1echo useradd#g
s#(\s)systemctl#\1echo systemctl#g
s#(\s)chkconfig#\1echo chkconfig#g
s#(\s)update-rc.d#\1echo update-rc.d#g" Makefile
configure-command = true
environment =
GIT_VERSION=${:version}
PKG_CONFIG_PATH=${openssl:location}/lib/pkgconfig:${gnutls:location}/lib/pkgconfig:${libgcrypt:location}/lib/pkgconfig:${zlib:location}/lib/pkgconfig:${pcre:location}/lib/pkgconfig
PATH=${m4:location}/bin:${libtool:location}/bin:${libgcrypt:location}/bin:${curl:location}/bin:${perl:location}/bin:${pkgconfig:location}/bin:${bzip2:location}/bin:${autoconf:location}/bin:${git:location}/bin:${automake:location}/bin:${patch:location}/bin:${cmake:location}/bin:%(PATH)s
CXXFLAGS=-I${openssl:location}/include -I${gnutls:location}/include -I${zlib:location}/include
CFLAGS=-I${gnutls:location}/include
LDFLAGS=-L${openssl:location}/lib -Wl,-rpath -Wl,${gnutls:location}/lib -L${gnutls:location}/lib -Wl,-rpath=${curl:location}/lib -L${libtool:location}/lib -L${zlib:location}/lib -Wl,-rpath -Wl,${zlib:location}/lib -L${curl:location}/lib -L${pcre:location}/lib -L${jemalloc:location}/lib -L${libmicrohttpd:location}/lib
CMAKE_INCLUDE_PATH=${openssl:location}/include:${gnutls:location}/include:${curl:location}/include:${pcre:location}/include:${jemalloc:location}/include:${libmicrohttpd:location}/include
CMAKE_LIBRARY_PATH=${openssl:location}/lib:${gnutls:location}/lib:${curl:location}/lib:${pcre:location}/lib:${jemalloc:location}/lib:${libmicrohttpd:location}/lib
LIBTOOL=libtool
ACLOCAL_PATH=${pkgconfig:location}/share/aclocal:${libtool:location}/share/aclocal
[buildout]
extends =
../../component/golang/buildout.cfg
parts =
gowork
[gowork]
golang = ${golang1.13:location}
install =
buildflags = -v --tags server --ldflags "-extldflags 'static' -w -s -X main.GoOS=linux -X main.GoArch=amd64 -X main.Version=2.1 -X main.FullVersion=$FULLVERSION -X main.Build=$(date +%FT%T%z) -X main.WithProvisioning=ON -X main.WithOpenSVC=OFF -X main.WithHaproxy=ON -X main.WithMaxscale=ON -X main.WithMariadbshardproxy=ON -X main.WithProxysql=ON -X main.WithSphinx=ON -X main.WithArbitration=OFF -X main.WithArbitrationClient=ON -X main.WithMonitoring=ON -X main.WithHttp=ON -X main.WithBackup=ON -X main.WithMail=ON -X main.WithEnforce=ON -X main.WithDeprecate=ON"
[gowork.goinstall]
depends_gitfetch =
${git.signal18.io_signal18_repman:recipe}
command = set -e
. ${gowork:env.sh}
cd ${git.signal18.io_signal18_repman:location}
export GO111MODULE=on
export FULLVERSION=$(git describe --tags)
go build ${gowork:buildflags} -o ${gowork:bin}/replication-manager
chmod -R u+w .
# Remove binary files shipped with replication-manager, otherwise testnode will complain with 'libXXX => not found'.
rm -f ${git.signal18.io_signal18_repman:location}/share/amd64/darwin/*
rm -f ${git.signal18.io_signal18_repman:location}/share/amd64/linux/*
[git.signal18.io_signal18_repman]
<= go-git-package
go.importpath = github.com/signal18/replication-manager
repository = https://github.com/signal18/replication-manager
branch = 2.1
revision = 9167a82c81af8f7be41cf51bc9be8a37dc3d8c03
[buildout]
extends =
../../component/golang/buildout.cfg
parts =
restic
[gowork]
golang = ${golang1.13:location}
[restic]
recipe = plone.recipe.command
update-command = ${:command}
stop-on-error = True
# GO111MODULE=on enables go modules support
# the chmod is needed as modules are fetched with u-w
command =
. ${gowork:env.sh} &&
cd ${git.github.com_restic_restic:location} &&
export GO111MODULE=on &&
go run build.go -o ${:output} &&
chmod -R u+w .
output = ${gowork:bin}/restic
location = ${:output}
[git.github.com_restic_restic]
<= go-git-package
go.importpath = github.com/restic/restic
repository = https://github.com/restic/restic
revision = v0.9.6
@@ -7,3 +7,5 @@ recipe = slapos.recipe.cmmi
url = http://www.dest-unreach.org/socat/download/socat-${:version}.tar.gz
version = 1.7.3.2
md5sum = aec3154f7854580cfab0c2d81e910519
environment =
LDFLAGS=-L${openssl:location}/lib -Wl,-rpath=${openssl:location}/lib
[buildout]
extends =
../autoconf/buildout.cfg
../automake/buildout.cfg
../libtool/buildout.cfg
../gettext/buildout.cfg
../m4/buildout.cfg
../mariadb/buildout.cfg
parts =
sysbench
[sysbench]
recipe = slapos.recipe.cmmi
shared = true
url = https://github.com/akopytov/sysbench/archive/1.0.19.tar.gz
md5sum = 2912bfe7238cac7351459019a84e2557
pre-configure =
aclocal -I${pkgconfig:location}/share/aclocal -I${libtool:location}/share/aclocal -I${gettext:location}/share/aclocal
./autogen.sh
configure-options =
--disable-static
--with-mysql-includes=${mariadb:location}/include/mysql
--with-mysql-libs=${mariadb:location}/lib
environment =
PATH=${m4:location}/bin:${autoconf:location}/bin:${automake:location}/bin:${pkgconfig:location}/bin:${libtool:location}/bin:%(PATH)s
CPPFLAGS=-I${gettext:location}/include -I${mariadb:location}/include
LDFLAGS=-L${gettext:location}/lib -Wl,-rpath=${gettext:location}/lib -L${mariadb:location}/lib -Wl,-rpath=${mariadb:location}/lib
ACLOCAL_PATH=${pkgconfig:location}/share/aclocal:${gettext:location}/share/aclocal:${libtool:location}/share/aclocal
# THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginnings, copied verbatim
# - lines containing an "=" sign which must fit in the following categories.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[instance.cfg]
filename = instance.cfg.in
md5sum = b41f521b5f7980c64260ed0e5c494450
[instance-repman.cfg]
_update_hash_filename_ = instance-repman.cfg.jinja2.in
md5sum = 7dbaace0d7db0e26d582ad17f36ac9cd
[config-toml.in]
_update_hash_filename_ = templates/config.toml.in
md5sum = 5cfa75ca5a0048a050c0041dfe541f3d
[config-cluster-toml.in]
_update_hash_filename_ = templates/cluster-config.toml.in
md5sum = d2e79a9435082d9420281b4f59a5d464
[nginx.conf.in]
_update_hash_filename_ = templates/nginx.conf.in
md5sum = 0eeb24c6aa0760f0d33c4cc2828ddf30
[template-mariadb.cfg]
_update_hash_filename_ = instance-mariadb.cfg.jinja2.in
md5sum = 189ccee60d0fb53e29431a45e0816bc1
[template-my-cnf]
_update_hash_filename_ = templates/my.cnf.in
md5sum = f3661b788099bb31d71ba6e7d36836d9
[template-mariadb-initial-setup]
_update_hash_filename_ = templates/mariadb_initial_setup.sql.in
md5sum = 9be53e2e92333b93e92556b8a01d9c42
[mariadb-init-root-sql]
_update_hash_filename_ = templates/mariadb_init_root.sql.in
md5sum = d927b5d36410bb02717d5ca125525785
[init-root-wrapper-in]
_update_hash_filename_ = templates/init_root_wrapper.in
md5sum = 83ef59b5afaf4454d368823c33aef9cb
[repman-manager-sh.in]
_update_hash_filename_ = templates/repman-manager.sh.in
md5sum = 50503bec392e31126328f51eadc11634
[dbjobs-in]
_update_hash_filename_ = templates/dbjobs.in
md5sum = d2ebd2ec55bf8489789a52c808729925
[mysqld-need-start.sh.in]
_update_hash_filename_ = templates/mysqld-need-start.sh.in
md5sum = e9bcee5dc1318fe3acda2663472214f5
[proxy-need-start-stop.sh.in]
_update_hash_filename_ = templates/proxy-need-start-stop.sh.in
md5sum = 455aaf369bf5141758dc57f2c0e67b08
{
"name": "Output Parameters",
"properties": {
"backend-url": {
"title": "Backend URL",
"description": "URL used to connect directly to backend without frontend. Requires IPv6.",
"type": "string",
"format": "uri"
},
"url": {
"title": "URL",
"description": "URL used to connect to the service.",
"type": "string",
"format": "uri"
},
"repman-password": {
"title": "Repman password",
"description": "Password for Replication Manager service.",
"type": "string"
}
}
}
[buildout]
parts = switch-softwaretype
eggs-directory = {{ buildout_egg_directory }}
develop-eggs-directory = {{ buildout_develop_directory }}
offline = true
[switch-softwaretype]
recipe = slapos.cookbook:switch-softwaretype
default = template-instance-repman.cfg:rendered
RootSoftwareInstance = ${:default}
mariadb = template-instance-mariadb.cfg:rendered
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
mode = 0644
extensions = jinja2.ext.do
rendered = ${buildout:directory}/${:_buildout_section_name_}
supervisord-lib = {{ supervisord_lib }}
import-list =
file supervisord_lib :supervisord-lib
context =
key slapparameter_dict slap-configuration:configuration
key computer_id slap-configuration:computer
key ipv4_set slap-configuration:ipv4
key ipv6_set slap-configuration:ipv6
raw buildout_directory {{ buildout_directory }}
raw buildout_bin_directory {{ buildout_bin_directory }}
raw eggs_directory {{ buildout_egg_directory }}
raw develop_eggs_directory {{ buildout_develop_directory }}
raw mariadb_location {{ mariadb_location }}
raw supervisord_lib {{ supervisord_lib }}
raw supervisord_conf {{ supervisord_conf }}
raw template_monitor {{ template_monitor_cfg }}
# program binaries
raw bash_bin {{ bash_location }}/bin/bash
raw jq_bin {{ jq_location }}/jq
raw curl_bin {{ curl_location }}/bin/curl
${:extra-context}
extra-context =
[template-instance-repman.cfg]
<= jinja2-template-base
template = {{ template_repman_cfg }}
extra-context =
raw gowork_bin {{ gowork_bin }}
raw haproxy_location {{ haproxy_location }}
raw nginx_bin {{ nginx_location }}/sbin/nginx
raw repman_src_location {{ repman_src_location }}
# config files
raw config_toml_in {{ config_toml_in }}
raw config_cluster_toml_in {{ config_cluster_toml_in }}
raw nginx_conf_in {{ nginx_conf_in }}
raw rsync_location {{ rsync_location }}
raw restic_bin_location {{ restic_bin_location }}
raw sysbench_location {{ sysbench_location }}
raw proxysql_location {{ proxysql_location }}
raw template_repman_manager_sh {{ template_repman_manager_sh }}
raw template_proxy_need_stopstart {{ proxy_need_stop_start_template }}
[template-mariadb-parameters]
bash = {{ bash_location }}
dash-location = {{ dash_location }}
gzip-location = {{ gzip_location }}
mariadb-location = {{ mariadb_location }}
template-my-cnf = {{ template_my_cnf }}
template-mariadb-initial-setup = {{ template_mariadb_initial_setup }}
template-mariadb-init-root = {{ template_init_root_sql }}
template-init-root-wrapper = {{ template_init_root_wrapper }}
template-mysqld-wrapper = {{ template_mysqld_wrapper }}
template-mysqld-need-start = {{ mysqld_start_template }}
link-binary = {{ dumps(mariadb_link_binary) }}
check-computer-memory-binary = {{ bin_directory }}/check-computer-memory
bin-directory = {{ bin_directory }}
percona-tools-location = {{ percona_toolkit_location }}
unixodbc-location = {{ unixodbc_location }}
curl-location = {{ curl_location }}
dbjobs-template = {{ dbjobs_in }}
socat-location = {{ socat_location }}
mroonga-mariadb-install-sql = {{ mroonga_mariadb_install_sql }}
mroonga-mariadb-plugin-dir = {{ mroonga_mariadb_plugin_dir }}
groonga-plugins-path = {{ groonga_plugin_dir }}:{{ groonga_mysql_normalizer_plugin_dir }}
[template-instance-mariadb.cfg]
<= jinja2-template-base
template = {{ template_mariadb }}
filename = instance-mariadb.cfg
extra-context =
section parameter_dict template-mariadb-parameters
[buildout]
extends =
buildout.hash.cfg
../../component/restic/buildout.cfg
../../component/replication-manager/buildout.cfg
../../component/mariadb/buildout.cfg
../../component/nginx/buildout.cfg
../../component/haproxy/buildout.cfg
../../component/logrotate/buildout.cfg
../../component/percona-toolkit/buildout.cfg
../../component/gzip/buildout.cfg
../../component/sed/buildout.cfg
../../component/coreutils/buildout.cfg
../../component/grep/buildout.cfg
../../component/sysbench/buildout.cfg
../../component/proxysql/buildout.cfg
../../component/socat/buildout.cfg
../../component/rsync/buildout.cfg
../../stack/supervisord/buildout.cfg
../../stack/monitor/buildout.cfg
../neoppod/software-common.cfg
parts =
slapos-cookbook
mroonga-mariadb
instance.cfg
template-mariadb.cfg
template-mysqld-wrapper
gowork
[instance.cfg]
recipe = slapos.recipe.template:jinja2
rendered = ${buildout:directory}/instance.cfg
template = ${:_profile_base_location_}/${:filename}
mode = 0644
context =
key bash_location bash:location
key bin_directory buildout:bin-directory
key config_toml_in config-toml.in:target
key config_cluster_toml_in config-cluster-toml.in:target
key coreutils_location coreutils:location
key curl_location curl:location
key buildout_egg_directory buildout:eggs-directory
key buildout_develop_directory buildout:develop-eggs-directory
key buildout_directory buildout:directory
key buildout_bin_directory buildout:bin-directory
key dbjobs_in dbjobs-in:target
key dash_location dash:location
key jq_location jq-binary:location
key logrotate_cfg template-logrotate-base:rendered
key gowork_bin gowork:bin
key gzip_location gzip:location
key haproxy_location haproxy:location
key template_monitor monitor2-template:rendered
key mariadb_link_binary template-mariadb.cfg:link-binary
key mariadb_location mariadb:location
key mysqld_start_template mysqld-need-start.sh.in:target
key mroonga_mariadb_install_sql mroonga-mariadb:install-sql
key mroonga_mariadb_plugin_dir mroonga-mariadb:plugin-dir
key groonga_plugin_dir groonga:groonga-plugin-dir
key groonga_mysql_normalizer_plugin_dir groonga-normalizer-mysql:groonga-plugin-dir
key nginx_conf_in nginx.conf.in:target
key nginx_location nginx:location
key percona_toolkit_location percona-toolkit:location
key proxy_need_stop_start_template proxy-need-start-stop.sh.in:target
key repman_src_location git.signal18.io_signal18_repman:location
key rsync_location rsync:location
key restic_bin_location restic:location
key socat_location socat:location
key supervisord_lib supervisord-library:target
key supervisord_conf supervisord-conf:target
key template_repman_manager_sh repman-manager-sh.in:target
key template_mariadb template-mariadb.cfg:target
key template_mariadb_initial_setup template-mariadb-initial-setup:target
key template_monitor_cfg monitor2-template:rendered
key template_my_cnf template-my-cnf:target
key template_mysqld_wrapper template-mysqld-wrapper:rendered
key template_init_root_sql mariadb-init-root-sql:target
key template_init_root_wrapper init-root-wrapper-in:target
key template_repman_cfg instance-repman.cfg:target
key unixodbc_location unixodbc:location
key sysbench_location sysbench:location
key proxysql_location proxysql:location
[jq-binary]
recipe = hexagonit.recipe.download
url = https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
md5sum = 1fffde9f3c7944f063265e9a5e67ae4f
filename = jq
mode = 0755
download-only = true
[download-file]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_update_hash_filename_}
destination = ${buildout:directory}/${:_buildout_section_name_}
[config-toml.in]
<= download-file
[config-cluster-toml.in]
<= download-file
[instance-repman.cfg]
<= download-file
[repman-manager-sh.in]
<= download-file
[template-mariadb.cfg]
<= download-file
link-binary =
${coreutils:location}/bin/basename
${coreutils:location}/bin/cat
${coreutils:location}/bin/cp
${coreutils:location}/bin/ls
${coreutils:location}/bin/tr
${coreutils:location}/bin/uname
${gettext:location}/lib/gettext/hostname
${grep:location}/bin/grep
${sed:location}/bin/sed
${mariadb:location}/bin/mysqlbinlog
[template-mariadb-initial-setup]
<= download-file
[template-my-cnf]
<= download-file
[mariadb-init-root-sql]
<= download-file
[init-root-wrapper-in]
<= download-file
[nginx.conf.in]
<= download-file
[dbjobs-in]
<= download-file
[mysqld-need-start.sh.in]
<= download-file
[proxy-need-start-stop.sh.in]
<= download-file
# Pin versions of eggs used that are not already pinned by stack/slapos.cfg
[versions]
slapos.recipe.template = 4.4
rubygemsrecipe = 0.2.2+slapos001
{
"name": "Replication Manager",
"description": "Replication Manager",
"serialisation": "xml",
"software-type": {
"default": {
"title": "Default",
"serialisation": "json-in-xml",
"description": "Replication Manager",
"request": "instance-repman-input-schema.json",
"response": "instance-repman-output-schema.json",
"index": 0
}
}
}
{% macro setbool(value) -%}
{% if value in ['true', 'True'] %} true {% else %} false {% endif -%}
{% endmacro -%}
[{{ parameter_dict['name'] }}]
title = "{{ parameter_dict['name'] }}"
monitoring-save-config = true
db-servers-hosts = "{{ parameter_dict['db-list'] }}"
db-servers-prefered-master = "{{ parameter_dict['db-prefered-master'] }}"
db-servers-credential = "{{ parameter_dict['db-user'] }}:{{ parameter_dict['db-password'] }}"
replication-credential = "{{ parameter_dict['db-user'] }}:{{ parameter_dict['db-password'] }}"
monitoring-write-heartbeat-credential = "{{ parameter_dict['heartbeat-user'] }}:{{ parameter_dict['heartbeat-password'] }}"
db-servers-connect-timeout = 1
slapos-db-partitions = "{{ parameter_dict['partition-list'] }}"
slapos-proxysql-partitions = "{{ parameter_dict['proxysql-partition'] }}"
proxysql = true
proxysql-port = {{ parameter_dict['proxy-port'] }}
proxysql-servers = "{{ parameter_dict['proxysql-servers'] }}"
proxysql-servers-ipv6 = "{{ parameter_dict['proxysql-servers-ipv6'] }}"
proxysql-user = "{{ parameter_dict['proxysql-user'] }}"
proxysql-bootstrap = true
proxysql-admin-port = {{ parameter_dict['proxy-admin-port'] }}
proxysql-password = "{{ parameter_dict['password'] }}"
prov-proxy-tags = "{{ parameter_dict['proxy-tags'] }}"
monitoring-scheduler = true
scheduler-db-servers-logical-backup = true
scheduler-db-servers-logical-backup-cron = "0 {{ parameter_dict['logical-backup-cron'] }}"
scheduler-db-servers-logs = true
scheduler-db-servers-logs-cron = "0 0 23 * * *"
scheduler-db-servers-logs-table-keep = 4
scheduler-db-servers-logs-table-rotate = true
scheduler-db-servers-logs-table-rotate-cron = "0 0 23 * * *"
scheduler-db-servers-optimize = true
scheduler-db-servers-optimize-cron = "0 0 3 1 * 5"
scheduler-db-servers-physical-backup = true
scheduler-db-servers-physical-backup-cron = "0 {{ parameter_dict['physical-backup-cron'] }}"
backup-physical-type = "mariabackup"
backup-logical-type = "mysqldump"
scheduler-db-servers-receiver-ports = "{{ parameter_dict['receiver-port-list'] }}"
prov-proxy-cpu-cores = {{ parameter_dict['proxy-cpu-cores'] }}
prov-proxy-memory = {{ parameter_dict['proxy-memory'] }}
prov-db-cpu-cores = {{ parameter_dict['db-cpu-cores'] }}
prov-db-disk-iops = {{ parameter_dict['db-disk-iops'] }}
prov-db-memory = {{ parameter_dict['db-memory'] }}
prov-db-memory-shared-pct = "{{ parameter_dict['db-memory-shared-pct'] }}"
prov-db-memory-threaded-pct = "{{ parameter_dict['db-memory-threaded-pct'] }}"
test-inject-traffic = true
# failover
failover-mode = "{{ parameter_dict['failover-mode'] }}"
failover-limit = {{ parameter_dict['failover-limit'] }}
failover-falsepositive-heartbeat = {{ setbool(parameter_dict['failover-falsepositive-heartbeat']) }}
failover-falsepositive-heartbeat-timeout = {{ parameter_dict['failover-falsepositive-heartbeat-timeout'] }}
failover-falsepositive-ping-counter = {{ parameter_dict['failover-falsepositive-ping-counter'] }}
failover-max-slave-delay = {{ parameter_dict['failover-max-slave-delay'] }}
failover-readonly-state = {{ setbool(parameter_dict['failover-readonly-state']) }}
failover-restart-unsafe = {{ setbool(parameter_dict['failover-restart-unsafe']) }}
failover-time-limit = {{ parameter_dict['failover-time-limit'] }}
#switchover
switchover-at-equal-gtid = {{ setbool(parameter_dict['switchover-at-equal-gtid']) }}
switchover-slave-wait-catch = {{ setbool(parameter_dict['switchover-slave-wait-catch']) }}
switchover-wait-kill = {{ parameter_dict['switchover-wait-kill'] }}
switchover-wait-trx = {{ parameter_dict['switchover-wait-trx'] }}
switchover-wait-write-query = {{ parameter_dict['switchover-wait-write-query'] }}
{% macro setbool(value) -%}
{% if value in ['true', 'True'] %} true {% else %} false {% endif -%}
{% endmacro -%}
[Default]
api-bind = "{{ parameter_dict['ipv4'] }}"
http-bind-address = "{{ parameter_dict['ipv4'] }}"
http-server = true
http-session-lifetime = {{ parameter_dict['http-session-lifetime'] }}
http-refresh-interval = {{ int(parameter_dict['http-refresh-interval'])*1000 }}
monitoring-save-config = false
api-https-bind = true
api-credentials = "{{ parameter_dict['username'] }}:{{ parameter_dict['password'] }}"
include = "{{ parameter_dict['cluster-d'] }}"
autorejoin = {{ setbool(parameter_dict['autorejoin']) }}
autoseed = {{ setbool(parameter_dict['autoseed']) }}
{% if parameter_dict['autoseed'] in ['true', 'True'] -%}
autorejoin-logical-backup = true
{% endif -%}
db-servers-binary-path = "{{ parameter_dict['mysql-bin-dir'] }}"
# Database list of hosts to ignore in election
#db-servers-ignored-hosts =
# Database hosts list to monitor, IP and port (optional), specified in the host:[port] format and separated by commas
monitoring-address = "{{ parameter_dict['ipv4'] }}"
monitoring-wait-retry = 40
#haproxy = true
#haproxy-binary-path = "{{ parameter_dict['haproxy-bin'] }}"
# HaProxy input bind address for read (default "0.0.0.0")
#haproxy-ip-read-bind =
# HaProxy input bind address for write (default "0.0.0.0")
#haproxy-ip-write-bind =
# HaProxy load balance read port to all nodes (default 3307)
#haproxy-read-port =
# HaProxy hosts (default "127.0.0.1")
#haproxy-servers =
# HaProxy statistics port (default 1988)
#haproxy-stat-port =
#HaProxy read-write port to leader (default 3306)
#haproxy-write-port =
# Use restic to archive and restore backups
backup = true
backup-restic = true
backup-restic-binary-path = "{{ parameter_dict['restic-bin'] }}"
backup-restic-aws = false
backup-restic-password = "{{ parameter_dict['password'] }}"
backup-mysqlclient-path = "{{ parameter_dict['mysqlclient-path'] }}"
backup-mysqlbinlog-path = "{{ parameter_dict['mysqlbinlog-path'] }}"
backup-mysqldump-path = "{{ parameter_dict['mysqldump-path'] }}"
# Mail configuration
# Alert email sender (default "mrm@localhost")
mail-from = "{{ parameter_dict['mail-from'] }}"
# Alert email SMTP server address, in host:[port] format (default "localhost:25")
mail-smtp-addr = "{{ parameter_dict['mail-smtp-addr'] }}"
mail-smtp-password = "{{ parameter_dict['mail-smtp-password'] }}"
mail-smtp-user = "{{ parameter_dict['mail-smtp-user'] }}"
# Alert email recipients, separated by commas
mail-to = "{{ parameter_dict['mail-to'] }}"
prov-orchestrator = "slapos"
prov-db-tags = "{{ parameter_dict['enabled-tags'] }}"
sysbench-binary-path = "{{ parameter_dict['sysbench-bin'] }}"
# Number of threads to run benchmark (default 4)
sysbench-threads = 4
# Time to run benchmark (default 100)
sysbench-time = 100
sysbench-v1 = true
#!/bin/bash
USER={{ parameter_dict['db-user'] }}
PASSWORD={{ parameter_dict['db-password'] }}
ERROLOG={{ parameter_dict['mysql-dir'] }}/.system/logs/errors.log
SLOWLOG={{ parameter_dict['mysql-dir']}}/.system/logs/sql-slow
BACKUPDIR={{ parameter_dict['mysql-dir'] }}/.system/backup
DATADIR={{ parameter_dict['mysql-dir'] }}/
{% if parameter_dict['use-ipv6'] == True -%}
{% set listen = "TCP6-LISTEN" -%}
{% else -%}
{% set listen = "TCP-LISTEN" -%}
{% endif -%}
export PATH={{ parameter_dict['socat-location'] }}/bin:{{ parameter_dict['mysql-location'] }}/bin:{{ parameter_dict['gzip-location'] }}/bin:$PATH
JOBS=( "xtrabackup" "mariabackup" "error" "slowquery" "zfssnapback" "optimize" "reseedxtrabackup" "reseedmariabackup" "reseedmysqldump" "flashbackxtrabackup" "flashbackmariadbackup" "flashbackmysqldump" "stop" "start")
doneJob()
{
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;UPDATE replication_manager_schema.jobs set end=NOW(), result=LOAD_FILE('{{ parameter_dict['log-dir'] }}/dbjob.out') WHERE id='$ID';" &
}
pauseJob()
{
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "select sleep(6);set sql_log_bin=0;UPDATE replication_manager_schema.jobs set result=LOAD_FILE('{{ parameter_dict['log-dir'] }}/dbjob.out') WHERE id='$ID';" &
}
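# Re-import each database from the prepared backup directory into the running
# server: InnoDB tables via transportable tablespaces (DISCARD/IMPORT), MyISAM
# and CSV tables by copying the files back and flushing them.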
partialRestore()
{
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;install plugin BLACKHOLE soname 'ha_blackhole.so'"
for dir in $(ls -d $BACKUPDIR/*/ | xargs -n 1 basename | grep -vE 'mysql|performance_schema|replication_manager_schema') ; do
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;drop database IF EXISTS $dir; CREATE DATABASE $dir;"
for file in $(find $BACKUPDIR/$dir/ -name "*.exp" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do
cat $BACKUPDIR/$dir/$file.frm | sed -e 's/\x06\x00\x49\x6E\x6E\x6F\x44\x42\x00\x00\x00/\x09\x00\x42\x4C\x41\x43\x4B\x48\x4F\x4C\x45/g' > $DATADIR/$dir/mrm_pivo.frm
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;ALTER TABLE $dir.mrm_pivo engine=innodb;RENAME TABLE $dir.mrm_pivo TO $dir.$file; ALTER TABLE $dir.$file DISCARD TABLESPACE;"
mv $BACKUPDIR/$dir/$file.ibd $DATADIR/$dir/$file.ibd
mv $BACKUPDIR/$dir/$file.exp $DATADIR/$dir/$file.exp
mv $BACKUPDIR/$dir/$file.cfg $DATADIR/$dir/$file.cfg
mv $BACKUPDIR/$dir/$file.TRG $DATADIR/$dir/$file.TRG
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;ALTER TABLE $dir.$file IMPORT TABLESPACE"
done
for file in $(find $BACKUPDIR/$dir/ -name "*.MYD" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do
mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;FLUSH TABLE $dir.$file"
done
for file in $(find $BACKUPDIR/$dir/ -name "*.CSV" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do
mv $BACKUPDIR/$dir/$file.* $DATADIR/$dir/
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;FLUSH TABLE $dir.$file"
done
done
for file in $(find $BACKUPDIR/mysql/ -name "*.MYD" | xargs -n 1 basename | cut -d'.' --complement -f2-) ; do
mv $BACKUPDIR/mysql/$file.* $DATADIR/mysql/
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;FLUSH TABLE mysql.$file"
done
cat $BACKUPDIR/xtrabackup_info | grep binlog_pos | awk -F, '{ print $3 }' | sed -e 's/GTID of the last change/set sql_log_bin=0;set global gtid_slave_pos=/g' | mysql -h{{ parameter_dict['ip'] }} -P{{ parameter_dict['port'] }} -p$PASSWORD -u$USER
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e"flush privileges;start slave;"
}
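# Kill any leftover process still listening on the socat port from a previous job.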
kill -9 $(lsof -t -i:{{ parameter_dict['socat-port'] }} -sTCP:LISTEN)
for job in "${JOBS[@]}"
do
TASK=($(echo "select concat(id,'@',server,':',port) from replication_manager_schema.jobs WHERE task='$job' and done=0 order by task desc limit 1" | mysql -h{{ parameter_dict['ip'] }} -P{{ parameter_dict['port'] }} -p$PASSWORD -u$USER -N))
ADDRESS=($(echo $TASK | awk -F@ '{ print $2 }'))
ID=($(echo $TASK | awk -F@ '{ print $1 }'))
# purge past pending entries for this task
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e "set sql_log_bin=0;UPDATE replication_manager_schema.jobs set done=1 WHERE done=0 AND task='$job';"
if [ "$ADDRESS" == "" ]; then
echo "No $job needed"
else
echo "Processing $job"
case "$job" in
reseedmysqldump)
echo "Waiting backup." > {{ parameter_dict['log-dir'] }}/dbjob.out
pauseJob
socat -u {{ listen }}:{{ parameter_dict['socat-port'] }},bind={{ parameter_dict['host'] }},reuseaddr STDOUT | gunzip | mysql -h{{ parameter_dict['ip'] }} -P{{ parameter_dict['port'] }} -p$PASSWORD -u$USER --init-command="reset master;set sql_log_bin=0" > {{ parameter_dict['log-dir'] }}/dbjob.out 2>&1
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e 'start slave;'
;;
flashbackmysqldump)
echo "Waiting backup." > {{ parameter_dict['log-dir'] }}/dbjob.out
pauseJob
socat -u {{ listen }}:{{ parameter_dict['socat-port'] }},bind={{ parameter_dict['host'] }},reuseaddr STDOUT | gunzip | mysql -h{{ parameter_dict['ip'] }} -P{{ parameter_dict['port'] }} -p$PASSWORD -u$USER --init-command="set sql_log_bin=0" > {{ parameter_dict['log-dir'] }}/dbjob.out 2>&1
mysql --defaults-file={{ parameter_dict['dbjob-cnf'] }} -e 'start slave;'
;;
reseedmariabackup)
rm -rf $BACKUPDIR
mkdir $BACKUPDIR
echo "Waiting backup." > {{ parameter_dict['log-dir'] }}/dbjob.out
pauseJob
socat -u {{ listen }}:{{ parameter_dict['socat-port'] }},bind={{ parameter_dict['host'] }},reuseaddr STDOUT | mbstream -x -C $BACKUPDIR
# mbstream -p, --parallel
mariabackup --prepare --export --target-dir=$BACKUPDIR
partialRestore
;;
flashbackmariadbackup)
rm -rf $BACKUPDIR
mkdir $BACKUPDIR
echo "Waiting backup." > {{ parameter_dict['log-dir'] }}/dbjob.out
pauseJob
socat -u {{ listen }}:{{ parameter_dict['socat-port'] }},bind={{ parameter_dict['host'] }},reuseaddr STDOUT | xbstream -x -C $BACKUPDIR
mariabackup --prepare --export --target-dir=$BACKUPDIR
partialRestore
;;
mariabackup)
cd {{ parameter_dict['tmp-dir'] }}
mariadb-backup --innobackupex --defaults-file={{ parameter_dict['dbjob-cnf'] }} --socket='{{ parameter_dict["mysqld-socket"] }}' --no-version-check --user=$USER --password=$PASSWORD --stream=xbstream {{ parameter_dict['tmp-dir'] }}/ | socat -u stdio TCP:$ADDRESS &>{{ parameter_dict['log-dir'] }}/dbjob.out
;;
error)
cat $ERROLOG| socat -u stdio TCP:$ADDRESS &>{{ parameter_dict['log-dir'] }}/dbjob.out
;;
slowquery)
cat $SLOWLOG| socat -u stdio TCP:$ADDRESS &>{{ parameter_dict['log-dir'] }}/dbjob.out
;;
optimize)
mysqlcheck --defaults-file={{ parameter_dict['dbjob-cnf'] }} -o --all-databases --skip-write-binlog &>{{ parameter_dict['log-dir'] }}/dbjob.out
;;
restart)
{{ parameter_dict['restart-script'] }} > {{ parameter_dict['log-dir'] }}/dbjob.out
;;
esac
doneJob
fi
done
#!/bin/bash
run_mysql () {
{{ mysql_bin }} --defaults-file="{{ mysql_conf }}" \
--protocol=socket -uroot -hlocalhost $@
}
if [ ! -f "{{ init_password_done }}" ]; then
for i in {30..0}; do
if echo 'SELECT 1' | run_mysql &> /dev/null; then
break
fi
echo 'MySQL init process in progress...'
sleep 1
done
if [ "$i" = 0 ]; then
echo >&2 'MySQL init process failed.'
exit 1
fi
echo "Setting mariabdb root password...";
run_mysql < {{ init_root_sql }} && touch {{ init_password_done }} || exit 1;
echo "done"
fi
# Running mariadb_upgrade while replication is bootstrapped would break the replication topology.
# Skip when already upgraded, until we have a better solution.
if [ ! -f "{{ upgrade_done }}" ]; then
{{ mysql_update }}
if [ $? -eq 0 ]; then
touch {{ upgrade_done }};
fi
fi
-- What's done in this file shouldn't be replicated
-- or products like mysql-fabric won't work
SET @@SESSION.SQL_LOG_BIN=0;
CREATE USER '{{ parameter_dict["root-user"] }}'@'localhost' IDENTIFIED BY '{{ parameter_dict["password"] }}' ;
GRANT ALL ON *.* TO '{{ parameter_dict["root-user"] }}'@'localhost' WITH GRANT OPTION ;
CREATE USER '{{ parameter_dict["root-user"] }}'@'%' IDENTIFIED BY '{{ parameter_dict["password"] }}' ;
GRANT ALL ON *.* TO '{{ parameter_dict["root-user"] }}'@'%' WITH GRANT OPTION ;
CREATE USER '{{ parameter_dict["root-user"] }}'@'::' IDENTIFIED BY '{{ parameter_dict["password"] }}' ;
GRANT ALL ON *.* TO '{{ parameter_dict["root-user"] }}'@'::' WITH GRANT OPTION ;
CREATE USER '{{ parameter_dict["heartbeat-user"] }}'@'localhost' IDENTIFIED BY '{{ parameter_dict["password"] }}' ;
GRANT ALL ON *.* TO '{{ parameter_dict["heartbeat-user"] }}'@'localhost' WITH GRANT OPTION ;
CREATE USER '{{ parameter_dict["heartbeat-user"] }}'@'%' IDENTIFIED BY '{{ parameter_dict["password"] }}' ;
GRANT ALL ON *.* TO '{{ parameter_dict["heartbeat-user"] }}'@'%' WITH GRANT OPTION ;
DROP DATABASE IF EXISTS test ;
FLUSH PRIVILEGES ;
SET @@SESSION.SQL_LOG_BIN=0;
USE mysql;
{% set mroonga = parameter_dict.get('mroonga', 'ha_mroonga.so') -%}
{% if mroonga %}
SOURCE {{ parameter_dict['mroonga-mariadb-install-sql'] }};
{% endif %}
DROP FUNCTION IF EXISTS sphinx_snippets;
#CREATE FUNCTION sphinx_snippets RETURNS STRING SONAME 'ha_sphinx.so';
{% macro database(name, user, password) -%}
CREATE DATABASE IF NOT EXISTS `{{ name }}`;
{% if user -%}
GRANT ALL PRIVILEGES ON `{{ name }}`.* TO `{{ user }}`@`%` IDENTIFIED BY '{{ password }}';
GRANT ALL PRIVILEGES ON `{{ name }}`.* TO `{{ user }}`@localhost IDENTIFIED BY '{{ password }}';
GRANT ALL PRIVILEGES ON `{{ name }}`.* TO `{{ user }}`@'::' IDENTIFIED BY '{{ password }}';
{%- endif %}
{% endmacro -%}
{% for entry in parameter_dict['database-list'] -%}
{{ database(entry['name'], entry.get('user'), entry.get('password')) }}
{% endfor -%}
{% set socket = parameter_dict['socket'] -%}
# ERP5 buildout my.cnf template based on my-huge.cnf shipped with mysql
# The MySQL server
[mysqld]
# ERP5 requires InnoDB storage. By default, MySQL falls back to a different
# engine, like MyISAM, when InnoDB is not available. Such behaviour causes
# problems when tables requested as InnoDB are silently created with the
# MyISAM engine.
#
# A loud failure is really required in such a case.
# Already present in REPMAN
#sql_mode="NO_ENGINE_SUBSTITUTION"
socket = {{ socket }}
datadir = {{ parameter_dict['data-directory'] }}
pid_file = {{ parameter_dict['pid-file'] }}
{% set innodb_buffer_pool_size = parameter_dict['innodb-buffer-pool-size'] -%}
{% if innodb_buffer_pool_size %}innodb_buffer_pool_size = {{ innodb_buffer_pool_size }}{% endif %}
{% set innodb_buffer_pool_instances = parameter_dict['innodb-buffer-pool-instances'] -%}
{% if innodb_buffer_pool_instances %}innodb_buffer_pool_instances = {{ innodb_buffer_pool_instances }}{% endif %}
{% set innodb_log_file_size = parameter_dict['innodb-log-file-size'] -%}
{% if innodb_log_file_size %} innodb_log_file_size = {{ innodb_log_file_size }}{% endif %}
{% set innodb_log_buffer_size = parameter_dict['innodb-log-buffer-size'] -%}
{% if innodb_log_buffer_size %} innodb_log_buffer_size = {{ innodb_log_buffer_size }}{% endif %}
# very important to allow parallel indexing
# Note: this is compatible with binlog-based incremental backups, because ERP5
# doesn't use "insert ... select" (in any number of queries) pattern.
# innodb_locks_unsafe_for_binlog = 1
#plugin_load = ha_mroonga
plugin-dir = {{ parameter_dict['plugin-directory'] }}
{% if 'ssl-key' in parameter_dict -%}
ssl
ssl-cert = {{ parameter_dict['ssl-crt'] }}
ssl-key = {{ parameter_dict['ssl-key'] }}
{% if 'ssl-ca-crt' in parameter_dict -%}
ssl-ca = {{ parameter_dict['ssl-ca-crt'] }}
{%- endif %}
{% if 'ssl-crl' in parameter_dict -%}
ssl-crl = {{ parameter_dict['ssl-crl'] }}
{%- endif %}
{% if 'ssl-cipher' in parameter_dict -%}
ssl-cipher = {{ parameter_dict['ssl-cipher'] }}
{%- endif %}
{%- endif %}
# Some dangerous settings you may want to uncomment temporarily
# if you only want performance or less disk access.
{% set x = '' if parameter_dict['relaxed-writes'] else '#' -%}
{{x}}innodb_flush_log_at_trx_commit = 0
{{x}}innodb_flush_method = nosync
{{x}}innodb_doublewrite = 0
{{x}}sync_frm = 0
# skip_character_set_client_handshake
[client]
socket = {{ socket }}
user = root
[mysql]
no_auto_rehash
[mysqlhotcopy]
interactive_timeout
[mysqldump]
max_allowed_packet = 128M
#!{{ bash_bin }}
curl () {
{{ curl_bin }} -k --silent -H "Accept: application/json" "$@"
}
# TOKEN=$(curl -s -X POST --data '{"username":"{{ username }}","password":"XXXXX"}' {{ repman_url }}/api/login | {{ jq_bin }} -r '.token')
# Checking if mariadb start is needed
#CODE=$(curl -H "Authorization: Bearer ${TOKEN}" -o /dev/null -w "%{http_code}" {{ repman_url }}/api/clusters/{{ cluster }}/servers/{{ db_host }}/{{ db_port }}/need-start)
CODE=$(curl -o /dev/null -w "%{http_code}" {{ repman_url }}/api/clusters/{{ cluster }}/servers/{{ db_host }}/{{ db_port }}/need-start)
if [ $CODE -eq 200 ]; then
echo "$CODE: Updating mysql configuration..."
# update mysql configuration
{{ update_config }}
echo "$CODE: Starting mariadb service..."
# print current status, can be useful for debug...
{{ mariadb_controller }} status mariadb
{{ mariadb_controller }} start mariadb
sleep 5
# check again if the service is still up...
{{ mariadb_controller }} status mariadb
fi
pid {{ parameter_dict['pid-file'] }};
error_log {{ parameter_dict['error-log'] }};
daemon off;
events {
worker_connections 1024;
accept_mutex off;
}
http {
default_type application/octet-stream;
access_log {{ parameter_dict['access-log'] }} combined;
client_max_body_size 10M;
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen [{{ parameter_dict['ipv6'] }}]:{{ parameter_dict['ssl-port'] }} ssl;
server_name _;
ssl_certificate {{ parameter_dict['ssl-certificate'] }};
ssl_certificate_key {{ parameter_dict['ssl-key'] }};
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
keepalive_timeout 90s;
location / {
proxy_redirect off;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $http_host;
proxy_connect_timeout 200;
proxy_pass {{ parameter_dict['repman-secure-url'] }};
}
}
server {
listen [{{ parameter_dict['ipv6'] }}]:{{ parameter_dict['port'] }};
server_name _;
location / {
proxy_redirect off;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $http_host;
proxy_connect_timeout 200;
proxy_pass {{ parameter_dict['repman-url'] }};
}
}
}
#!{{ bash_bin }}
curl () {
{{ curl_bin }} -k --silent "$@"
}
check_start_cluster () {
NAME=$1
HOST=$2
PORT=$3
CODE=$(curl -o /dev/null -w "%{http_code}" {{ repman_url }}/api/clusters/$NAME/servers/$HOST/$PORT/need-start)
if [ $CODE -eq 200 ]; then
echo "$CODE: Starting proxysql $HOST:$PORT..."
{{ proxysql_controller }} start proxysql-$NAME
sleep 1
# check again if the service is still up...
{{ proxysql_controller }} status proxysql-$NAME
fi
}
check_stop_cluster () {
NAME=$1
HOST=$2
PORT=$3
CODE=$(curl -o /dev/null -w "%{http_code}" {{ repman_url }}/api/clusters/$NAME/servers/$HOST/$PORT/need-stop)
if [ $CODE -eq 200 ]; then
echo "$CODE: updating proxysql config $HOST:$PORT..."
{{ get_proxy_config }} $NAME $HOST $PORT
echo "$CODE: Stoping proxysql $HOST:$PORT..."
{{ proxysql_controller }} stop proxysql-$NAME
sleep 1
# check again if the service is stopped...
{{ proxysql_controller }} status proxysql-$NAME
fi
}
{% for cluster_dict in cluster_list -%}
check_start_cluster {{ cluster_dict['name'] }} {{ cluster_dict['host'] }} {{ cluster_dict['port'] }}
check_stop_cluster {{ cluster_dict['name'] }} {{ cluster_dict['host'] }} {{ cluster_dict['port'] }}
{% endfor -%}
#!{{ bash_bin }}
#set -e
curl () {
{{ curl_bin }} -k --silent -H "Accept: application/json" "$@"
}
get_token () {
curl -s -X POST --data '{"username":"{{ username }}","password":"{{ password }}"}' {{ secure_url }}/api/login
}
wait_database () {
NAME=$1
for retry in {1..50}; do
echo ">> Wait until $NAME databases are ready...";
CODE=$(curl -H "Authorization: Bearer ${TOKEN}" -o /dev/null -w "%{http_code}" {{ secure_url }}/api/clusters/$NAME/actions/waitdatabases);
if [ $CODE -eq 504 ]; then
# We hit a timeout, try again
echo ">> [$retry] Timeout on {{ secure_url }}/api/clusters/$NAME/actions/waitdatabases, trying again...";
else
if [ $CODE -eq 200 ]; then
break;
else
if [ $CODE -eq 401 ]; then
# try again with new token
TOKEN=$(get_token | {{ jq_bin }} -r '.token')
fi
fi
echo ">> [$retry] waitdatabases returned code $CODE...";
fi
sleep 30
done
}
activate_proxy () {
NAME=$1
URL="{{ secure_url }}/api/clusters/$NAME/settings/actions/switch/database-hearbeat"
echo ">> Calling $URL...";
CODE=$(curl -H "Authorization: Bearer ${TOKEN}" -o /dev/null -w "%{http_code}" $URL)
if [ $CODE -eq 200 ]; then
return 0;
else
echo ">> ERROR: failed to activate proxy: $URL returned code $CODE"
return 1;
fi
}
TOKEN=$(get_token | {{ jq_bin }} -r '.token')
# Always reload cluster configuration to apply recent changes
{% for name in cluster_name_list -%}
# reload {{ name }} settings
echo "Reloading settings for {{ name }}..."
curl -H "Authorization: Bearer ${TOKEN}" \
{{ secure_url }}/api/clusters/{{ name }}/settings/actions/reload
# Start Replication on {{ name }}
if [ ! -f "{{ parameter_dict['bootstrap'] }}/{{ name }}_bootstrapped" ]; then
wait_database {{ name }}
echo "Bootstrap replication on {{ name }}..."
TOKEN=$(get_token | {{ jq_bin }} -r '.token')
curl -H "Authorization: Bearer ${TOKEN}" \
{{ secure_url }}/api/clusters/{{ name }}/actions/replication/cleanup
CODE=$(curl -H "Authorization: Bearer ${TOKEN}" -o /dev/null -w "%{http_code}" {{ secure_url }}/api/clusters/{{ name }}/actions/replication/bootstrap/master-slave)
SUCCESS=0
if [ $CODE -eq 200 ]; then
activate_proxy {{ name }}
if [ $? -eq 0 ]; then
# Mark bootstrap as done!
echo "Cluster {{ name }} replication bootstrapped"
echo "DO NOT REMOVE THIS FILE" > {{ parameter_dict['bootstrap'] }}/{{ name }}_bootstrapped
fi
else
echo "ERROR: Failed to bootstrap cluster {{ name }}... http_code $CODE"
fi
fi
{% endfor %}
Tests for Replication Manager software release
##############################################################################
#
# Copyright (c) 2018 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from setuptools import setup, find_packages
version = '0.0.1.dev0'
name = 'slapos.test.repman'
long_description = open("README.md").read()
setup(
name=name,
version=version,
description="Test for SlapOS' Replication Manager",
long_description=long_description,
long_description_content_type='text/markdown',
maintainer="Nexedi",
maintainer_email="info@nexedi.com",
url="https://lab.nexedi.com/nexedi/slapos",
packages=find_packages(),
install_requires=[
'slapos.core',
'slapos.libnetworkcache',
'erp5.util',
'supervisor',
'pexpect',
'requests',
],
zip_safe=True,
test_suite='test',
)
##############################################################################
#
# Copyright (c) 2019 Nexedi SA and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import unicode_literals
import os
import textwrap
import logging
import tempfile
import time
from six.moves.urllib.parse import urlparse, urljoin
import pexpect
import requests
from slapos.testing.testcase import makeModuleSetUpAndTestCaseClass
setUpModule, SlapOSInstanceTestCase = makeModuleSetUpAndTestCaseClass(
os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'software.cfg')))
class TestRepman(SlapOSInstanceTestCase):
__partition_reference__ = 'R' # solve path too long for postgresql and unicorn
@classmethod
def getInstanceSoftwareType(cls):
return 'default'
def setUp(self):
self.backend_url = self.computer_partition.getConnectionParameterDict(
)['backend-url']
def test_http_get(self):
resp = requests.get(self.backend_url, verify=False)
self.assertTrue(
resp.status_code in [requests.codes.ok, requests.codes.found])
@@ -148,6 +148,11 @@ setup = ${slapos-repository:location}/software/cloudooo/test/
egg = slapos.test.dream
setup = ${slapos-repository:location}/software/dream/test/
[slapos.test.repman-setup]
<= setup-develop-egg
egg = slapos.test.repman
setup = ${slapos-repository:location}/software/repman/test/
[slapos.core-repository]
<= git-clone-repository
repository = https://lab.nexedi.com/nexedi/slapos.core.git
@@ -193,6 +198,7 @@ eggs =
${slapos.test.cloudooo-setup:egg}
${slapos.test.dream-setup:egg}
${slapos.test.metabase-setup:egg}
${slapos.test.repman-setup:egg}
${backports.lzma:egg}
entry-points =
runTestSuite=erp5.util.testsuite:runTestSuite
@@ -259,6 +265,7 @@ extra =
${slapos.test.gitlab-setup:setup}
${slapos.test.cloudooo-setup:setup}
${slapos.test.dream-setup:setup}
${slapos.test.repman-setup:setup}
[versions]
# slapos.core is used from the clone always
Supervisord process manager
How to use
==========
The Supervisord stack provides a library which can be called from your SlapOS instance profile. It can be used to run sub-services inside a partition.
To use:
* extend ``stack/supervisord/buildout.cfg`` in your software.cfg file.
* provide ``supervisord-library:target`` and ``supervisord-conf:target`` to the instance template which needs the supervisord controller.
* add ``{% import "supervisord" as supervisord with context %}`` to the instance template which calls the supervisord library. See the example below:
**software.cfg**
::
[template-instance]
recipe = slapos.recipe.template:jinja2
context =
key buildout_bin_directory buildout:bin-directory
key supervisord supervisord-library:target
key supervisord_conf supervisord-conf:target
**instance.cfg.in**
::
[template-custom-instance.cfg]
recipe = slapos.recipe.template:jinja2
supervisord-lib = {{ supervisord }}
import-list =
file supervisord :supervisord-lib
context =
raw buildout_bin_directory {{ buildout_bin_directory }}
raw supervisord_conf {{ supervisord_conf }}
**custom-instance.cfg**
::
{% import "supervisord" as supervisord with context %}
{{ supervisord.supervisord("custom-controller", buildout_bin_directory, supervisord_conf, use_service_hash=False) }}
# add program to service controller
{% set program_dict = {"name": "mariadb", "command": "${mariadb-service:wrapper}",
"stopwaitsecs": 300, "environment": []} %}
{{ supervisord.supervisord_program("mariadb", program_dict) }}
...
[buildout]
parts =
...
supervisord-custom-controller
supervisord-mariadb
Supervisord inside partition
============================
Check the status of supervisord-controlled services:
::
$ instance/slappartXX/bin/custom-controller status
mariadb RUNNING pid 5511, uptime 6:04:54
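Other supervisorctl actions go through the same wrapper; for example, a
hypothetical restart of the ``mariadb`` program declared above (names taken
from the example, adapt them to your instance):
::
$ instance/slappartXX/bin/custom-controller restart mariadb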
`supervisord_program` parameters and defaults:
.. code-block:: python
program_dict = {
"name": "NAME",
"command": "WRAPPER_PATH",
"stopwaitsecs": 60,
"environment": ['PATH="/usr/bin/:/partition/bin/:$PATH"', 'MAKEFLAGS="-j2"'],
"autostart": True,
"autorestart": False,
"startsecs": 0,
"startretries": 0,
"stopsignal": "TERM",
"stdout_logfile": "NONE",
"stderr_logfile": "NONE"
}
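For reference, a hypothetical program declaration that overrides some of these
defaults could look as follows; the ``backup`` name, the ``backup-service``
section and the ``${directory:...}`` paths are illustrative and are not
provided by this stack:
::
{% set backup_program_dict = {"name": "backup",
"command": "${backup-service:wrapper}",
"stopwaitsecs": 120,
"autorestart": True,
"stdout_logfile": "${directory:log}/backup.log",
"environment": ['TMPDIR="${directory:tmp}"']} %}
{{ supervisord.supervisord_program("backup", backup_program_dict) }}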
[buildout]
extends =
../slapos.cfg
buildout.hash.cfg
parts =
supervisord-conf
supervisord-library
[supervisord-download-base]
recipe = slapos.recipe.build:download
mode = 0644
url = ${:_profile_base_location_}/${:_update_hash_filename_}
[supervisord-eggs]
recipe = zc.recipe.egg
eggs =
${slapos-cookbook:eggs}
supervisor
scripts =
supervisord
supervisorctl
[supervisord-library]
<= supervisord-download-base
filename = supervisord.jinja2.in
depends = ${supervisord-eggs:recipe}
[supervisord-conf]
<= supervisord-download-base
filename = supervisord.conf.in
# THIS IS NOT A BUILDOUT FILE, despite purposely using a compatible syntax.
# The only allowed lines here are (regexes):
# - "^#" comments, copied verbatim
# - "^[" section beginnings, copied verbatim
# - lines containing an "=" sign which must fit in the following categories.
# - "^\s*filename\s*=\s*path\s*$" where "path" is relative to this file
# Copied verbatim.
# - "^\s*hashtype\s*=.*" where "hashtype" is one of the values supported
# by the re-generation script.
# Re-generated.
# - other lines are copied verbatim
# Substitution (${...:...}), extension ([buildout] extends = ...) and
# section inheritance (< = ...) are NOT supported (but you should really
# not need these here).
[supervisord-library]
_update_hash_filename_ = supervisord.jinja2.in
md5sum = 163c9f60e4ad3842162cbb11d771b7b8
[supervisord-conf]
_update_hash_filename_ = supervisord.conf.in
md5sum = d624f65151233493c6dbdafa83ae8cbd
[unix_http_server]
file = {{ parameter_dict['socket-path'] }}
chmod=0700
[include]
files = {{ parameter_dict['include-dir'] }}/*.conf
[supervisorctl]
serverurl = unix://{{ parameter_dict['socket-path'] }}
[supervisord]
loglevel = {{ parameter_dict['log-level'] }}
logfile_maxbytes = 2MB
nodaemon = false
logfile-backups = 3
logfile = {{ parameter_dict['log-file'] }}
pidfile = {{ parameter_dict['pid-file'] }}
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
{% macro supervisord(
name,
buildout_bin_directory,
supervisord_conf,
use_service_hash=False
) -%}
[controller-directory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
var = ${buildout:directory}/var
log = ${:var}/log
run = ${:var}/run
supervisord = ${:etc}/supervisord-{{ name }}.conf.d
[controller-parameters]
socket-path = ${controller-directory:run}/{{ name }}.socket
include-dir = ${controller-directory:supervisord}
log-file = ${controller-directory:log}/supervisord-{{ name }}.log
log-level = info
pid-file = ${controller-directory:run}/supervisord-{{ name }}.pid
[supervisord-controller-conf]
recipe = slapos.recipe.template:jinja2
template = {{ supervisord_conf }}
context =
section parameter_dict controller-parameters
rendered = ${controller-directory:etc}/supervisord-{{ name }}.conf
[supervisord-{{ name }}]
recipe = slapos.cookbook:wrapper
command-line = {{ buildout_bin_directory }}/supervisord -c ${supervisord-controller-conf:rendered} --nodaemon
wrapper-path = ${directory:services}/supervisord-{{ name }}
{% if use_service_hash -%}
hash-existing-files = ${buildout:directory}/software_release/buildout.cfg
{% endif -%}
depends =
{{ '${' ~ name ~ '-bin:recipe}' }}
[{{ name }}-bin]
recipe = slapos.cookbook:wrapper
command-line = {{ buildout_bin_directory }}/supervisorctl -c ${supervisord-controller-conf:rendered}
wrapper-path = ${directory:bin}/{{ name }}
{%- endmacro %}
{% macro supervisord_program(
name,
parameter_dict
) -%}
[supervisord-{{ name }}]
recipe = slapos.recipe.template:jinja2
template = inline:[program:{{ parameter_dict['name'] }}]
directory = ${buildout:directory}
command = {{ parameter_dict['command'] }}
process_name = {{ parameter_dict['name'] }}
autostart = {{ parameter_dict.get('autostart', True) }}
autorestart = {{ parameter_dict.get('autorestart', False) }}
startsecs = {{ parameter_dict.get('startsecs', 0) }}
startretries = {{ parameter_dict.get('startretries', 0) }}
exitcodes = {{ parameter_dict.get('exitcodes', 0) }}
stopsignal = {{ parameter_dict.get('stopsignal', 'TERM') }}
stopwaitsecs = {{ parameter_dict.get('stopwaitsecs', 60) }}
serverurl=AUTO
redirect_stderr=true
stdout_logfile = {{ parameter_dict.get('stdout_logfile', 'NONE') }}
stdout_logfile_maxbytes = 1000KB
stdout_logfile_backups = 1
stderr_logfile = {{ parameter_dict.get('stderr_logfile', 'NONE') }}
stderr_logfile_maxbytes = 1000KB
stderr_logfile_backups = 1
environment = {{ parameter_dict['environment'] | join(',') }}
rendered = ${controller-directory:supervisord}/{{ name }}.conf
{%- endmacro %}