Commit 88976343 authored by Alain Takoudjou

Merge branch 'master' into 1.0

parents b6e38ca4 596225bb
......@@ -8,7 +8,7 @@ extends =
recipe = slapos.recipe.cmmi
url = ftp://ftp.cyrusimap.org/cyrus-sasl/cyrus-sasl-2.1.26.tar.gz
md5sum = a7f4e5e559a0e37b3ffc438c9456e425
location = ${buildout:parts-directory}/${:_buildout_section_name_}
configure-options =
--disable-digest
--disable-gssapi
......@@ -18,6 +18,7 @@ configure-options =
--without-openssl
--without-pam
--without-saslauthd
--with-plugindir=${:location}/lib/sasl2
# it seems that parallel build sometimes fails.
make-options =
-j1
......
......@@ -6,14 +6,39 @@ extends =
../libdb/buildout.cfg
../openssl/buildout.cfg
../pcre/buildout.cfg
../cyrus-sasl/buildout.cfg
parts = postfix
[noroot.patch]
recipe = hexagonit.recipe.download
url =${:_profile_base_location_}/${:filename}
filename = ${:_buildout_section_name_}
download-only = true
md5sum = 738bcc97b8044c45b58708bdf3a84b8e
[skip-libdb-check.patch]
recipe = hexagonit.recipe.download
url =${:_profile_base_location_}/${:filename}
filename = ${:_buildout_section_name_}
download-only = true
md5sum = f7fdbd8787874b535fee548b0139c0d8
[postfix-linux4.patch]
recipe = hexagonit.recipe.download
url = ${:_profile_base_location_}/${:filename}
filename = ${:_buildout_section_name_}
download-only = true
md5sum = 2549e80b9834c58185a18bfbf55c2767
[postfix]
recipe = slapos.recipe.cmmi
url = ftp://ftp.porcupine.org/mirrors/postfix-release/official/postfix-2.8.3.tar.gz
md5sum = b3922ededd3fd6051f759e58a4ada3ae
url = ftp://ftp.porcupine.org/mirrors/postfix-release/official/postfix-2.11.1.tar.gz
md5sum = 56ac1f1a79737c4ac1e24535a122a4a6
location = ${buildout:parts-directory}/${:_buildout_section_name_}
patch-options = -p1
patches =
${noroot.patch:location}/${noroot.patch:filename}
${skip-libdb-check.patch:location}/${skip-libdb-check.patch:filename}
${postfix-linux4.patch:location}/${postfix-linux4.patch:filename}
configure-command = make
configure-options = makefiles CCARGS='-DUSE_TLS -DHAS_PCRE -DHAS_DB -I${libdb:location}/include -I${pcre:location}/include -I${openssl:location}/include' AUXLIBS='-L${openssl:location}/lib -L${pcre:location}/lib -L${libdb:location}/lib -lssl -lpcre -ldb -lcrypto -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${libdb:location}/lib'
configure-options = makefiles CCARGS='-DUSE_SASL_AUTH -DUSE_CYRUS_SASL -DUSE_TLS -DHAS_PCRE -DHAS_DB -I${libdb:location}/include -I${pcre:location}/include -I${openssl:location}/include -I${cyrus-sasl:location}/include/sasl' AUXLIBS='-L${openssl:location}/lib -L${pcre:location}/lib -L${libdb:location}/lib -L${cyrus-sasl:location}/lib -lssl -lpcre -ldb -lcrypto -lsasl2 -Wl,-rpath=${openssl:location}/lib -Wl,-rpath=${pcre:location}/lib -Wl,-rpath=${libdb:location}/lib -Wl,-rpath=${cyrus-sasl:location}/lib'
make-targets = non-interactive-package install_root=${:location}
diff --git a/src/global/mail_params.c b/src/global/mail_params.c
index 2d91977..0f06298 100644
--- a/src/global/mail_params.c
+++ b/src/global/mail_params.c
@@ -721,7 +721,9 @@ void mail_params_init()
check_default_privs();
check_mail_owner();
check_sgid_group();
+ /*
check_overlap();
+ */
#ifdef HAS_DB
dict_db_cache_size = var_db_read_buf;
#endif
diff --git a/src/master/master.c b/src/master/master.c
index a9d5d1b..db88c55 100644
--- a/src/master/master.c
+++ b/src/master/master.c
@@ -315,10 +315,10 @@ int main(int argc, char **argv)
* privileges for selected operations. That's right - it takes privileges
* to toss privileges.
*/
- if (getuid() != 0)
+ /*if (getuid() != 0)
msg_fatal("the master command is reserved for the superuser");
if (unsafe() != 0)
- msg_fatal("the master command must not run as a set-uid process");
+ msg_fatal("the master command must not run as a set-uid process");*/
/*
* Process JCL.
@@ -392,8 +392,10 @@ int main(int argc, char **argv)
* all MTA processes cleanly. Give up if we can't separate from our
* parent process. We're not supposed to blow away the parent.
*/
+ /*
if (debug_me == 0 && master_detach != 0 && setsid() == -1 && getsid(0) != getpid())
msg_fatal("unable to set session and process group ID: %m");
+ */
/*
* Make some room for plumbing with file descriptors. XXX This breaks
diff --git a/src/postfix/postfix.c b/src/postfix/postfix.c
index 183c825..007c805 100644
--- a/src/postfix/postfix.c
+++ b/src/postfix/postfix.c
@@ -450,12 +450,12 @@ int main(int argc, char **argv)
* privileges for selected operations. That's right - it takes privileges
* to toss privileges.
*/
- if (getuid() != 0) {
+ /*if (getuid() != 0) {
msg_error("to submit mail, use the Postfix sendmail command");
msg_fatal("the postfix command is reserved for the superuser");
}
if (unsafe() != 0)
- msg_fatal("the postfix command must not run as a set-uid process");
+ msg_fatal("the postfix command must not run as a set-uid process");*/
/*
* Parse switches.
diff --git a/src/postsuper/postsuper.c b/src/postsuper/postsuper.c
index 9dabb5d..e678565 100644
--- a/src/postsuper/postsuper.c
+++ b/src/postsuper/postsuper.c
@@ -1150,10 +1150,10 @@ int main(int argc, char **argv)
* the secondary groups, the process environment, and so on. Otherwise,
* accidents can happen. If not with Postfix, then with other software.
*/
- if (unsafe() != 0)
+ /*if (unsafe() != 0)
msg_fatal("this postfix command must not run as a set-uid process");
if (getuid())
- msg_fatal("use of this command is reserved for the superuser");
+ msg_fatal("use of this command is reserved for the superuser");*/
/*
* Parse JCL.
diff --git a/src/util/chroot_uid.c b/src/util/chroot_uid.c
index 4a7660f..d5d4e67 100644
--- a/src/util/chroot_uid.c
+++ b/src/util/chroot_uid.c
@@ -55,10 +55,11 @@ void chroot_uid(const char *root_dir, const char *user_name)
msg_fatal("unknown user: %s", user_name);
uid = pwd->pw_uid;
gid = pwd->pw_gid;
+ /*
if (setgid(gid) < 0)
msg_fatal("setgid(%ld): %m", (long) gid);
if (initgroups(user_name, gid) < 0)
- msg_fatal("initgroups: %m");
+ msg_fatal("initgroups: %m");*/
}
/*
@@ -74,9 +75,11 @@ void chroot_uid(const char *root_dir, const char *user_name)
/*
* Drop the user privileges.
*/
+ /*
if (user_name != 0)
if (setuid(uid) < 0)
msg_fatal("setuid(%ld): %m", (long) uid);
+ */
/*
* Give the desperate developer a clue of what is happening.
diff --git a/src/util/set_eugid.c b/src/util/set_eugid.c
index ef35380..ed96a69 100644
--- a/src/util/set_eugid.c
+++ b/src/util/set_eugid.c
@@ -53,7 +53,7 @@
void set_eugid(uid_t euid, gid_t egid)
{
- int saved_errno = errno;
+/* int saved_errno = errno;
if (geteuid() != 0)
if (seteuid(0))
@@ -67,4 +67,4 @@ void set_eugid(uid_t euid, gid_t egid)
if (msg_verbose)
msg_info("set_eugid: euid %ld egid %ld", (long) euid, (long) egid);
errno = saved_errno;
-}
+*/}
diff --git a/src/util/set_ugid.c b/src/util/set_ugid.c
index bbcb901..5a7a48b 100644
--- a/src/util/set_ugid.c
+++ b/src/util/set_ugid.c
@@ -44,7 +44,7 @@
void set_ugid(uid_t uid, gid_t gid)
{
- int saved_errno = errno;
+/* int saved_errno = errno;
if (geteuid() != 0)
if (seteuid(0) < 0)
@@ -58,4 +58,4 @@ void set_ugid(uid_t uid, gid_t gid)
if (msg_verbose > 1)
msg_info("setugid: uid %ld gid %ld", (long) uid, (long) gid);
errno = saved_errno;
-}
+*/}
Taken from https://bugs.gentoo.org/show_bug.cgi?id=544610
--- a/makedefs 2015-03-27 00:25:29.103726341 +0100
+++ a/makedefs 2015-03-27 00:32:14.657061042 +0100
@@ -500,7 +500,8 @@
: ${SHLIB_ENV="LD_LIBRARY_PATH=`pwd`/lib"}
: ${PLUGIN_LD="${CC-gcc} -shared"}
;;
- Linux.3*) SYSTYPE=LINUX3
+ Linux.*)
+ SYSTYPE=LINUX
case "$CCARGS" in
*-DNO_DB*) ;;
*-DHAS_DB*) ;;
--- a/src/util/sys_defs.h 2015-03-27 00:36:21.203728543 +0100
+++ a/src/util/sys_defs.h 2015-03-27 00:36:09.657061839 +0100
@@ -756,7 +756,7 @@
/*
* LINUX.
*/
+#if defined(LINUX2) || defined(LINUX)
-#if defined(LINUX2) || defined(LINUX3)
#define SUPPORTED
#include <sys/types.h>
#define UINT32_TYPE unsigned int
diff --git a/makedefs b/makedefs
index dd5f256..e90880e 100644
--- a/makedefs
+++ b/makedefs
@@ -299,13 +299,13 @@ case "$SYSTEM.$RELEASE" in
elif [ -f /usr/include/db/db.h ]
then
CCARGS="$CCARGS -I/usr/include/db"
- else
+ #else
# No, we're not going to try db1 db2 db3 etc.
# On a properly installed system, Postfix builds
# by including <db.h> and by linking with -ldb
- echo "No <db.h> include file found." 1>&2
- echo "Install the appropriate db*-devel package first." 1>&2
- exit 1
+ #echo "No <db.h> include file found." 1>&2
+ #echo "Install the appropriate db*-devel package first." 1>&2
+ #exit 1
fi
SYSLIBS="-ldb"
;;
@@ -372,12 +372,12 @@ EOF
elif [ -f /usr/include/db/db.h ]
then
CCARGS="$CCARGS -I/usr/include/db"
- else
+ #else
# On a properly installed system, Postfix builds
# by including <db.h> and by linking with -ldb
- echo "No <db.h> include file found." 1>&2
- echo "Install the appropriate db*-devel package first." 1>&2
- exit 1
+ #echo "No <db.h> include file found." 1>&2
+ #echo "Install the appropriate db*-devel package first." 1>&2
+ #exit 1
fi
SYSLIBS="-ldb"
;;
@@ -403,12 +403,12 @@ EOF
elif [ -f /usr/include/db/db.h ]
then
CCARGS="$CCARGS -I/usr/include/db"
- else
+ #else
# On a properly installed system, Postfix builds
# by including <db.h> and by linking with -ldb
- echo "No <db.h> include file found." 1>&2
- echo "Install the appropriate db*-devel package first." 1>&2
- exit 1
+ #echo "No <db.h> include file found." 1>&2
+ #echo "Install the appropriate db*-devel package first." 1>&2
+ #exit 1
fi
SYSLIBS="-ldb"
;;
[buildout]
extends =
../git/buildout.cfg
[userhosts]
recipe = plone.recipe.command
stop-on-error = true
update-command = ${:command}
command = cd ${userhosts-get:location} && make
# For convenience (one section to build & know result path)
location = ${userhosts-get:location}/userhosts
[userhosts-get]
recipe = slapos.recipe.build:gitclone
repository = http://git.erp5.org/repos/userhosts.git
revision = d3080ad4b82c91dc14fd2ad2a8e9b2512e600c2a
git-executable = ${git:location}/bin/git
location = ${buildout:parts-directory}/userhosts
......@@ -167,6 +167,7 @@ setup(name=name,
'proactive = slapos.recipe.proactive:Recipe',
'publish = slapos.recipe.publish:Recipe',
'publish.serialised = slapos.recipe.publish:Serialised',
'publish-early = slapos.recipe.publish_early:Recipe',
'publishsection = slapos.recipe.publish:PublishSection',
'publishurl = slapos.recipe.publishurl:Recipe',
'readline = slapos.recipe.readline:Recipe',
......@@ -192,17 +193,19 @@ setup(name=name,
'slapmonitor = slapos.recipe.slapmonitor:MonitorRecipe',
'slapmonitor-xml = slapos.recipe.slapmonitor:MonitorXMLRecipe',
'slapreport = slapos.recipe.slapreport:Recipe',
'softwaretype = slapos.recipe.softwaretype:Recipe',
'softwaretype = slapos.recipe.softwaretype:Recipe', # BBB
'sphinx= slapos.recipe.sphinx:Recipe',
'squid = slapos.recipe.squid:Recipe',
'sshkeys_authority = slapos.recipe.sshkeys_authority:Recipe',
'sshkeys_authority.request = slapos.recipe.sshkeys_authority:Request',
'stunnel = slapos.recipe.stunnel:Recipe',
'switch-softwaretype = slapos.recipe.switch_softwaretype:Recipe',
'symbolic.link = slapos.recipe.symbolic_link:Recipe',
'tidstorage = slapos.recipe.tidstorage:Recipe',
'trac = slapos.recipe.trac:Recipe',
'urlparse = slapos.recipe._urlparse:Recipe',
'uuid = slapos.recipe._uuid:Recipe',
'userinfo = slapos.recipe.userinfo:Recipe',
'vifib = slapos.recipe.vifib:Recipe',
'waitfor = slapos.recipe.waitfor:Recipe',
'webchecker = slapos.recipe.web_checker:Recipe',
......
......@@ -57,7 +57,9 @@ class Recipe(GenericBaseRecipe):
else:
raise ValueError('Unsupported scheme %s' % scheme)
ip = self.options['ip']
ip_list = self.options['ip']
if isinstance(ip_list, basestring):
ip_list = [ip_list]
backend_path = self.options.get('backend-path', '/')
vhost_template_name = self.getTemplateFilename('vhost.in')
apache_config_file = self.createFile(
......@@ -78,7 +80,7 @@ class Recipe(GenericBaseRecipe):
'port': port,
'backend': ('%s/%s' % (backend.rstrip('/'), backend_path.strip('/'))).rstrip('/'),
'ssl_enable': ssl_enable,
}) for (port, backend) in backend_list),
}) for (port, backend) in backend_list for ip in ip_list),
},
)
)
......
##############################################################################
#
# Copyright (c) 2012 Vifib SARL and Contributors. All Rights Reserved.
# Copyright (c) 2012-2014 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
......@@ -26,7 +26,6 @@
##############################################################################
from slapos.recipe.librecipe import GenericBaseRecipe
import os
import sys
import urlparse
......@@ -36,31 +35,25 @@ class Recipe(GenericBaseRecipe):
"""
def install(self):
parsed = urlparse.urlparse(self.options['mysql-url'])
mysql_connection_string = "%(database)s@%(hostname)s:%(port)s "\
"%(username)s %(password)s" % dict(
database=parsed.path.split('/')[1],
hostname=parsed.hostname,
port=parsed.port,
username=parsed.username,
password=parsed.password
)
zope_parsed = urlparse.urlparse(self.options['zope-url'])
config = dict(
python_path=sys.executable,
user=zope_parsed.username,
password=zope_parsed.password,
site_id=zope_parsed.path.split('/')[1],
host="%s:%s" % (zope_parsed.hostname, zope_parsed.port),
sql_connection_string=mysql_connection_string,
)
# Runners
runner_path = self.createExecutable(
mysql = urlparse.urlsplit(self.options['mysql-url'])
zope = urlparse.urlsplit(self.options['zope-url'])
# Note: raises when there is more than a single element in path, as it's
# not supported by manage_addERP5Site anyway.
_, zope_path = zope.path.split('/')
return [self.createExecutable(
self.options['runner-path'],
self.substituteTemplate(self.getTemplateFilename('erp5_bootstrap.in'),
config))
return [runner_path]
self.substituteTemplate(
self.getTemplateFilename('erp5_bootstrap.in'),
{
'python_path': sys.executable,
'base_url': urlparse.urlunsplit((zope.scheme, zope.netloc, '', '', '')),
'site_id': zope_path,
'sql_connection_string': '%(database)s@%(hostname)s:%(port)s %(username)s %(password)s' % {
'database': mysql.path.split('/')[1],
'hostname': mysql.hostname,
'port': mysql.port,
'username': mysql.username,
'password': mysql.password
},
},
))]
#!%(python_path)s
import httplib
import urllib
import base64
user = "%(user)s"
password = "%(password)s"
host = "%(host)s"
site_id = "%(site_id)s"
erp5_catalog_storage = 'erp5_mysql_innodb_catalog'
mysql_url = "%(sql_connection_string)s"
header_dict = {'Authorization': 'Basic %%s' %% \
base64.encodestring('%%s:%%s' %% (user, password)).strip(),
'Referer':'http://%%s/manage_addProduct/ERP5/addERP5Site' %% host}
zope_connection = httplib.HTTPConnection(host)
# Check if an ERP5 site is already created, as ERP5 does support having
# 2 instances in the same zope, and this script should not destroy user data
zope_connection.request('GET', '/isERP5SitePresent', headers=header_dict)
result = zope_connection.getresponse()
if result.status == 204: # and (result.read() == "False"):
# Use a new connection
zope_connection = httplib.HTTPConnection(host)
# Create the expected ERP5 instance
zope_connection.request(
'POST', '/manage_addProduct/ERP5/manage_addERP5Site',
urllib.urlencode({
'id': site_id,
'erp5_catalog_storage': erp5_catalog_storage,
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
def isSuccess(response):
return 200 <= response.code < 300
base_url = %(base_url)r
response = urllib.urlopen(base_url + '/isERP5SitePresent')
if isSuccess(response) and response.read() == '':
mysql_url = %(sql_connection_string)r
response = urllib.urlopen(
base_url + '/manage_addProduct/ERP5/manage_addERP5Site',
data=urllib.urlencode({
'id': %(site_id)r,
'erp5_catalog_storage': 'erp5_mysql_innodb_catalog',
'erp5_sql_connection_string': mysql_url,
'cmf_activity_sql_connection_string': mysql_url,
}),
headers=header_dict)
# Wait for the erp5 response, to prevent multiple requests
# been done by the same script.
result = zope_connection.getresponse()
# Read result make sure the site really finished to
#created the ERP5 site.
result.read()
)
if not isSuccess(response):
raise ValueError('Failed creating site, status=%%i: %%s' %% (response.code, response.read()))
print "ERP5 site created."
......@@ -38,12 +38,25 @@ class Recipe(GenericBaseRecipe):
# XXX: assume existence of 100 test databases, because slaves are not
# functional yet in slapos: testdb_0...testdb_100, with testuser_N
mysql_template = "%s@%s:%s %s %s"
mysql_parsed = urlparse.urlparse(self.options['mysql-url'])
for i in range(0, 100):
mysql_connection_string_list.append(mysql_template % ('testdb_%s'% i,
mysql_parsed.hostname, mysql_parsed.port, 'testuser_%s'% i, mysql_parsed.password))
mysql_connection_string = mysql_template % ('erp5_test', mysql_parsed.hostname,
mysql_parsed.port, 'erp5_test', mysql_parsed.password)
mysql_url_list = self.options.get('mysql-url-list')
if mysql_url_list is None:
mysql_parsed = urlparse.urlparse(self.options['mysql-url'])
for i in range(0, 100):
mysql_connection_string_list.append(mysql_template % ('testdb_%s'% i,
mysql_parsed.hostname, mysql_parsed.port, 'testuser_%s'% i, mysql_parsed.password))
mysql_connection_string = mysql_template % ('erp5_test', mysql_parsed.hostname,
mysql_parsed.port, 'erp5_test', mysql_parsed.password)
else:
for mysql_url in mysql_url_list:
mysql_parsed = urlparse.urlparse(mysql_url)
mysql_connection_string_list.append(mysql_template % (
mysql_parsed.path.lstrip('/'),
mysql_parsed.hostname,
mysql_parsed.port,
mysql_parsed.username,
mysql_parsed.password,
))
mysql_connection_string = mysql_connection_string_list.pop()
cloudooo_parsed = urlparse.urlparse(self.options['cloudooo-url'])
memcached_parsed = urlparse.urlparse(self.options['memcached-url'])
kumofs_parsed = urlparse.urlparse(self.options['kumofs-url'])
......
......@@ -48,6 +48,9 @@ class Recipe(object):
- storage-path: plain-text persistent storage for password,
that can only be accessed by the user
(default: ${buildout:parts-directory}/${:_buildout_section_name_})
If storage-path is empty, the recipe does not save the password, which is
fine if it is saved by other means, e.g. using the publish-early recipe.
"""
def __init__(self, buildout, name, options):
......@@ -57,16 +60,17 @@ class Recipe(object):
except KeyError:
self.storage_path = options['storage-path'] = os.path.join(
buildout['buildout']['parts-directory'], name)
try:
with open(self.storage_path) as f:
passwd = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
passwd = None
passwd = None
if self.storage_path:
try:
with open(self.storage_path) as f:
passwd = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
self.update = self.install
if not passwd:
passwd = self.generatePassword(int(options_get('bytes', '8')))
self.update = self.install
self.passwd = passwd
# Password must not go into .installed file, for 2 reasons:
# security of course but also to prevent buildout to always reinstall.
......@@ -88,7 +92,7 @@ class Recipe(object):
os.write(fd, self.passwd)
finally:
os.close(fd)
return self.storage_path
return self.storage_path
def update(self):
return ()
......@@ -17,7 +17,8 @@ def runMysql(args):
# XXX: Protect with proper root password
# XXX: Follow http://dev.mysql.com/doc/refman/5.0/en/default-privileges.html
popen = subprocess.Popen([conf['mysql_install_binary'],
'--skip-name-resolve', '--no-defaults',
'--defaults-file=%s' % conf['configuration_file'],
'--skip-name-resolve',
'--datadir=%s' % conf['data_directory'],
'--basedir=%s' % conf['mysql_base_directory']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
......@@ -66,7 +67,7 @@ def updateMysql(args):
with open(script_filename) as script_file:
conf['mysql_script'] = script_file.read()
while True:
mysql_upgrade_list = [conf['mysql_upgrade_binary'], '--no-defaults', '--user=root']
mysql_upgrade_list = [conf['mysql_upgrade_binary'], '--user=root']
if 'socket' in conf:
mysql_upgrade_list.append('--socket=' + conf['socket'])
mysql_upgrade = subprocess.Popen(mysql_upgrade_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
......@@ -80,7 +81,7 @@ def updateMysql(args):
print "MySQL database upgraded with result:\n%s" % result
else:
print "No need to upgrade MySQL database"
mysql_list = [conf['mysql_binary'].strip(), '--no-defaults', '-B', '--user=root']
mysql_list = [conf['mysql_binary'].strip(), '-B', '--user=root']
if 'socket' in conf:
mysql_list.append('--socket=' + conf['socket'])
mysql = subprocess.Popen(mysql_list, stdin=subprocess.PIPE,
......
......@@ -53,7 +53,8 @@ def Zope2InitUser(path, username, password):
class Recipe(GenericBaseRecipe):
def _options(self, options):
options['password'] = self.generatePassword()
if 'password' not in options:
options['password'] = self.generatePassword()
def install(self):
"""
......@@ -101,26 +102,21 @@ class Recipe(GenericBaseRecipe):
# Always provide a URL-Type
append("file://" + link)
zope_environment = dict(
TMP=self.options['tmp-path'],
TMPDIR=self.options['tmp-path'],
HOME=self.options['tmp-path'],
PATH=self.options['bin-path'],
TZ=self.options['timezone'],
)
zope_environment = {
'TMP': self.options['tmp-path'],
'TMPDIR': self.options['tmp-path'],
'HOME': self.options['tmp-path'],
'PATH': self.options['bin-path'],
'TZ': self.options['timezone'],
}
# longrequestlogger product which requires environment settings
longrequest_logger_file = self.options.get('longrequest-logger-file', None)
longrequest_logger_timeout = \
self.options.get('longrequest-logger-timeout', None)
longrequest_logger_interval= \
self.options.get('longrequest-logger-interval', None)
if longrequest_logger_file:
# add needed zope configuration
zope_environment.update(
**dict(longrequestlogger_file = longrequest_logger_file,
longrequestlogger_timeout = longrequest_logger_timeout,
longrequestlogger_interval = longrequest_logger_interval))
zope_environment['longrequestlogger_file'] = longrequest_logger_file
zope_environment['longrequestlogger_timeout'] = self.options.get('longrequest-logger-timeout', None)
zope_environment['longrequestlogger_interval'] = self.options.get('longrequest-logger-interval', None)
# configure default Zope2 zcml
open(self.options['site-zcml'], 'w').write(open(self.getTemplateFilename(
......
......@@ -76,11 +76,15 @@ class Storage(NeoBaseRecipe):
_binding_port_mandatory = False
def _getOptionList(self):
return [
r = [
'-d', self.options['database-parameters'],
'-a', self.options['database-adapter'],
'-w', self.options['wait-database'],
]
engine = self.options.get('engine')
if engine: # old versions of NEO don't support -e
r += '-e', engine
return r
class Admin(NeoBaseRecipe):
def _getOptionList(self):
......@@ -92,6 +96,7 @@ class Master(NeoBaseRecipe):
r = [
'-p', options['partitions'],
'-r', options['replicas'],
'-A', options['autostart'],
]
for x in (('-C', options['upstream-cluster']),
('-M', options['upstream-masters'])):
......
......@@ -32,15 +32,19 @@ CONNECTION_PARAMETER_STRING = 'connection-'
class Recipe(GenericSlapRecipe):
def _install(self):
publish_dict = dict()
options = self.options.copy()
del options['recipe']
slave_reference = options.pop('-slave-reference', None)
for k, v in options.iteritems():
if k[:1] == '-':
continue
publish_dict[k] = v
self._setConnectionDict(publish_dict, slave_reference)
publish_dict = {}
done = set()
extends = [self.name]
while extends:
name = extends.pop()
done.add(name)
for k, v in self.buildout[name].iteritems():
if k[:1] == '-':
if k == '-extends':
extends += set(v.split()) - done
elif k != 'recipe':
publish_dict[k] = v
self._setConnectionDict(publish_dict, self.options.get('-slave-reference'))
return []
def _setConnectionDict(self, publish_dict, slave_reference=None):
......
##############################################################################
#
# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import slapos.slap
from slapos.recipe.librecipe import unwrap, wrap
from slapos.recipe.librecipe import GenericSlapRecipe
class Recipe(GenericSlapRecipe):
"""
Early initialization of published parameters.
The '-init' option defines parameters that should be published before
requesting any partitions, and how they are initialized.
Example:
[publish-early]
recipe = slapos.cookbook:publish-early
-init =
foo gen-foo:x
bar gen-bar:y
bar = z
[gen-foo]
...
[publish]
recipe = slapos.cookbook:publish.serialised
-extends = publish-early
...
${publish-early:foo} is initialized with the value of the published
parameter 'foo', or ${gen-foo:x} if it hasn't been published yet
(and in this case, it is published immediately as a way to save the value).
${publish-early:bar} is forced to 'z' (${gen-bar:y} ignored):
a line like 'bar = z' is usually rendered conditionally with Jinja2.
"""
def __init__(self, buildout, name, options):
GenericSlapRecipe.__init__(self, buildout, name, options)
published_dict = None
publish = False
publish_dict = {}
for line in options['-init'].splitlines():
if line:
k, v = line.split()
if k not in options:
if published_dict is None:
self.slap.initializeConnection(self.server_url, self.key_file,
self.cert_file)
computer_partition = self.slap.registerComputerPartition(
self.computer_id, self.computer_partition_id)
published_dict = unwrap(
computer_partition.getConnectionParameterDict())
try:
publish_dict[k] = published_dict[k]
except KeyError:
section, key = v.split(":")
publish_dict[k] = self.buildout[section][key]
publish = True
if publish:
computer_partition.setConnectionDict(wrap(publish_dict))
options.update(publish_dict)
install = update = lambda self: None
##############################################################################
#
# Copyright (c) 2014 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
##############################################################################
import os, subprocess, sys
class Recipe:
def __init__(self, buildout, name, options):
self.buildout = buildout
self.options = options
self.name = name
self.software_type = buildout["slap-configuration"]["slap-software-type"]
section, key = self.options[self.software_type].split(":")
self.base = self.buildout[section][key]
def install(self):
# XXX-Antoine: We gotta find a better way to do this. I tried to check
# out how slapgrid-cp was running buildout. But it is worse than that.
args = sys.argv[:]
for x in self.buildout["slap-connection"].iteritems():
args.append("slap-connection:%s=%s" % x)
for x in "directory", "eggs-directory", "develop-eggs-directory":
args.append("buildout:%s=%s" % (x, self.buildout["buildout"][x]))
args.append("buildout:installed=.installed-%s.cfg" % self.name)
# Options.get (from zc.buildout) should deserialize.
try:
override = self.options["override"][self.software_type]
except (KeyError, TypeError):
buildout = self.base
else:
# unfortunately, buildout:extends does not work when given at command line
buildout = os.path.join(self.buildout["buildout"]["parts-directory"],
self.name + ".cfg")
with open(override) as src, open(buildout, "w", 0) as dst:
dst.write("[buildout]\nextends = %s\n\n" % self.base + src.read())
subprocess.check_call(args + ["-oc", buildout])
return []
update = install
import grp
import os
import pwd
class Recipe(object):
"""
Provide POSIX information about the user that is currently running buildout.
"""
def __init__(self, buildout, name, options):
pinfo = pwd.getpwuid(os.getuid())
options['pw-name'] = pinfo.pw_name
options['pw-uid'] = pinfo.pw_uid
options['pw-gid'] = pinfo.pw_gid
options['pw-dir'] = pinfo.pw_dir
options['pw-shell'] = pinfo.pw_shell
ginfo = grp.getgrgid(os.getgid())
options['gr-name'] = ginfo.gr_name
options['gr-gid'] = ginfo.gr_gid
install = update = lambda self: []
Available ``software-type`` values
==================================
- ``default``
Recommended for production use.
- ``create-erp5-site``
Automated creation of ERP5Site instance, for easy deployment.
Usage in production discouraged due to the increased risk of data loss.
Notes
=====
This software release is not intended to be accessed directly, but through a
front-end instance which is expected to contain the RewriteRules_ (or
equivalent) needed to relocate Zope's URLs via its VirtualHostMonster_. See the
``frontend`` erp5 instance parameter.
The included cloudooo partition is **deprecated**. It is not recommended for
intensive usage. See the ``cloudooo`` Software Release to set up a cloudooo
cluster, which is more suitable for intensive usage.
Port ranges
===========
This software release assigns the following port ranges by default:
==================== ==========
Partition type Port range
==================== ==========
memcached-persistent 2000-2009
memcached-volatile 2010-2019
cloudooo 2020-2024
smtp 2025-2029
neo (admin & master) 2050-2051
mariadb 2099
zeo 2100-2149
balancer 2150-2199
zope 2200-*
==================== ==========
Non-zope partitions are unique in an ERP5 cluster, so you shouldn't have to
care about them as a user (but a Software Release developer needs to know
them).
Zope partitions should be assigned port ranges starting at 2200, incrementing
by some value which depends on how many zope processes you want per partition
(see the ``port-base`` parameter in ``zope-partition-dict``).
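As a purely illustrative sketch (partition names and port values are arbitrary;
only the keys come from the request parameter schema), two zope partitions
sharing one address could be requested with explicit ``port-base`` values::
  "zope-partition-dict": {
    "main": {"instance-count": 2, "port-base": 2200},
    "activities": {"instance-count": 2, "port-base": 2210}
  }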
Notes to the Software Release developer: these ranges are not strictly
defined. Not every port is actually used, so already-assigned ranges may be
reduced if needed (e.g. memcached partitions actually use fewer ports). There
should be enough room for evolution (as between the smtp and mariadb types). It
is important not to allocate any port at or above 2200, as users may have
assigned ports in that range to their zope processes.
.. _RewriteRules: http://httpd.apache.org/docs/current/en/mod/mod_rewrite.html#rewriterule
.. _VirtualHostMonster: http://docs.zope.org/zope2/zope2book/VirtualHosting.html
TODO: improve
Instance Parameters
===================
Zope Parameters
---------------
Needed by software-type development (default) and zope.
frontend-software-url
~~~~~~~~~~~~~~~~~~~~~
Software URL of an existing frontend.
XXX: meaning should change (or it will go away) in order to be resilient to
software updates - as they are visible at software-url level.
If it is not provided, no frontend will be requested.
frontend-instance-guid
~~~~~~~~~~~~~~~~~~~~~~
GUID of frontend instance.
Not perfect yet: if that instance is replaced, slaves have to be reconfigured.
Mandatory only if frontend-software-url is also provided.
XXX: should be complemented (or replaced) by more flexible and precise
criteria.
frontend-software-type (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Frontend software type as defined by the software release at
frontend-software-url.
frontend-domain (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~
Domain name the frontend must recognise as belonging to this instance.
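For illustration only, these frontend parameters might be provided together as
the following JSON-style fragment (format and values are illustrative only; the
URL, GUID and domain are placeholders, not real instances)::
  "frontend-software-url": "https://example.com/frontend/software.cfg",
  "frontend-instance-guid": "SOFTINST-0000",
  "frontend-software-type": "RootSoftwareInstance",
  "frontend-domain": "erp5.example.com"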
{
"$schema": "http://json-schema.org/draft-04/schema#",
"required": ["tcpv4-port"],
"properties": {
"tcpv4-port": {
"allOf": [{
"$ref": "#/definitions/tcpv4port"
}, {
"description": "Start allocating ports at this value, going upward"
}]
},
"font-url-list": {
"description": "List of URLs from which fonts are to be downloaded",
"default": [],
"items": {
"type": "string"
},
"type": "array"
}
}
}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters to instantiate ERP5",
"additionalProperties": false,
"properties": {
"sla-dict": {
"description": "Where to request instances. Each key is a query string for criterions (e.g. \"computer_guid=foo\"), and each value is a list of partition references.",
"additionalProperties": {
"type": "array",
"items": { "type": "string" },
"uniqueItems": true
},
"type": "object"
},
"site-id": {
"description": "ERP5Site object's id",
"default": "erp5",
"type": "string"
},
"timezone": {
"description": "Zope's timezone. Possible values are determined by host's libc, and typically come from a separate package (tzdata, ...)",
"default": "UTC",
"type": "string"
},
"deadlock-debugger-password": {
"description": "Password for /manage_debug_threads",
"type": "string"
},
"inituser-login": {
"description": "Login of the initial/rescue user",
"default": "zope",
"type": "string"
},
"inituser-password": {
"description": "Password of the initial/rescue user",
"type": "string"
},
"developer-list": {
"description": "List of logins which should get the Developper role (required to modify portal_components' content), defaulting to inituser-login's value",
"items": {
"pattern": "/^\\S+$/",
"type": "string"
},
"uniqueItems": true,
"type": "array"
},
"hostalias-dict": {
"description": "Hostname-to-hostname mapping",
"default": {},
"additionalProperties": {
"description": "A hostname to which current entry will resolve",
"type": "string"
},
"type": "object"
},
"hosts-dict": {
"description": "Host entries to be used in addition to and/or overriding auto-generated ones (erp5-catalog-0, erp5-cloudooo, erp5-memcached-persistent, erp5-memcached-volatile and erp5-smtp)",
"patternProperties": {
".*": {
"description": "An IP or domain name to which current entry will resolve",
"type": "string"
}
},
"type": "object"
},
"frontend": {
"description": "Front-end slave instance request parameters",
"properties": {
"software-url": {
"description": "Front-end's software type. If this parameter is empty, no front-end instance is requested. Else, sla-dict must specify 'frontend' which is a special value matching all frontends (e.g. {\"instance_guid=bar\": [\"frontend\"]}).",
"default": "",
"type": "string"
},
"domain": {
"description": "The domain name to request front-end to respond as.",
"default": "",
"type": "string"
},
"software-type": {
"description": "Request a front-end slave instance of this software type.",
"default": "RootSoftwareInstance",
"type": ""
}
},
"type": "object"
},
"zope-partition-dict": {
"description": "Zope layout definition",
"default": {"1": {}},
"patternProperties": {
".*": {
"additionalProperties": false,
"properties": {
"family": {
"description": "The family this partition is part of. For example: 'public', 'admin', 'backoffice', 'web-service'... Each family gets its own balancer entry. It has no special meaning for the system.",
"default": "default",
"type": "string"
},
"instance-count": {
"description": "Number of Zopes to setup on this partition",
"default": 1,
"type": "integer"
},
"thread-amount": {
"description": "Number of worker threads for each created Zope process",
"default": 4,
"type": "integer"
},
"timerserver-interval": {
"description": "Timerserver tick perdiod, in seconds, or 0 to disable",
"default": 5,
"type": "integer"
},
"webdav": {
"description": "Serve webdav queries, implies timerserver-interval=0 (disabled). Mixing webdav and non-webdav nodes in a single family will give unspecified results.",
"default": false,
"type": "boolean"
},
"longrequest-logger-interval": {
"description": "Period, in seconds, with which LongRequestLogger polls worker thread stack traces, or -1 to disable",
"default": -1,
"type": "integer"
},
"longrequest-logger-timeout": {
"description": "Transaction duration after which LongRequestLogger will start logging its stack trace, in seconds",
"default": 1,
"type": "integer"
},
"port-base": {
"allOf": [{
"$ref": "#/definitions/tcpv4port"
}, {
"description": "Start allocating ports at this value. Useful if one needs to make several partitions share the same port range (ie, several partitions bound to a single address)",
"default": 2200
}]
}
},
"type": "object"
}
},
"type": "object"
},
"kumofs": {
"description": "Persistent memcached service",
"additionalProperties": {
"$ref": "./instance-kumofs-schema.json#properties"
},
"type": "object"
},
"memcached": {
"description": "Volatile memcached service",
"additionalProperties": {
"$ref": "./instance-kumofs-schema.json#properties"
},
"type": "object"
},
"cloudooo": {
"description": "Format conversion service",
"additionalProperties": {
"$ref": "./instance-cloudooo-schema.json#properties"
},
"type": "object"
},
"mariadb": {
"description": "Relational database service",
"additionalProperties": {
"$ref": "./instance-mariadb-schema.json#properties"
},
"type": "object"
},
"zodb-zeo": {
"description": "Common settings ZEO servers",
"properties": {
"tcpv4-port": {
"allOf": [{
"$ref": "#/definitions/tcpv4port"
}, {
"description": "Start allocating ports at this value, going upward"
}]
},
"backup-periodicity": {
"description": "When to backup, specified in the same format as for systemd.time(7) calendar events (years & seconds not supported, DoW & DoM can not be combined). Enter 'never' to disable backups.",
"default": "daily",
"type": "string"
},
"tidstorage-repozo-path": {
"description": "Directory for backup timestamp and tidstorage status files.",
"default": "~/srv/backup/tidstorage",
"type": "string"
}
},
"type": "object"
},
"zodb": {
"description": "Zope Object DataBase mountpoints. See https://github.com/zopefoundation/ZODB/blob/3.10/src/ZODB/component.xml for extra options.",
"items": {
"required": ["type"],
"properties": {
"name": {
"description": "Database name",
"default": "main",
"type": "string"
},
"mount-point": {
"description": "Mount point",
"default": "/",
"type": "string"
},
"type": {
"description": "Storage type",
"enum": ["zeo", "neo"],
"type": "string"
},
"server": {
"description": "Instantiate a server. If missing, 'storage-dict' must contain the necessary properties to mount the ZODB. For ZEO, the partition reference is 'zodb'. For NEO, they are 'neo-0', 'neo-1', ...",
"anyOf": [
{"$ref": "./instance-zeo-schema.json"},
{"$ref": "../neoppod/instance-neo-input-schema.json"}
]
},
"storage-dict": {
"description": "Storage configuration. For NEO, 'logfile' is automatically set (see http://git.erp5.org/gitweb/neoppod.git/blob/HEAD:/neo/client/component.xml for other settings).",
"additionalProperties": {"type": "string"},
"type": "object"
}
},
"additionalProperties": {"type": "string"},
"type": "object"
},
"type": "array"
}
}
}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by ERP5 instantiation",
"properties": {
"hosts-dict": {
"description": "Hosts mapping, including auto-generated entries",
"patternProperties": {
".*": {
"description": "IP or domain names current entry resolves to",
"type": "string"
}
},
"type": "object"
},
"site-id": {
"description": "Chosen ERP5Site object identifier",
"type": "string"
},
"inituser-login": {
"description": "Initial user login",
"type": "string"
},
"inituser-password": {
"description": "Initial user password",
"type": "string"
},
"kumofs-url": {
"description": "Persistent memcached access information",
"type": "string"
},
"memcached-url": {
"description": "Volatile memcached access information",
"type": "string"
},
"cloudooo-url": {
"description": "Conversion service access information",
"type": "string"
},
"mariadb-url": {
"description": "Relational database access information",
"type": "string"
}
},
"patternProperties": {
"family-.*": {
"description": "Zope family access information",
"type": "string"
}
},
"type": "object"
}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"required": ["tcpv4-port"],
"properties": {
"tcpv4-port": {
"allOf": [{
"$ref": "#/definitions/tcpv4port"
}, {
"description": "Start allocating ports at this value, going upward"
}]
},
"ram-storage-size": {
"description": "If 0 use disk storage, otherwise use ram and limit data size to this many megabytes",
"default": 0,
"type": "integer"
}
}
}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"required": ["tcpv4-port"],
"properties": {
"tcpv4-port": {
"allOf": [{
"$ref": "#/definitions/tcpv4port"
}, {
"description": "Start allocating ports at this value, going downward"
}]
},
"database-list": {
"description": "Databases to create and respective user credentials getting all privileges on it",
"default": [{
"name": "erp5",
"user": "user",
"password": "insecure"
}],
"minItems": 1,
"items": {
"required": ["name", "user", "password"],
"properties": {
"name": {
"description": "Database name",
"type": "string"
},
"user": {
"description": "User name",
"type": "string"
},
"password": {
"description": "User password",
"type": "string"
}
},
"type": "object"
},
"type": "array"
},
"test-database-amount": {
"description": "The number of test databases to create, adding auto-generated entries to database-list",
"default": 1,
"minimum": 0,
"type": "integer"
},
"catalog-backup": {
"description": "Backup control knobs",
"properties": {
"full-retention-days": {
"description": "How many days full backups must be retained, -1 meaning full backups are disabled and 0 meaning no expiration",
"default": 7,
"minimum": -1,
"type": "integer"
},
"incremental-retention-days": {
"description": "How many days incremental backups (binlogs) must be retained, -1 meaning incremental backups are disabled and 0 meaning no expiration, defaulting to full-retention-days' value",
"minimum": -1,
"type": "integer"
}
},
"type": "object"
},
"backup-periodicity": {
"description": "When to backup, specified in the same format as for systemd.time(7) calendar events (years & seconds not supported, DoW & DoM can not be combined).",
"default": "daily",
"type": "string"
},
"innodb-buffer-pool-size": {
"description": "See MariaDB documentation on innodb_buffer_pool_size",
"minimum": 0,
"type": "integer"
},
"innodb-log-file-size": {
"description": "See MariaDB documentation on innodb_log_file_size",
"minimum": 0,
"type": "integer"
},
"innodb-log-buffer-size": {
"description": "See MariaDB documentation on innodb_log_buffer_size",
"minimum": 0,
"type": "integer"
},
"long-query-time": {
"description": "Number of seconds above which long queries are logged",
"minimum": 0,
"default": 1,
"type": "number"
},
"relaxed-writes": {
"description": "When enabled, sets innodb_flush_log_at_trx_commit = 0, innodb_flush_method = nosync, innodb_doublewrite = 0 and sync_frm = 0 - RTFM, those options are dangerous",
"default": false,
"type": "boolean"
},
"ssl": {
"description": "Enable and define SSL support for network connections",
"default": {},
"properties": {
"ca-crt": {
"description": "Certificate Authority's certificate, in PEM format",
"type": "string"
},
"crt": {
"description": "Server's certificate, in PEM format (mandatory to enable SSL support)",
"type": "string"
},
"key": {
"description": "Server's key, in PEM format (mandatory to enable SSL support)",
"type": "string"
},
"crl": {
"description": "Server's certificate revocation list, in PEM format",
"type": "string"
},
"cipher": {
"description": "Permissible cipher specifications, separated by colons",
"type": "string"
}
},
"type": "object"
}
}
}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"additionalProperties": false,
"properties": {
"backup": {
"description": "'%(backup)s' is expanded to partition's ZODB backup path (typically 'srv/backup/zodb'), and %(name)s with the export id",
"default": "%(backup)s/%(name)s",
"type": "string"
},
"family": {
"description": "Opaque name used to regroup/separate mountpoints under different ZEO processes (must be valid as a file name and as a ConfigParser section name)",
"default": "default",
"pattern": "^[^<>:\"/\\|?*\\]\\[ ]*$",
"type": "string"
},
"path": {
"description": "FileStorage file path, '%(zodb)s' occurrences are replaced with the path to partition's srv/zodb directory, and %(name)s with the export id",
"default": "%(zodb)s/%(name)s.fs",
"type": "string"
}
},
"type": "object"
}
......@@ -53,7 +53,7 @@ mysqld-binary = ${mariadb:location}/bin/mysqld
mysqldump-binary = ${mariadb:location}/bin/mysqldump
gzip-binary = $${buildout:gzip-binary}
zcat-binary = ${gzip:location}/bin/zcat
mysql-tzinfo-to-sql-binary = ${mariadb:location}/bin/mysql_tzinfo_to_sql
[certificate-authority]
recipe = slapos.cookbook:certificate_authority
......
......@@ -48,7 +48,7 @@ mode = 0644
[instance-mariadb]
recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-mariadb.cfg
md5sum = d160bb15d8d3a3913706a4ed29c49388
md5sum = 79f86f8c74335b2b9ec81d6a02164cbf
output = ${buildout:directory}/template-mariadb.cfg
mode = 0644
......
Software types
==============
Which software type is an entry point and can be used for root software
instance.
Parameters are expected to be passed as the *.serialised recipes expect them.
Minimal parameters::
```
<?xml version='1.0' encoding='utf-8'?>
<instance>
<parameter id="_">{
"cluster": "dummy",
}</parameter>
</instance>
```
Each available key in the outermost dict is described below.
default (default)
-----------------
Deploy a NEO cluster.
'cluster' (str, mandatory)
~~~~~~~~~~~~~~~~~~~~~~~~~~
Cluster unique identifier. Your last line of defense against mixing up NEO
clusters and corrupting your data. Choose a unique value for each of your
clusters.
'partitions' (int, optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Number of partitions. You cannot change this value once you have created a cluster.
Defaults to 12.
'replicas' (int, optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~
Number of replicas.
Defaults to 0 (no resilience).
'mysql-storage-count' (int, optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Number of MySQL-based storage nodes to deploy. One master node is deployed
along with each storage.
Defaults to 1.
'admin-count' (int, optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Number of admin nodes to deploy.
Defaults to 1.
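As a hedged, purely illustrative example combining the optional keys described
above (all values are arbitrary)::
```
<?xml version='1.0' encoding='utf-8'?>
<instance>
  <parameter id="_">{
    "cluster": "dummy",
    "partitions": 12,
    "replicas": 1,
    "mysql-storage-count": 2,
    "admin-count": 1
  }</parameter>
</instance>
```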
{% if slap_software_type not in (
mysql_storage_software_type,
admin_software_type,
) -%}
{% macro section(name) %}{% do part_list.append(name) %}{{ name }}{% endmacro -%}
{% set part_list = [] -%}
{% set master_list = [] -%}
{% set admin_list = [] -%}
[request-common]
recipe = slapos.cookbook:request
software-url = ${slap-connection:software-release-url}
sla-computer_guid = ${slap-connection:computer-id}
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
config-cluster = {{ slapparameter_dict['cluster'] }}
[node-base]
< = request-common
config-masters = ${all-masters:addresses}
[mysql-storage-base]
< = request-common
config-partitions = {{ slapparameter_dict.get('partitions', 12) }}
config-replicas = {{ slapparameter_dict.get('replicas', 0) }}
software-type = {{ mysql_storage_software_type }}
{% import "root_common" as common_macro with context %}
[mysql-storage-request-base]
< = mysql-storage-base
return = master
[admin-base]
< = node-base
return = admin
software-type = {{ admin_software_type }}
[request-common]
<= request-common-base
{{ common_macro.request_neo(slapparameter_dict, 'neo', 'node-') }}
[publish]
recipe = slapos.cookbook:publish
masters = ${all-masters:addresses}
admins = ${all-admins:addresses}
{% for node_number in range(slapparameter_dict.get('mysql-storage-count', 1)) -%}
{% set section_id = 'storage-%i' % (node_number, ) -%}
{% set final_section_id = 'final-' ~ section_id -%}
{% do master_list.append(section_id) -%}
[{{ section_id }}]
< = mysql-storage-request-base
name = {{ section_id }}
[{{ section(final_section_id) }}]
< = mysql-storage-base
node-base
name = {{ section_id }}
{% endfor -%}
{% for node_number in range(slapparameter_dict.get('admin-count', 1)) -%}
{% set section_id = 'admin-%i' % (node_number, ) -%}
{% do admin_list.append(section_id)%}
[{{ section(section_id) }}]
< = admin-base
name = {{ section_id }}
{% endfor -%}
[buildout]
parts =
{{ part_list | join('\n\t') }}
publish
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
[all-masters]
addresses = {% for master in master_list -%}
{{ '${' + master + ':connection-master}' -}}
{% if not loop.last %} {% endif -%}
{% endfor %}
[all-admins]
addresses = {% for admin in admin_list -%}
{{ '${' + admin + ':connection-admin}' -}}
{% if not loop.last %} {% endif -%}
{% endfor %}
{% endif -%}
recipe = slapos.cookbook:publish.serialised
neo-masters = ${node-0-final:connection-masters}
neo-admins = ${node-0-final:connection-admins}
{{ common_macro.common_section() }}
[buildout]
parts = switch-softwaretype
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[jinja2-template-base]
recipe = slapos.recipe.template:jinja2
filename = ${:_buildout_section_name_}.cfg
rendered = ${buildout:parts-directory}/${:_buildout_section_name_}/${:filename}
extensions = jinja2.ext.do
extra-context =
context =
key ipv4_set slap-configuration:ipv4
key ipv6_set slap-configuration:ipv6
key slapparameter_dict slap-configuration:configuration
raw logrotate_cfg {{ template_logrotate_base }}
raw bin_directory {{ bin_directory }}
${:extra-context}
[neo-admin]
<= jinja2-template-base
template = {{ neo_admin }}
[neo-master]
<= jinja2-template-base
template = {{ neo_master }}
[neo-storage-mysql]
<= jinja2-template-base
template = {{ neo_storage_mysql }}
extra-context =
key master_cfg neo-master:rendered
key admin_cfg neo-admin:rendered
raw mariadb_location {{ mariadb_location }}
raw template_neo_my_cnf {{ template_neo_my_cnf }}
[buildout]
parts =
cron-entry-logrotate
[cron]
recipe = slapos.cookbook:cron
cron-entries = ${logrotate-directory:cron-entries}
dcrond-binary = {{ dcron_location }}/sbin/crond
crontabs = ${logrotate-directory:crontabs}
cronstamps = ${logrotate-directory:cronstamps}
catcher = ${cron-simplelogger:wrapper}
binary = ${logrotate-directory:services}/crond
[cron-simplelogger]
recipe = slapos.cookbook:simplelogger
wrapper = ${logrotate-directory:bin}/cron_simplelogger
log = ${logrotate-directory:log}/cron.log
[logrotate]
recipe = slapos.cookbook:logrotate
logrotate-entries = ${logrotate-directory:logrotate-entries}
backup = ${logrotate-directory:logrotate-backup}
logrotate-binary = {{ logrotate_location }}/usr/sbin/logrotate
gzip-binary = {{ gzip_location }}/bin/gzip
gunzip-binary = {{ gzip_location }}/bin/gunzip
wrapper = ${logrotate-directory:bin}/logrotate
conf = ${logrotate-directory:etc}/logrotate.conf
state-file = ${logrotate-directory:srv}/logrotate.status
[cron-entry-logrotate]
recipe = slapos.cookbook:cron.d
cron-entries = ${cron:cron-entries}
name = logrotate
frequency = 0 0 * * *
command = ${logrotate:wrapper}
[logrotate-directory]
recipe = slapos.cookbook:mkdirectory
cron-entries = ${:etc}/cron.d
cronstamps = ${:etc}/cronstamps
crontabs = ${:etc}/crontabs
logrotate-backup = ${:backup}/logrotate
logrotate-entries = ${:etc}/logrotate.d
bin = ${buildout:directory}/bin
srv = ${buildout:directory}/srv
backup = ${:srv}/backup
etc = ${buildout:directory}/etc
services = ${:etc}/run
log = ${buildout:directory}/var/log
[buildout]
parts =
neo-admin-run-wrapper
parts +=
neo-admin-promise
eggs-directory = {{eggs_directory}}
develop-eggs-directory = {{develop_eggs_directory}}
offline = true
logrotate-admin
[neo-admin]
recipe = slapos.cookbook:neoppod.admin
binary = {{bin_directory}}/neoadmin
wrapper = ${directory:bin}/neoadmin
wrapper = ${directory:etc_run}/neoadmin
logfile = ${directory:log}/neoadmin.log
ip = ${publish:ip}
port = ${publish:port}
cluster = ${slap-parameter:cluster}
masters = ${slap-parameter:masters}
[publish]
recipe = slapos.cookbook:publish
# TODO: make port a partition parameter
# TODO: stop using slap-network-information
ip = [${slap-network-information:global-ipv6}]
port = 10002
admin = ${:ip}:${:port}
[neo-admin-run-wrapper]
recipe = slapos.cookbook:symbolic.link
target-directory = ${directory:run}
link-binary = ${neo-admin:wrapper}
[directory]
recipe = slapos.cookbook:mkdirectory
promises = ${buildout:directory}/etc/promises
run = ${buildout:directory}/etc/run
log = ${buildout:directory}/var/log
bin = ${buildout:directory}/bin
port = ${publish:port-admin}
cluster = {{ dumps(slapparameter_dict['cluster']) }}
masters = ${publish:masters}
[neo-admin-promise]
recipe = slapos.cookbook:check_port_listening
hostname = ${neo-admin:ip}
port = ${neo-admin:port}
path = ${directory:promises}/neo-admin-promise
[logrotate-admin]
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
name = neo-admin
log = ${neo-admin:logfile}
post = {{ bin_directory }}/slapos-kill -n neoadmin -s RTMIN+1 ${:log}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Parameters to instantiate a NEO cluster. See https://git.erp5.org/gitweb/neoppod.git/blob/HEAD:/neo.conf?js=1 for more information.",
"additionalProperties": false,
"required": ["cluster"],
"properties": {
"cluster": {
"description": "Cluster unique identifier. Your last line of defense against mixing up NEO clusters and corrupting your data. Choose a unique value for each of your cluster.",
"type": "string"
},
"partitions": {
"description": "Number of partitions. You cannot change this value once you created a cluster.",
"default": 12,
"type": "integer"
},
"replicas": {
"description": "Number of replicates.",
"default": 0,
"type": "integer"
},
"upstream-cluster": {
"description": "Identifier of the cluster to backup.",
"type": "string"
},
"upstream-masters": {
"description": "Master nodes in the cluster to backup.",
"type": "string"
},
"sla-dict": {
"description": "[NEO SR only] Where to request instances. Each key is a query string for criterions (e.g. \"computer_guid=foo\"), and each value is a list of partition references ('node-0', 'node-1', ...).",
"additionalProperties": {
"type": "array",
"items": { "type": "string" },
"uniqueItems": true
},
"type": "object"
},
"node-list": {
"description": "List of dictionaries containing parameters for each node.",
"items": {
"description": "Dictionary containing parameters required to configure individual nodes.",
"default": {},
"properties": {
"admin": {
"description": "Port of admin node. 0 to disable.",
"default": 2050,
"type": "integer"
},
"master": {
"description": "Port of master node. 0 to disable.",
"default": 2051,
"type": "integer"
},
"storage-count": {
"description": "Number of storage nodes to deploy. One master and one admin node is deployed with each storage.",
"default": 1,
"type": "integer"
},
"mysql": {
"description": "Dictionary containing parameters for MySQL.",
"default": {},
"properties": {
"relaxed-writes": {
"description": "When enabled, sets innodb_flush_log_at_trx_commit = 0, innodb_flush_method = nosync, innodb_doublewrite = 0 and sync_frm = 0 - RTFM, those options are dangerous",
"default": false,
"type": "boolean"
}
},
"additionalProperties": {
"description": "To configure important parameters like innodb_buffer_pool_size, tokudb_cache_size, etc.",
"type": "string"
},
"type": "object"
},
"engine": {
"description": "Configures storage engine, currently only InnoDB and TokuDB are supported. Defaults to NEO's default.",
"type": "string"
}
},
"type": "object"
},
"type": "array"
}
},
"type": "object"
}
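For illustration, a hypothetical parameter document that validates against this schema could look as follows (all values are examples, not recommendations):
{
"cluster": "neo-example",
"replicas": 1,
"sla-dict": {"computer_guid=COMP-1234": ["node-0"]},
"node-list": [
{"storage-count": 2},
{"storage-count": 2, "mysql": {"innodb_buffer_pool_size": "1G"}}
]
}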
[buildout]
parts =
neo-master-run-wrapper
parts +=
neo-master-promise
eggs-directory = {{eggs_directory}}
develop-eggs-directory = {{develop_eggs_directory}}
offline = true
logrotate-master
[neo-master]
recipe = slapos.cookbook:neoppod.master
binary = {{bin_directory}}/neomaster
wrapper = ${directory:bin}/neomaster
wrapper = ${directory:etc_run}/neomaster
logfile = ${directory:log}/neomaster.log
ip = ${publish:ip}
port = ${publish:port}
cluster = ${slap-parameter:cluster}
partitions = ${slap-parameter:partitions}
replicas = ${slap-parameter:replicas}
masters = ${slap-parameter:masters}
[publish]
recipe = slapos.cookbook:publish
# TODO: make port a partition parameter
# TODO: stop using slap-network-information
ip = [${slap-network-information:global-ipv6}]
port = 10000
master = ${:ip}:${:port}
[neo-master-run-wrapper]
recipe = slapos.cookbook:symbolic.link
target-directory = ${directory:run}
link-binary = ${neo-master:wrapper}
[directory]
recipe = slapos.cookbook:mkdirectory
promises = ${buildout:directory}/etc/promises
run = ${buildout:directory}/etc/run
log = ${buildout:directory}/var/log
bin = ${buildout:directory}/bin
port = ${publish:port-master}
cluster = {{ dumps(slapparameter_dict['cluster']) }}
partitions = {{ slapparameter_dict['partitions'] }}
replicas = {{ slapparameter_dict['replicas'] }}
upstream-cluster = {{ dumps(slapparameter_dict['upstream-cluster']) }}
upstream-masters = {{ dumps(slapparameter_dict['upstream-masters']) }}
autostart = {{ slapparameter_dict['autostart'] }}
# "masters" parameter is not provided when just requesting a partition.
# No actual installation takes place at that time
# (slapos.cookbook:neoppod.master raises), but cfg expansion must succeed. So
# this default value is required.
masters = ${publish:masters}
[neo-master-promise]
recipe = slapos.cookbook:check_port_listening
......@@ -44,9 +28,10 @@ hostname = ${neo-master:ip}
port = ${neo-master:port}
path = ${directory:promises}/neo-master-promise
[slap-parameter]
# "masters" parameter is not provided when just requesting a partition.
# No actual installation takes place at that time
# (slapos.cookbook:neoppod.master raises), but cfg expansion must succeed. So
# this default value is required.
masters =
[logrotate-master]
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
name = neo-master
log = ${neo-master:logfile}
post = {{ bin_directory }}/slapos-kill -n neomaster -s RTMIN+1 ${:log}
{
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Values returned by NEO instantiation",
"properties": {
"masters": {
"description": "Connection parameters of all the master nodes",
"type": "string"
},
"admins": {
"description": "Connection parameters of all the admin nodes",
"type": "string"
}
},
"type": "object"
}
[buildout]
extends = {{ master_cfg }}
parts += neo-storage-run-wrapper
[mariadb-instance]
recipe = slapos.cookbook:generic.mysql
user = user
database = neo
{% macro section(name) %}{% do part_list.append(name) %}{{ name }}{% endmacro -%}
{% set part_list = [] -%}
{% set init_list = [] -%}
conf-file = ${directory:etc}/mariadb.cnf
# No networking (ip & port options)
[mysqld]
recipe = slapos.cookbook:generic.mysql.wrap_mysqld
output = ${directory:etc_run}/mariadb
binary = ${:mysql-base-directory}/bin/mysqld
configuration-file = ${my-cnf:rendered}
data-directory = ${directory:srv_mariadb}
bin-directory = ${directory:bin}
pid-file = ${directory:var_run}/mariadb.pid
mysql-install-binary = ${:mysql-base-directory}/scripts/mysql_install_db
mysql-base-directory = {{ mariadb_location }}
[my-cnf-parameters]
socket = ${directory:var_run}/mariadb.sock
data-directory = ${mysqld:data-directory}
pid-file = ${directory:var_run}/mariadb.pid
error-log = ${directory:log}/mariadb_error.log
slow-query-log = ${directory:log}/mariadb_slowquery.log
extra-dict = {{ dumps(slapparameter_dict.get('mysql', {})) }}
init-file = ${init-script:rendered}
wrapper = ${directory:etc_run}/mariadb
update-wrapper = ${directory:etc_run}/mariadb_update
mysql-base-directory = {{ mariadb_location }}
mysql-binary = {{ mariadb_location }}/bin/mysql
mysql-install-binary = {{ mariadb_location }}/scripts/mysql_install_db
mysql-upgrade-binary = {{ mariadb_location }}/bin/mysql_upgrade
mysqld-binary = {{ mariadb_location }}/bin/mysqld
[my-cnf]
recipe = slapos.recipe.template:jinja2
extensions = jinja2.ext.do
rendered = ${directory:etc}/mariadb.cnf
template = {{ template_neo_my_cnf }}
context = section parameter_dict my-cnf-parameters
mroonga =
{% set master_list = [] -%}
{% set admin_list = [] -%}
{% for k, v in slapparameter_dict.iteritems() -%}
{% if k.startswith('master-') and v -%}
{% do master_list.append(v) -%}
{% endif -%}
{% if k.startswith('admin-') and v -%}
{% do admin_list.append(v) -%}
{% endif -%}
{% endfor -%}
[publish]
recipe = slapos.cookbook:publish.serialised
# TODO: make port a partition parameter
ip = {{ (ipv4_set | list)[0] }}
{% set admin = slapparameter_dict.get('admin', 2050) -%}
{% set master = slapparameter_dict.get('master', 2051) -%}
{% if master -%}
port-master = {{ master }}
master = ${:ip}:${:port-master}
{% else -%}
master =
{% endif -%}
{% if admin -%}
port-admin = {{ admin }}
admin = ${:ip}:${:port-admin}
{% else -%}
admin =
{% endif -%}
masters = {{ ' '.join(sorted(master_list)) }}
{%- if admin_list %}
admins = {{ ' '.join(sorted(admin_list)) }}
{%- endif %}
[neo-storage]
recipe = slapos.cookbook:neoppod.storage
binary = {{ bin_directory }}/neostorage
wrapper = ${directory:bin}/neostorage
logfile = ${directory:log}/neostorage.log
ip = [${slap-network-information:global-ipv6}]
cluster = ${slap-parameter:cluster}
masters = ${slap-parameter:masters}
ip = ${publish:ip}
cluster = {{ dumps(slapparameter_dict['cluster']) }}
masters = ${publish:masters}
database-adapter = MySQL
database-parameters = ${mariadb-instance:user}:${mariadb-instance:password}@${mariadb-instance:database}${mariadb-instance:socket}
wait-database = 60
engine = {{ slapparameter_dict.get('engine', '') }}
[logrotate-storage]
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
{% for i in range(slapparameter_dict.get('storage-count', 1)) -%}
{% set storage_id = 'neo-storage-' ~ i -%}
[{{ section(storage_id) }}]
< = neo-storage
wrapper = ${directory:etc_run}/{{ 'neostorage-' ~ i }}
logfile = ${directory:log}/{{ 'neostorage-' ~ i }}.log
{% do init_list.append('CREATE DATABASE IF NOT EXISTS neo' ~ i ~ ';') -%}
database-parameters = root@neo{{ i }}${my-cnf-parameters:socket}
[{{ section('logrotate-storage-' ~ i) }}]
< = logrotate-storage
name = {{ storage_id }}
log = {{ '${' + storage_id + ':logfile}' }}
post = {{ bin_directory }}/slapos-kill -n neostorage -s RTMIN+1 ${:log}
[neo-storage-run-wrapper]
recipe = slapos.cookbook:symbolic.link
target-directory = ${directory:etc_run}
link-binary = ${neo-storage:wrapper}
{% endfor -%}
[init-script]
recipe = slapos.recipe.template:jinja2
# XXX: is there a better location?
rendered = ${directory:etc}/mariadb_initial_setup.sql
template = inline:
{{ init_list | join('\n\t') }}
[directory]
recipe = slapos.cookbook:mkdirectory
promises = ${buildout:directory}/etc/promises
bin = ${buildout:directory}/bin
etc = ${buildout:directory}/etc
var = ${buildout:directory}/var
etc_run = ${:etc}/run
var_run = ${:var}/run
srv_mariadb = ${buildout:directory}/srv/mariadb
log = ${buildout:directory}/var/log
[logrotate-mysql]
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
name = mariadb
log = ${my-cnf-parameters:error-log} ${my-cnf-parameters:slow-query-log}
post = ${mysqld:mysql-base-directory}/bin/mysql --defaults-file="${my-cnf:rendered}" -e "FLUSH LOGS"
[buildout]
extends =
{{ logrotate_cfg }}
{%- if master %}
{{ master_cfg }}
{%- endif %}
{%- if admin %}
{{ admin_cfg }}
{%- endif %}
parts +=
{{ '\n '.join(part_list) }}
logrotate-mysql
{% set admin_software_type = 'neo-admin' -%}
{% set mysql_storage_software_type = 'neo-storage-mysql' -%}
[buildout]
parts = switch-softwaretype
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
extends = {{ instance_common_cfg }}
[slap-connection]
computer-id = ${slap_connection:computer_id}
partition-id = ${slap_connection:partition_id}
server-url = ${slap_connection:server_url}
software-release-url = ${slap_connection:software_release_url}
key-file = ${slap_connection:key_file}
cert-file = ${slap_connection:cert_file}
[slap-configuration]
recipe = slapos.cookbook:slapconfiguration.serialised
computer = ${slap-connection:computer-id}
partition = ${slap-connection:partition-id}
url = ${slap-connection:server-url}
key = ${slap-connection:key-file}
cert = ${slap-connection:cert-file}
[jinja2-template-base]
rendered = ${buildout:parts-directory}/${:_buildout_section_name_}.cfg
[neo-cluster]
recipe = slapos.recipe.template:jinja2
<= jinja2-template-base
template = {{ cluster }}
rendered = ${buildout:parts-directory}/${:_buildout_section_name_}/cluster.cfg
extensions = jinja2.ext.do
context =
key eggs_directory buildout:eggs-directory
key develop_eggs_directory buildout:develop-eggs-directory
key slap_software_type slap-configuration:slap-software-type
key slapparameter_dict slap-configuration:configuration
raw admin_software_type {{ admin_software_type }}
raw mysql_storage_software_type {{ mysql_storage_software_type }}
extra-context =
import urlparse urlparse
import-list =
rawfile root_common {{ root_common }}
[switch-softwaretype]
recipe = slapos.cookbook:softwaretype
default = ${neo-cluster:rendered}
{{ admin_software_type }} = {{ neo_admin }}
{{ mysql_storage_software_type }} = {{ neo_storage_mysql }}
recipe = slapos.cookbook:switch-softwaretype
override = {{ dumps(override_switch_softwaretype |default) }}
default = neo-cluster:rendered
# BBB
RootSoftwareInstance = ${:default}
neo = neo-storage-mysql:rendered
{% macro assert(x) %}{{ ("",)[not x] }}{% endmacro -%}
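{# The assert macro above relies on tuple indexing: when x is truthy, not x is
False (i.e. 0) and ("",)[0] renders an empty string; when x is falsy, not x is
True (i.e. 1) and indexing the one-element tuple raises IndexError, aborting
template rendering. It is used below to reject extra-dict keys containing "-". -#}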
{% set socket = parameter_dict['socket'] -%}
{% set extra_dict = parameter_dict['extra-dict'] -%}
[mysqld]
skip_networking
socket = {{ socket }}
datadir = {{ parameter_dict['data-directory'] }}
pid_file = {{ parameter_dict['pid-file'] }}
log_error = {{ parameter_dict['error-log'] }}
slow_query_log
slow_query_log_file = {{ parameter_dict['slow-query-log'] }}
init_file = {{ parameter_dict['init-file'] }}
log_warnings = 1
disable-log-bin
### Enables TokuDB
plugin-load = ha_tokudb
## The following settings come from ERP5 configuration.
max_allowed_packet = 128M
query_cache_size = 32M
innodb_locks_unsafe_for_binlog = 1
# Some dangerous settings you may want to uncomment temporarily
# if you only care about performance or want less disk access.
{% set x = '' if extra_dict.pop('relaxed-writes', False) else '#' -%}
{{x}}innodb_flush_log_at_trx_commit = 0
{{x}}innodb_flush_method = nosync
{{x}}innodb_doublewrite = 0
{{x}}sync_frm = 0
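{# With the default (relaxed-writes unset or false), x is "#" and the four
settings above are rendered commented out; when the "mysql" parameter dict
sets relaxed-writes to true, x is empty and they become active settings. -#}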
# Extra parameters.
{%- do extra_dict.setdefault('innodb_file_per_table', '0') %}
{%- for k, v in extra_dict.iteritems() %}
{%- do assert('-' not in k) %}
{{ k }} = {{ v }}
{%- endfor %}
# Force utf8 usage
collation_server = utf8_unicode_ci
character_set_server = utf8
skip_character_set_client_handshake
[client]
socket = {{ socket }}
user = root
{% macro section(name) %}{% do part_list.append(name) %}{{ name }}{% endmacro -%}
{% set part_list = [] -%}
{% set sla_dict = {} -%}
{% for sla, ref_list in slapparameter_dict.get('sla-dict', {}).iteritems() -%}
{% do sla_dict.update(dict.fromkeys(ref_list, sla)) -%}
{% endfor -%}
{% macro sla(name, required=False) -%}
{% if required or name in sla_dict -%}
{% for k, (v,) in urlparse.parse_qs(sla_dict.pop(name), strict_parsing=1).iteritems() -%}
sla-{{ k }} = {{ v }}
{% endfor -%}
{% else -%}
sla-computer_guid = ${slap-connection:computer-id}
{% endif -%}
{% endmacro -%}
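{# Worked example with hypothetical values: if the "sla-dict" request parameter
is {"computer_guid=COMP-1234": ["node-0"]}, sla_dict above becomes
{"node-0": "computer_guid=COMP-1234"} and sla("node-0") renders
"sla-computer_guid = COMP-1234", while any reference without an entry falls
back to the requesting partition's computer. -#}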
{% macro common_section() -%}
[request-common-base]
recipe = slapos.cookbook:request.serialised
software-url = ${slap-connection:software-release-url}
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
[buildout]
parts =
publish
{{ part_list | join('\n\t') }}
{{- assert(not sla_dict, sla_dict) }}
{% endmacro -%}
{% macro request_neo(parameter_dict, software_type, prefix='node-') -%}
{% set section_id_list = [] -%}
[{{ prefix }}request-common]
<= request-common-base
config-cluster = {{ parameter_dict['cluster'] }}
{% set replicas = parameter_dict.get('replicas', 0) -%}
config-partitions = {{ dumps(parameter_dict.get('partitions', 12)) }}
config-replicas = {{ dumps(replicas) }}
config-upstream-cluster = {{ dumps(parameter_dict.get('upstream-cluster', '')) }}
config-upstream-masters = {{ dumps(parameter_dict.get('upstream-masters', '')) }}
software-type = {{ software_type }}
{% set node_list = parameter_dict.get('node-list', ({},)) -%}
{% set storage_count = [] -%}
{% for node in node_list -%}
{% do storage_count.append(node.get('storage-count', 1)) -%}
{% endfor -%}
config-autostart = {{ dumps(sum(storage_count)) }}
{% do assert(replicas < len(node_list)) -%}
{% for i, node in enumerate(node_list) -%}
{% set section_id = prefix ~ i -%}
{% do section_id_list.append(section_id) -%}
[{{ section_id }}-base]
name = {{ section_id }}
{% for k, v in node.iteritems() -%}
config-{{ k }} = {{ dumps(v) }}
{% endfor -%}
{{ sla(section_id) }}
[{{ section_id }}]
<= {{ prefix }}request-common
{{ section_id }}-base
return =
master
admin
{% endfor -%}
[final-base]
{% for i, section_id in enumerate(section_id_list) -%}
config-master-{{i}} = {{ '${' + section_id + ':connection-master}' }}
config-admin-{{i}} = {{ '${' + section_id + ':connection-admin}' }}
{% endfor -%}
{% for section_id in section_id_list -%}
[{{ section(section_id + '-final') }}]
<= {{ prefix }}request-common
final-base
{{ section_id }}-base
{% if loop.first -%}
return =
masters
admins
{% endif -%}
{% endfor -%}
{% endmacro -%}
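{# Rough sketch of what request_neo does for a hypothetical 2-node node-list:
it first requests partitions node-0 and node-1 with return = master, admin so
that each node's addresses become known; final-base then aggregates them as
config-master-0/1 and config-admin-0/1, and node-0-final / node-1-final
re-request the same partitions with the complete list, only the first of them
declaring return = masters, admins for publication. -#}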
# Note on LXML/END LXML: they delimit areas where lxml magic is needed. lxml is
# a slapos.cookbook dependency, so it should be fetched automatically. But when
# automatically fetched, it gets built against system headers/libs, which is
# forbidden in slapos. So we need to fetch lxml explicitly so it is properly
# built.
[buildout]
extends =
../../stack/slapos.cfg
../../component/dcron/buildout.cfg
../../component/gzip/buildout.cfg
../../component/logrotate/buildout.cfg
#LXML
../../component/lxml-python/buildout.cfg
#END LXML
../../component/python-2.7/buildout.cfg
../../component/mariadb/buildout.cfg
../../component/mysql-python/buildout.cfg
parts =
slapos-deps-eggs
slapos-cookbook-develop
slapos-cookbook
# NEO & dependencies
python2.7
mariadb
mysql-python
neoppod
[slapos.cookbook-repository]
branch = erp5-cluster
[slapos-deps-eggs]
recipe = zc.recipe.egg
eggs =
${lxml-python:egg}
slapos.toolbox
scripts =
slapos-kill
[template-logrotate-base]
recipe = slapos.recipe.template:jinja2
template = ${:_profile_base_location_}/${:filename}.in
rendered = ${buildout:directory}/${:filename}
filename = instance-logrotate-base.cfg
md5sum = af19ff0c7817df85987c69738fb083f2
context =
key dcron_location dcron:location
key gzip_location gzip:location
key logrotate_location logrotate:location
[download-base-neo]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/${:_buildout_section_name_}.cfg.in
# XXX: following mode should be the default
mode = 644
# XXX: must be rendered, not just downloaded
[instance-common]
recipe = slapos.recipe.template:jinja2
template = ${:_profile_base_location_}/${:_buildout_section_name_}.cfg.in
rendered = ${buildout:directory}/${:_buildout_section_name_}.cfg
md5sum = e8f5a83580e9791a32c66f5935cb6840
context =
key bin_directory buildout:bin-directory
key develop_eggs_directory buildout:develop-eggs-directory
key eggs_directory buildout:eggs-directory
key mariadb_location mariadb:location
key neo_admin instance-neo-admin:target
key neo_master instance-neo-master:target
key neo_storage_mysql instance-neo-storage-mysql:target
key template_logrotate_base template-logrotate-base:rendered
key template_neo_my_cnf template-neo-my-cnf:target
[root-common]
<= download-base-neo
md5sum = 26193dbb132d340c8ba919a616449a17
[instance-neo-admin]
<= download-base-neo
md5sum = 16d11f0fe74de06aebbadcff3527db1c
[instance-neo-master]
<= download-base-neo
md5sum = 023f08763dbba2319f58e5c597f7761d
[instance-neo-storage-mysql]
<= download-base-neo
md5sum = 5a61039c7a980e24519e1bbb1252e662
[template-neo-my-cnf]
<= download-base-neo
url = ${:_profile_base_location_}/my.cnf.in
md5sum = febd3ed58043ce1367b86cf6e4e69700
[neoppod]
recipe = zc.recipe.egg
eggs = neoppod[admin, ctl, master, storage-importer, storage-mysqldb]
ZODB3
ZODB3-patches = ${:_profile_base_location_}/../../component/egg-patch/ZODB3-3.10.5.patch
ZODB3-patch-options = -p1
[versions]
# patched eggs
ZODB3 = 3.10.5+SlapOSPatched001
# Note on LXML/END LXML: they delimit areas where lxml magic is needed. lxml is
# a slapos.cookbook dependency, so it should be fetched automatically. But when
# automatically fetched, it gets built against system headers/libs, which is
# forbidden in slapos. So we need to fetch lxml explicitly so it is properly
# built.
[buildout]
extends =
../../stack/slapos.cfg
#LXML
../../component/lxml-python/buildout.cfg
#END LXML
../../component/python-2.7/buildout.cfg
../../component/mariadb/buildout.cfg
../../component/mysql-python/buildout.cfg
software-common.cfg
parts =
slapos-deps-eggs
slapos-cookbook
# NEO & dependencies
python2.7
mariadb
mysql-python
neoppod
parts +=
# NEO instantiation
template-instance
[slapos-deps-eggs]
recipe = zc.recipe.egg
eggs =
#LXML
${lxml-python:egg}
#END LXML
template
[base-template]
[template]
recipe = slapos.recipe.template:jinja2
template = ${:_profile_base_location_}/${:filename}.in
rendered = ${buildout:directory}/${:filename}
extensions = jinja2.ext.do
extra-context =
context =
key eggs_directory buildout:eggs-directory
key develop_eggs_directory buildout:develop-eggs-directory
key bin_directory buildout:bin-directory
${:extra-context}
[template-neo-master]
< = base-template
filename = instance-neo-master.cfg
md5sum = 8947620b15535b95163c11a0efab546e
[template-neo-storage-mysql]
< = base-template
filename = instance-neo-storage-mysql.cfg
md5sum = 518c8c41ce73f0b608fbb1c3b889ca1b
extra-context =
key mariadb_location mariadb:location
key master_cfg template-neo-master:rendered
[template-neo-admin]
< = base-template
filename = instance-neo-admin.cfg
md5sum = 987f69333c563898cab21455509f8024
[template-cluster]
recipe = slapos.recipe.build:download
url = ${:_profile_base_location_}/cluster.cfg.in
md5sum = edd46f8670421520ef20c786cd65d8c1
[template-instance]
< = base-template
template = ${:_profile_base_location_}/instance.cfg.in
md5sum = 777c00a7dbcb145a75f980421a9b20b5
# XXX: "template.cfg" is hardcoded in instanciation recipe
filename = template.cfg
md5sum = 17e761b371487b55b39da1c39ebb1bb6
extra-context =
key neo_master template-neo-master:rendered
key neo_storage_mysql template-neo-storage-mysql:rendered
key neo_admin template-neo-admin:rendered
key cluster template-cluster:target
rendered = ${buildout:directory}/template.cfg
context =
key cluster cluster:target
key instance_common_cfg instance-common:rendered
key root_common root-common:target
[neoppod]
recipe = zc.recipe.egg
eggs = neoppod[admin, master, storage-mysqldb]
[cluster]
<= download-base-neo
md5sum = ee8401a4e7d82bf488a57e3399f9ce48
LoadModule unixd_module modules/mod_unixd.so
LoadModule access_compat_module modules/mod_access_compat.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
LoadModule ssl_module modules/mod_ssl.so
LoadModule mime_module modules/mod_mime.so
LoadModule dav_module modules/mod_dav.so
LoadModule dav_fs_module modules/mod_dav_fs.so
LoadModule negotiation_module modules/mod_negotiation.so
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule headers_module modules/mod_headers.so
PidFile "{{ parameter_dict['pid-file'] }}"
ServerAdmin admin@
TypesConfig conf/mime.types
AddType application/x-compress .Z
AddType application/x-gzip .gz .tgz
ServerTokens Prod
ServerSignature Off
TraceEnable Off
TimeOut {{ parameter_dict['timeout'] }}
SSLCertificateFile {{ parameter_dict['cert'] }}
SSLCertificateKeyFile {{ parameter_dict['key'] }}
SSLRandomSeed startup builtin
SSLRandomSeed connect builtin
SSLProtocol -ALL +SSLv3 +TLSv1
SSLHonorCipherOrder On
{% if parameter_dict['cipher'] -%}
SSLCipherSuite {{ parameter_dict['cipher'] }}
{% else -%}
SSLCipherSuite RC4-SHA:HIGH:!ADH
{%- endif %}
SSLSessionCache shmcb:{{ parameter_dict['ssl-session-cache'] }}(512000)
SSLProxyEngine On
# As the backend trusts the REMOTE_USER header, always unset it
RequestHeader unset REMOTE_USER
ErrorLog "{{ parameter_dict['error-log'] }}"
# Default apache log format with the request time in microseconds at the end
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %D" combined
CustomLog "{{ parameter_dict['access-log'] }}" combined
<Directory />
Options FollowSymLinks
AllowOverride None
Allow from all
</Directory>
RewriteEngine On
{% for port, _, backend, authentication in parameter_dict['backend-list'] -%}
{% for ip in parameter_dict['ip-list'] -%}
Listen {{ ip }}:{{ port }}
{% endfor -%}
<VirtualHost *:{{ port }}>
{% if authentication -%}
SSLVerifyClient require
RequestHeader set REMOTE_USER %{SSL_CLIENT_S_DN_CN}s
SSLCACertificateFile {{ parameter_dict['ca-cert'] }}
SSLCARevocationPath {{ parameter_dict['crl'] }}
{% endif -%}
SSLEngine on
RewriteRule ^/(.*) {{ backend }}/$1 [L,P]
</VirtualHost>
{% endfor -%}
{% set part_list = [] -%}
{% set ssl_parameter_dict = slapparameter_dict.get('ssl', {}) %}
{% macro section(name) %}{% do part_list.append(name) %}{{ name }}{% endmacro -%}
{% set use_ipv6 = slapparameter_dict.get('use-ipv6', False) -%}
{#
XXX: This template only supports exactly one IPv4 and (if ipv6 is used) one IPv6
per partition. No more (undefined result), no less (IndexError).
-#}
# TODO: insert varnish between apache & haproxy.
# And think of a way to specify which URLs go through varnish and which go
# directly to haproxy (maybe just by passing a literal configuration file chunk).
{% set ipv4 = (ipv4_set | list)[0] -%}
{% set apache_ip_list = [ipv4] -%}
{% if ipv6_set -%}
{% set ipv6 = (ipv6_set | list)[0] -%}
{% do apache_ip_list.append('[' ~ ipv6 ~ ']') -%}
{% endif -%}
{% if use_ipv6 -%}
[zope-tunnel-base]
recipe = slapos.cookbook:ipv4toipv6
runner-path = ${directory:services}/${:base-name}
6tunnel-path = {{ parameter_dict['6tunnel'] }}/bin/6tunnel
shell-path = {{ parameter_dict['dash'] }}/bin/dash
ipv4 = {{ ipv4 }}
{% endif -%}
{% set haproxy_dict = {} -%}
{% set apache_dict = {} -%}
{% set next_port = slapparameter_dict['tcpv4-port'] -%}
{% for family_name, parameter_id_list in slapparameter_dict['zope-family-dict'].items() -%}
{% set zope_family_address_list = [] -%}
{% set has_webdav = [] -%}
{% for parameter_id in parameter_id_list -%}
{% set zope_address_list = slapparameter_dict[parameter_id] -%}
{% for zope_address, maxconn, webdav in zope_address_list -%}
{% if webdav -%}
{% do has_webdav.append(None) %}
{% endif -%}
{% if use_ipv6 -%}
[{{ section('zope-tunnel-' ~ next_port) }}]
< = zope-tunnel-base
base-name = {{ 'zeo-tunnel-' ~ next_port }}
ipv4-port = {{ next_port }}
ipv6-port = {{ zope_address.split(']:')[1] }}
ipv6 = {{ zope_address.split(']:')[0][1:] }}
{% set zope_effective_address = ipv4 ~ ":" ~ next_port -%}
{% set next_port = next_port + 1 -%}
{% else -%}
{% set zope_effective_address = zope_address -%}
{% endif -%}
{% do zope_family_address_list.append((zope_effective_address, maxconn, webdav)) -%}
{% endfor -%}
{% endfor -%}
{# Make rendering fail artificially if any family has no known backend.
# This is useful as haproxy's hot-reconfiguration mechanism is
# supervisord-incompatible.
# As jinja2 postpones KeyError until place-holder value is actually used,
# do a no-op getitem.
-#}
{% do zope_family_address_list[0][0] -%}
{% set haproxy_port = next_port -%}
{% set next_port = next_port + 1 -%}
{% do haproxy_dict.__setitem__(family_name, (haproxy_port, zope_family_address_list)) -%}
{% if has_webdav -%}
{% set internal_scheme = 'http' -%}{# mod_rewrite does not recognise webdav scheme -#}
{% set external_scheme = 'webdavs' -%}
{% else %}
{% set internal_scheme = 'http' -%}
{% set external_scheme = 'https' -%}
{% endif -%}
{% set backend_path = slapparameter_dict['backend-path-dict'][family_name] -%}
{% set ssl_authentication = slapparameter_dict['ssl-authentication-dict'][family_name] -%}
{% do apache_dict.__setitem__(family_name, (next_port, external_scheme, internal_scheme ~ '://' ~ ipv4 ~ ':' ~ haproxy_port ~ backend_path, ssl_authentication)) -%}
{% set next_port = next_port + 1 -%}
{% endfor -%}
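{# Hypothetical example: for a family "default" whose haproxy listens on
10.0.0.1:2151, with backend path / and no client-certificate authentication,
apache_dict ends up containing
"default": (2152, "https", "http://10.0.0.1:2151/", False)
i.e. apache listens on port 2152 over https and proxies to the haproxy port. -#}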
[apache-certificate-authority]
recipe = slapos.cookbook:certificate_authority
openssl-binary = {{ parameter_dict['openssl'] }}/bin/openssl
ca-dir = ${directory:ca-dir}
requests-directory = ${directory:requests}
wrapper = ${directory:services}/apache-ca
ca-private = ${directory:private}
ca-certs = ${directory:certs}
ca-newcerts = ${directory:newcerts}
ca-crl = ${directory:crl}
country-code = {{ slapparameter_dict['country-code'] }}
email = {{ slapparameter_dict['email'] }}
state = {{ slapparameter_dict['state'] }}
city = {{ slapparameter_dict['city'] }}
company = {{ slapparameter_dict['company'] }}
[haproxy-cfg-parameter-dict]
socket-path = ${directory:run}/haproxy.sock
server-check-path = {{ dumps(slapparameter_dict['haproxy-server-check-path']) }}
backend-dict = {{ dumps(haproxy_dict) }}
ip = {{ ipv4 }}
[haproxy-cfg]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['template-haproxy-cfg'] }}
rendered = ${directory:etc}/haproxy.cfg
context = section parameter_dict haproxy-cfg-parameter-dict
extensions = jinja2.ext.do
[{{ section('haproxy') }}]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/haproxy
command-line = "{{ parameter_dict['haproxy'] }}/sbin/haproxy" -f "${haproxy-cfg:rendered}"
{# TODO: build socat and wrap it as "${directory:bin}/haproxy-ctl" to connect to "${haproxy-cfg-parameter-dict:socket-path}" #}
[apache-conf-ssl]
cert = ${directory:apache-conf}/apache.crt
key = ${directory:apache-conf}/apache.pem
ca-cert = ${directory:apache-conf}/ca.crt
crl = ${directory:apache-conf}/crl.pem
[apache-conf-parameter-dict]
backend-list = {{ dumps(apache_dict.values()) }}
ip-list = {{ dumps(apache_ip_list) }}
pid-file = ${directory:run}/apache.pid
error-log = ${directory:log}/apache-error.log
access-log = ${directory:log}/apache-access.log
# Apache 2.4's default value (60 seconds) can be a bit too short
timeout = 300
# Basic SSL server configuration
cert = ${apache-ssl:cert}
key = ${apache-ssl:key}
cipher =
ssl-session-cache = ${directory:log}/apache-ssl-session-cache
# Client x509 auth
{% if ssl_parameter_dict.get('ca-cert') and ssl_parameter_dict.get('ca-crl') -%}
ca-cert = {{ dumps(ssl_parameter_dict.get('ca-cert')) }}
crl = {{ dumps(ssl_parameter_dict.get('ca-crl')) }}
{% else -%}
ca-cert = ${apache-certificate-authority:ca-dir}/cacert.pem
crl = ${apache-certificate-authority:ca-crl}
{% endif -%}
[apache-conf]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['template-apache-conf'] }}
rendered = ${directory:apache-conf}/apache.conf
context = section parameter_dict apache-conf-parameter-dict
[{{ section('apache') }}]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/apache
command-line = "{{ parameter_dict['apache'] }}/bin/httpd" -f "${apache-conf:rendered}" -DFOREGROUND
[{{ section('apache-promise') }}]
# Check any apache port on ipv4, expecting other ports and ipv6 to behave consistently
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/apache
hostname = {{ ipv4 }}
port = {{ apache_dict.values()[0][0] }}
[publish]
recipe = slapos.cookbook:publish.serialised
{% for family_name, (apache_port, scheme, _, _) in apache_dict.items() -%}
{{ family_name ~ '-v6' }} = {% if ipv6_set %}{{ scheme ~ '://[' ~ ipv6 ~ ']:' ~ apache_port }}{% endif %}
{{ family_name }} = {{ scheme ~ '://' ~ ipv4 ~ ':' ~ apache_port }}
{% endfor -%}
[apache-ssl]
recipe = plone.recipe.command
command = "{{ parameter_dict['openssl'] }}/bin/openssl" req -newkey rsa -batch -new -x509 -days 3650 -nodes -keyout "${:key}" -out "${:cert}"
key = ${apache-conf-ssl:key}
cert = ${apache-conf-ssl:cert}
[logrotate-apache]
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
name = apache
log = ${apache-conf-parameter-dict:error-log} ${apache-conf-parameter-dict:access-log}
post = {{ parameter_dict['bin-directory'] }}/slapos-kill --pidfile ${apache-conf-parameter-dict:pid-file} -s USR1
[directory]
recipe = slapos.cookbook:mkdirectory
apache-conf = ${:etc}/apache
bin = ${buildout:directory}/bin
etc = ${buildout:directory}/etc
promise = ${directory:etc}/promise
services = ${:etc}/run
var = ${buildout:directory}/var
run = ${:var}/run
log = ${:var}/log
ca-dir = ${buildout:directory}/srv/ssl
requests = ${:ca-dir}/requests
private = ${:ca-dir}/private
certs = ${:ca-dir}/certs
newcerts = ${:ca-dir}/newcerts
crl = ${:ca-dir}/crl
[buildout]
extends = {{ logrotate_cfg }}
parts +=
publish
logrotate-apache
apache-certificate-authority
{{ part_list | join('\n ') }}
[directory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
services = ${:etc}/run
promise = ${:etc}/promise
[erp5-bootstrap]
recipe = slapos.cookbook:erp5.bootstrap
runner-path = ${directory:services}/erp5-bootstrap
{# Note: a random domain name will be picked if several point to the same IP -#}
{% set reverse_hosts = {} -%}
{% for x, y in publish['hosts-dict'].iteritems() -%}
{% do reverse_hosts.__setitem__(y, x) -%}
{% endfor -%}
{# XXX: Expect the first database to be the one to use for catalog. -#}
{% set mysql_parsed = urlparse.urlparse(publish['mariadb-database-list'][0]) -%}
mysql-url = {{ dumps(urlparse.urlunparse(mysql_parsed[:1] + (mysql_parsed.username + ":" + mysql_parsed.password + "@" + reverse_hosts.get(mysql_parsed.hostname, mysql_parsed.hostname) + ':' ~ mysql_parsed.port, ) + mysql_parsed[2:])) }}
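{# Hypothetical example: if the first mariadb-database-list entry is
mysql://user:secret@10.0.0.2:2099/erp5 and hosts-dict maps "mariadb1" to
10.0.0.2, the mysql-url above becomes
mysql://user:secret@mariadb1:2099/erp5 (the IP is replaced by its host name). -#}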
{# Pick the first http[s] family found; they should all be equivalent anyway. -#}
{# Don't pick an http[s] family configured with ssl-authentication=true. By convention, such family names contain 'service'. -#}
{% set family_list = [] -%}
{% for key, value in publish.items() -%}
{% if key.startswith('family-') and not 'service' in key and value.startswith('http') -%}
{% do family_list.append(value.split('://', 1)) -%}
{% endif -%}
{% endfor -%}
zope-url = {{ dumps(family_list[0][0] + '://' + publish['inituser-login'] + ':' + publish['inituser-password'] + '@' + family_list[0][1] + '/' + publish['site-id']) }}
[promise-erp5-site]
recipe = slapos.cookbook:check_url_available
url = ${erp5-bootstrap:zope-url}
path = ${directory:promise}/erp5-site
dash_path = {{ parameter_dict['dash-location'] }}/bin/dash
curl_path = {{ parameter_dict['curl-location'] }}/bin/curl
[buildout]
parts = promise-erp5-site
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
\ No newline at end of file
{% set frontend_dict = slapparameter_dict.get('frontend', {}) %}
{% set has_frontend = frontend_dict.get('software-url', '') != '' -%}
{% set site_id = slapparameter_dict.get('site-id', 'erp5') -%}
{% set inituser_login = slapparameter_dict.get('inituser-login', 'zope') -%}
{% set publish_dict = {'site-id': site_id, 'inituser-login': inituser_login} -%}
[request-common]
recipe = slapos.cookbook:request.serialised
software-url = ${slap-connection:software-release-url}
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
config-use-ipv6 = {{ dumps(slapparameter_dict.get('use-ipv6', False)) }}
{% macro request(name, software_type, config_key, config, ret={'url': True}) -%}
{% do config.update(slapparameter_dict.get(config_key, {})) -%}
{% set section = 'request-' ~ name -%}
[{{ section }}]
< = request-common
name = {{ name }}
software-type = {{ software_type }}
return = {{ ret.keys() | join(' ') }}
{% for ret, publish in ret.items() -%}
{% if publish -%}
{% do publish_dict.__setitem__(name ~ '-' ~ ret, '${' ~ section ~ ':connection-' ~ ret ~ '}')%}
{% endif -%}
{% endfor -%}
sla-computer_guid = {{ dumps(slapparameter_dict.get(config_key + '-computer-guid', computer_id)) }}
{% for option, value in config.items() -%}
config-{{ option }} = {{ dumps(value) }}
{% endfor -%}
{% endmacro -%}
{{ request('memcached-persistent', 'kumofs', 'kumofs', {'tcpv4-port': 2000}) }}
{{ request('memcached-volatile', 'kumofs', 'memcached', {'tcpv4-port': 2010, 'ram-storage-size': 64}) }}
{{ request('cloudooo', 'cloudooo', 'cloudooo', {'tcpv4-port': 2020}) }}
{{ request('mariadb', 'mariadb', 'mariadb', {'tcpv4-port': 2099}, {'database-list': True, 'test-database-list': True}) }}
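{# As an illustration, the first call above expands to roughly:
[request-memcached-persistent]
< = request-common
name = memcached-persistent
software-type = kumofs
return = url
sla-computer_guid = <the "kumofs-computer-guid" parameter, or this computer>
config-tcpv4-port = 2000
plus one config-* line per option of the optional "kumofs" parameter dict, and
it registers "memcached-persistent-url" for the final [publish] section. -#}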
{# Fail early if an unexpected value is provided -#}
{% set zodb_type = slapparameter_dict.get('zodb-software-type') -%}
{% set zodb_extern = slapparameter_dict.get('zodb-extern') -%}
{% if {'zeo': 1, None: 0}[zodb_type] -%}
{{ request('zodb', 'zodb-' ~ zodb_type, 'zodb', {'tcpv4-port': 2100, 'zodb-dict': {'root': {}}}, {'zodb-storage-type': False, 'zodb-dict': False, 'tidstorage-ip': False, 'tidstorage-port': False}) }}
{% else -%}
{% do zodb_extern[0] %}
{% endif -%}
[inituser-password]
{% set inituser_password = slapparameter_dict.get('inituser-password') -%}
{% if inituser_password -%}
passwd = {{ dumps(inituser_password) }}
{% else -%}
recipe = slapos.cookbook:generate.password
{% endif -%}
[deadlock-debugger-password]
{% set deadlock_debugger_password = slapparameter_dict.get('deadlock-debugger-password') -%}
{% if deadlock_debugger_password -%}
passwd = {{ dumps(deadlock_debugger_password) }}
{% else -%}
recipe = slapos.cookbook:generate.password
{% endif -%}
[request-zope-base]
< = request-common
return =
zope-address-list
hosts-dict
config-bt5 = {{ dumps(slapparameter_dict.get('bt5', 'erp5_full_text_myisam_catalog erp5_configurator_standard erp5_configurator_maxma_demo erp5_configurator_ung erp5_configurator_run_my_doc slapos_configurator')) }}
config-bt5-repository-url = {{ dumps(slapparameter_dict.get('bt5-repository-url', local_bt5_repository)) }}
config-cloudooo-url = ${request-cloudooo:connection-url}
config-deadlock-debugger-password = ${deadlock-debugger-password:passwd}
config-developer-list = {{ dumps(slapparameter_dict.get('developer-list', [inituser_login])) }}
config-hosts-dict = {{ dumps(slapparameter_dict.get('hosts-dict', {})) }}
config-inituser-login = {{ dumps(inituser_login) }}
config-inituser-password = ${inituser-password:passwd}
config-kumofs-url = ${request-memcached-persistent:connection-url}
config-memcached-url = ${request-memcached-volatile:connection-url}
config-mysql-test-url-list = ${request-mariadb:connection-test-database-list}
config-mysql-url-list = ${request-mariadb:connection-database-list}
config-site-id = {{ dumps(site_id) }}
config-smtp-url = {{ dumps(slapparameter_dict.get('smtp-url', 'smtp://localhost:25/')) }}
config-timezone = {{ dumps(slapparameter_dict.get('timezone', 'UTC')) }}
{% if zodb_type -%}
config-tidstorage-ip = ${request-zodb:connection-tidstorage-ip}
config-tidstorage-port = ${request-zodb:connection-tidstorage-port}
config-zodb-dict = ${request-zodb:connection-zodb-dict}
config-zodb-storage-type = ${request-zodb:connection-zodb-storage-type}
{% endif -%}
{% if zodb_extern -%}
config-zodb-extern = {{ dumps(zodb_extern) }}
{% endif -%}
software-type = zope
{% set zope_family_dict = {} -%}
{% set zope_backend_path_dict = {} -%}
{% set ssl_authentication_dict = {} -%}
{% for custom_name, zope_parameter_dict in slapparameter_dict.get('zope-partition-dict', {'1': {}}).items() -%}
{% set partition_name = 'zope-' ~ custom_name -%}
{% set section_name = 'request-' ~ partition_name -%}
{% set backend_path = zope_parameter_dict.get('backend-path', '/') % {'site-id': site_id} %}
{% do zope_family_dict.setdefault(zope_parameter_dict.get('family', 'default'), []).append(section_name) -%}
{% do zope_backend_path_dict.setdefault(zope_parameter_dict.get('family', 'default'), backend_path) -%}
{% do ssl_authentication_dict.setdefault(zope_parameter_dict.get('family', 'default'), zope_parameter_dict.get('ssl-authentication', False)) -%}
[{{ section_name }}]
< = request-zope-base
name = {{ partition_name }}
config-name = {{ dumps(custom_name) }}
config-instance-count = {{ dumps(zope_parameter_dict.get('instance-count', 1)) }}
config-thread-amount = {{ dumps(zope_parameter_dict.get('thread-amount', 1)) }}
config-timerserver-interval = {{ dumps(zope_parameter_dict.get('timerserver-interval', 5)) }}
config-longrequest-logger-interval = {{ dumps(zope_parameter_dict.get('longrequest-logger-interval', -1)) }}
config-longrequest-logger-timeout = {{ dumps(zope_parameter_dict.get('longrequest-logger-timeout', 1)) }}
config-port-base = {{ dumps(zope_parameter_dict.get('port-base', 2200)) }}
config-webdav = {{ dumps(zope_parameter_dict.get('webdav', False)) }}
sla-computer_guid = {{ dumps(zope_parameter_dict.get('computer-guid', computer_id)) }}
{% endfor -%}
{# We need to concatenate lists that we cannot read as lists, so this gets hairy. -#}
{% set zope_address_list_id_dict = {} -%}
{% set zope_family_parameter_dict = {} -%}
{% for family_name, zope_section_id_list in zope_family_dict.items() -%}
{% for zope_section_id in zope_section_id_list -%}
{% set parameter_name = 'zope-family-entry-' ~ zope_section_id -%}
{% do zope_address_list_id_dict.__setitem__(zope_section_id, parameter_name) -%}
{% do zope_family_parameter_dict.setdefault(family_name, []).append(parameter_name) -%}
{% endfor -%}
{% if has_frontend -%}
{% set frontend_name = 'frontend-' ~ family_name -%}
{% set publishable = frontend_name ~ ':connection-site_url' -%}
[{{ frontend_name }}]
< = request-frontend-base
name = {{ frontend_name }}
config-url = ${request-balancer:{{ family_name }}-v6}
{% else -%}
{% set publishable = 'request-balancer:connection-' ~ family_name -%}
{% endif -%}
{% do publish_dict.__setitem__('family-' ~ family_name, '${' ~ publishable ~ '}' ) -%}
{% endfor -%}
{% set balancer_dict = slapparameter_dict.get('balancer', {}) -%}
[request-balancer]
< = request-common
name = balancer
software-type = balancer
sla-computer_guid = {{ dumps(slapparameter_dict.get('balancer-computer-guid', computer_id)) }}
return =
{%- for family in zope_family_dict %}
{{ family }}
{{ family }}-v6
{% endfor -%}
config-zope-family-dict = {{ dumps(zope_family_parameter_dict) }}
config-backend-path-dict = {{ dumps(zope_backend_path_dict) }}
config-ssl-authentication-dict = {{ dumps(ssl_authentication_dict) }}
config-tcpv4-port = {{ dumps(balancer_dict.get('tcpv4-port', 2150)) }}
{% for zope_section_id, name in zope_address_list_id_dict.items() -%}
config-{{ name }} = {{ ' ${' ~ zope_section_id ~ ':connection-zope-address-list}' }}
{% endfor -%}
# XXX: should those really be the same for all families?
config-haproxy-server-check-path = {{ dumps(balancer_dict.get('haproxy-server-check-path', '/') % {'site-id': site_id}) }}
config-ssl = {{ dumps(balancer_dict.get('ssl', {})) }}
config-country-code = {{ slapparameter_dict.get('country-code', 'ZZ') }}
config-email = {{ slapparameter_dict.get('email', 'nobody@example.com') }}
config-state = {{ slapparameter_dict.get('state', "('State',)") }}
config-city = {{ slapparameter_dict.get('city', 'City') }}
config-company = {{ slapparameter_dict.get('company', 'Compagny') }}
[request-frontend-base]
{% if has_frontend -%}
< = request-common
software-url = {{ dumps(frontend_dict['software-url']) }}
software-type = {{ dumps(frontend_dict.get('software-type', 'RootSoftwareInstance')) }}
sla-instance_guid = {{ dumps(frontend_dict['instance-guid']) }}
slave = true
{% set config_dict = {
'type': 'zope',
} -%}
{% if frontend_dict.get('domain') -%}
{% do config_dict.__setitem__('custom_domain', frontend_dict['domain']) -%}
{% endif -%}
{% for name, value in config_dict.items() -%}
config-{{ name }} = {{ value }}
{% endfor -%}
return = site_url
{% endif -%}
[publish]
recipe = slapos.cookbook:publish.serialised
deadlock-debugger-password = ${deadlock-debugger-password:passwd}
inituser-password = ${inituser-password:passwd}
{#
Pick any published hosts-dict; they are expected to be identical, and there is
no way to check that here.
-#}
hosts-dict = {{ '${' ~ zope_address_list_id_dict.keys()[0] ~ ':connection-hosts-dict}' }}
{% for name, value in publish_dict.items() -%}
{{ name }} = {{ value }}
{% endfor -%}
[buildout]
parts = publish
[buildout]
extends =
../../stack/erp5/buildout.cfg
parts +=
vifib-fix-products-paths
[local-bt5-repository]
# Same as bt5-repository, but restricted to local repositories.
# Used to generate bt5lists.
list = ${erp5:location}/bt5 ${erp5:location}/product/ERP5/bootstrap ${vifib:location}/master/bt5
[genbt5list]
recipe = plone.recipe.command
stop-on-error = true
genbt5list = ${erp5:location}/product/ERP5/bin/genbt5list
command =
${buildout:executable} ${:genbt5list} ${local-bt5-repository:list}
update-command = ${:command}
[erp5_repository_list]
repository_id_list = erp5 vifib/master
[erp5]
recipe = slapos.recipe.build:gitclone
repository = http://git.erp5.org/repos/erp5.git
branch = interaction-drop
git-executable = ${git:location}/bin/git
[vifib]
recipe = slapos.recipe.build:gitclone
repository = http://git.erp5.org/repos/slapos.core.git
branch = master
git-executable = ${git:location}/bin/git
[vifib-fix-products-paths]
recipe = plone.recipe.command
stop-on-error = true
command =
for DIR in "${vifib:location}/master"; do cd "$DIR"; rm -f Products ; ln -s product Products; touch product/__init__.py; done
update-command = ${:command}
[eggs]
eggs +=
suds
facebook-sdk
google-api-python-client
spyne
slapos.core
dummy +=
${vifib:location}
extra-paths +=
${vifib:location}/master
[template-erp5]
md5sum = 6ada1fd4af0a451516443bfb6d00b717
[template-balancer]
md5sum = 818ab59ae966114735866aecef7a8563
[template-apache-conf]
md5sum = bb329fc28bef095a01efc901d2f84149
[template-create-erp5-site-real]
md5sum = 61824aab2172d21f1d6403a35cab47cd
[versions]
python-memcached = 1.47
facebook-sdk = 0.4.0
google-api-python-client = 1.2
[networkcache]
# signature certificates of the following uploaders.
# Romain Courteaud
# Sebastien Robin
# Kazuhiko Shiozaki
# Cedric de Saint Martin
# Yingjie Xu
# Gabriel Monnerat
# Łukasz Nowak
# Test Agent (Automatic update from tests)
# Aurélien Calonne
signature-certificate-list =
-----BEGIN CERTIFICATE-----
MIIB4DCCAUkCADANBgkqhkiG9w0BAQsFADA5MQswCQYDVQQGEwJGUjEZMBcGA1UE
CBMQRGVmYXVsdCBQcm92aW5jZTEPMA0GA1UEChMGTmV4ZWRpMB4XDTExMDkxNTA5
MDAwMloXDTEyMDkxNTA5MDAwMlowOTELMAkGA1UEBhMCRlIxGTAXBgNVBAgTEERl
ZmF1bHQgUHJvdmluY2UxDzANBgNVBAoTBk5leGVkaTCBnzANBgkqhkiG9w0BAQEF
AAOBjQAwgYkCgYEApYZv6OstoqNzxG1KI6iE5U4Ts2Xx9lgLeUGAMyfJLyMmRLhw
boKOyJ9Xke4dncoBAyNPokUR6iWOcnPHtMvNOsBFZ2f7VA28em3+E1JRYdeNUEtX
Z0s3HjcouaNAnPfjFTXHYj4um1wOw2cURSPuU5dpzKBbV+/QCb5DLheynisCAwEA
ATANBgkqhkiG9w0BAQsFAAOBgQBCZLbTVdrw3RZlVVMFezSHrhBYKAukTwZrNmJX
mHqi2tN8tNo6FX+wmxUUAf3e8R2Ymbdbn2bfbPpcKQ2fG7PuKGvhwMG3BlF9paEC
q7jdfWO18Zp/BG7tagz0jmmC4y/8akzHsVlruo2+2du2freE8dK746uoMlXlP93g
QUUGLQ==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB8jCCAVugAwIBAgIJAPu2zchZ2BxoMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
BAMMB3RzeGRldjMwHhcNMTExMDE0MTIxNjIzWhcNMTIxMDEzMTIxNjIzWjASMRAw
DgYDVQQDDAd0c3hkZXYzMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCrPbh+
YGmo6mWmhVb1vTqX0BbeU0jCTB8TK3i6ep3tzSw2rkUGSx3niXn9LNTFNcIn3MZN
XHqbb4AS2Zxyk/2tr3939qqOrS4YRCtXBwTCuFY6r+a7pZsjiTNddPsEhuj4lEnR
L8Ax5mmzoi9nE+hiPSwqjRwWRU1+182rzXmN4QIDAQABo1AwTjAdBgNVHQ4EFgQU
/4XXREzqBbBNJvX5gU8tLWxZaeQwHwYDVR0jBBgwFoAU/4XXREzqBbBNJvX5gU8t
LWxZaeQwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQA07q/rKoE7fAda
FED57/SR00OvY9wLlFEF2QJ5OLu+O33YUXDDbGpfUSF9R8l0g9dix1JbWK9nQ6Yd
R/KCo6D0sw0ZgeQv1aUXbl/xJ9k4jlTxmWbPeiiPZEqU1W9wN5lkGuLxV4CEGTKU
hJA/yXa1wbwIPGvX3tVKdOEWPRXZLg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB7jCCAVegAwIBAgIJAJWA0jQ4o9DGMA0GCSqGSIb3DQEBBQUAMA8xDTALBgNV
BAMMBHg2MXMwIBcNMTExMTI0MTAyNDQzWhgPMjExMTEwMzExMDI0NDNaMA8xDTAL
BgNVBAMMBHg2MXMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANdJNiFsRlkH
vq2kHP2zdxEyzPAWZH3CQ3Myb3F8hERXTIFSUqntPXDKXDb7Y/laqjMXdj+vptKk
3Q36J+8VnJbSwjGwmEG6tym9qMSGIPPNw1JXY1R29eF3o4aj21o7DHAkhuNc5Tso
67fUSKgvyVnyH4G6ShQUAtghPaAwS0KvAgMBAAGjUDBOMB0GA1UdDgQWBBSjxFUE
RfnTvABRLAa34Ytkhz5vPzAfBgNVHSMEGDAWgBSjxFUERfnTvABRLAa34Ytkhz5v
PzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAFLDS7zNhlrQYSQO5KIj
z2RJe3fj4rLPklo3TmP5KLvendG+LErE2cbKPqnhQ2oVoj6u9tWVwo/g03PMrrnL
KrDm39slYD/1KoE5kB4l/p6KVOdeJ4I6xcgu9rnkqqHzDwI4v7e8/D3WZbpiFUsY
vaZhjNYKWQf79l6zXfOvphzJ
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAO4V/jiMoICoMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMjMyMCAXDTEyMDIxNjExMTAyM1oYDzIxMTIwMTIzMTExMDIzWjAT
MREwDwYDVQQDDAhDT01QLTIzMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
wi/3Z8W9pUiegUXIk/AiFDQ0UJ4JFAwjqr+HSRUirlUsHHT+8DzH/hfcTDX1I5BB
D1ADk+ydXjMm3OZrQcXjn29OUfM5C+g+oqeMnYQImN0DDQIOcUyr7AJc4xhvuXQ1
P2pJ5NOd3tbd0kexETa1LVhR6EgBC25LyRBRae76qosCAwEAAaNQME4wHQYDVR0O
BBYEFMDmW9aFy1sKTfCpcRkYnP6zUd1cMB8GA1UdIwQYMBaAFMDmW9aFy1sKTfCp
cRkYnP6zUd1cMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAskbFizHr
b6d3iIyN+wffxz/V9epbKIZVEGJd/6LrTdLiUfJPec7FaxVCWNyKBlCpINBM7cEV
Gn9t8mdVQflNqOlAMkOlUv1ZugCt9rXYQOV7rrEYJBWirn43BOMn9Flp2nibblby
If1a2ZoqHRxoNo2yTmm7TSYRORWVS+vvfjY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAIlBksrZVkK8MA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMzU3MCAXDTEyMDEyNjEwNTUyOFoYDzIxMTIwMTAyMTA1NTI4WjAT
MREwDwYDVQQDDAhDT01QLTM1NzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
ts+iGUwi44vtIfwXR8DCnLtHV4ydl0YTK2joJflj0/Ws7mz5BYkxIU4fea/6+VF3
i11nwBgYgxQyjNztgc9u9O71k1W5tU95yO7U7bFdYd5uxYA9/22fjObaTQoC4Nc9
mTu6r/VHyJ1yRsunBZXvnk/XaKp7gGE9vNEyJvPn2bkCAwEAAaNQME4wHQYDVR0O
BBYEFKuGIYu8+6aEkTVg62BRYaD11PILMB8GA1UdIwQYMBaAFKuGIYu8+6aEkTVg
62BRYaD11PILMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAMoTRpBxK
YLEZJbofF7gSrRIcrlUJYXfTfw1QUBOKkGFFDsiJpEg4y5pUk1s5Jq9K3SDzNq/W
it1oYjOhuGg3al8OOeKFrU6nvNTF1BAvJCl0tr3POai5yXyN5jlK/zPfypmQYxE+
TaqQSGBJPVXYt6lrq/PRD9ciZgKLOwEqK8w=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAPHoWu90gbsgMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMMCXZpZmlibm9kZTAeFw0xMjAzMTkyMzIwNTVaFw0xMzAzMTkyMzIwNTVaMBQx
EjAQBgNVBAMMCXZpZmlibm9kZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
ozBijpO8PS5RTeKTzA90vi9ezvv4vVjNaguqT4UwP9+O1+i6yq1Y2W5zZxw/Klbn
oudyNzie3/wqs9VfPmcyU9ajFzBv/Tobm3obmOqBN0GSYs5fyGw+O9G3//6ZEhf0
NinwdKmrRX+d0P5bHewadZWIvlmOupcnVJmkks852BECAwEAAaNQME4wHQYDVR0O
BBYEFF9EtgfZZs8L2ZxBJxSiY6eTsTEwMB8GA1UdIwQYMBaAFF9EtgfZZs8L2ZxB
JxSiY6eTsTEwMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAc43YTfc6
baSemaMAc/jz8LNLhRE5dLfLOcRSoHda8y0lOrfe4lHT6yP5l8uyWAzLW+g6s3DA
Yme/bhX0g51BmI6gjKJo5DoPtiXk/Y9lxwD3p7PWi+RhN+AZQ5rpo8UfwnnN059n
yDuimQfvJjBFMVrdn9iP6SfMjxKaGk6gVmI=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAMNZBmoIOXPBMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMTMyMCAXDTEyMDUwMjEyMDQyNloYDzIxMTIwNDA4MTIwNDI2WjAT
MREwDwYDVQQDDAhDT01QLTEzMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
6peZQt1sAmMAmSG9BVxxcXm8x15kE9iAplmANYNQ7z2YO57c10jDtlYlwVfi/rct
xNUOKQtc8UQtV/fJWP0QT0GITdRz5X/TkWiojiFgkopza9/b1hXs5rltYByUGLhg
7JZ9dZGBihzPfn6U8ESAKiJzQP8Hyz/o81FPfuHCftsCAwEAAaNQME4wHQYDVR0O
BBYEFNuxsc77Z6/JSKPoyloHNm9zF9yqMB8GA1UdIwQYMBaAFNuxsc77Z6/JSKPo
yloHNm9zF9yqMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAl4hBaJy1
cgiNV2+Z5oNTrHgmzWvSY4duECOTBxeuIOnhql3vLlaQmo0p8Z4c13kTZq2s3nhd
Loe5mIHsjRVKvzB6SvIaFUYq/EzmHnqNdpIGkT/Mj7r/iUs61btTcGUCLsUiUeci
Vd0Ozh79JSRpkrdI8R/NRQ2XPHAo+29TT70=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAKRvzcy7OH0UMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtNzcyMCAXDTEyMDgxMDE1NDI1MVoYDzIxMTIwNzE3MTU0MjUxWjAT
MREwDwYDVQQDDAhDT01QLTc3MjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
o7aipd6MbnuGDeR1UJUjuMLQUariAyQ2l2ZDS6TfOwjHiPw/mhzkielgk73kqN7A
sUREx41eTcYCXzTq3WP3xCLE4LxLg1eIhd4nwNHj8H18xR9aP0AGjo4UFl5BOMa1
mwoyBt3VtfGtUmb8whpeJgHhqrPPxLoON+i6fIbXDaUCAwEAAaNQME4wHQYDVR0O
BBYEFEfjy3OopT2lOksKmKBNHTJE2hFlMB8GA1UdIwQYMBaAFEfjy3OopT2lOksK
mKBNHTJE2hFlMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAaNRx6YN2
M/p3R8/xS6zvH1EqJ3FFD7XeAQ52WuQnKSREzuw0dsw12ClxjcHiQEFioyTiTtjs
5pW18Ry5Ie7iFK4cQMerZwWPxBodEbAteYlRsI6kePV7Gf735Y1RpuN8qZ2sYL6e
x2IMeSwJ82BpdEI5niXxB+iT0HxhmR+XaMI=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB+DCCAWGgAwIBAgIJAKGd0vpks6T/MA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMMCUNPTVAtMTU4NDAgFw0xMzA2MjAxMjE5MjBaGA8yMTEzMDUyNzEyMTkyMFow
FDESMBAGA1UEAwwJQ09NUC0xNTg0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQDZTH9etPUC+wMZQ3UIiOwyyCfHsJ+7duCFYjuo1uZrhtDt/fp8qb8qK9ob+df3
EEYgA0IgI2j/9jNUEnKbc5+OrfKznzXjrlrH7zU8lKBVNCLzQuqBKRNajZ+UvO8R
nlqK2jZCXP/p3HXDYUTEwIR5W3tVCEn/Vda4upTLcPVE5wIDAQABo1AwTjAdBgNV
HQ4EFgQU7KXaNDheQWoy5uOU01tn1M5vNkEwHwYDVR0jBBgwFoAU7KXaNDheQWoy
5uOU01tn1M5vNkEwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQASmqCU
Znbvu6izdicvjuE3aKnBa7G++Fdp2bdne5VCwVbVLYCQWatB+n4crKqGdnVply/u
+uZ16u1DbO9rYoKgWqjLk1GfiLw5v86pd5+wZd5I9QJ0/Sbz2vZk5S4ciMIGwArc
m711+GzlW5xe6GyH9SZaGOPAdUbI6JTDwLzEgA==
-----END CERTIFICATE-----
......@@ -7,7 +7,11 @@ parts -= wendelin.core
parts += wendelin.core-dev
# also tell erp5 to use -dev eggs instead of released ones
[eggs]
# tell erp5 to use -dev eggs instead of released ones
eggs -= ${wendelin.core:egg}
eggs += ${wendelin.core-dev:egg}
# kill -USR1 <runzope-pid> -> traceback of all threads -> stdout
# ( logged to slappartX/.slappartX_zope-Y.log )
Products.signalstack
(in no particular order)
General:
- ipv6 support (besides frontend-backend apache connection)
requires important changes at ERP5 level
- resilience
- make mariadb user accounts accept connections only from relevant IPs
or make x509 mandatory (needs ZMySQLD*A support)
- postfix
Monitoring:
- daily slow-query digest
make percona toolkit available in mysql instance and decide how to send digest
- daily apachedex
Backups:
- flush binlogs independently of full backups (in addition to flushing them anyway when a full backup is created)
- rotate tidstorage consistency points
- make mysql backup path an instance parameter
- make srv/backup/zodb the default value of a parameter (zodb = {'backup_root': ...} or similar) so that a single value can be changed to relocate a partition's zodb backups
- make srv/backup/logrotate customisable (per partition, otherwise files will overwrite each other)
LoadModule unixd_module modules/mod_unixd.so
LoadModule access_compat_module modules/mod_access_compat.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule authz_host_module modules/mod_authz_host.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule setenvif_module modules/mod_setenvif.so
LoadModule version_module modules/mod_version.so
LoadModule proxy_module modules/mod_proxy.so
LoadModule proxy_http_module modules/mod_proxy_http.so
LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
LoadModule ssl_module modules/mod_ssl.so
LoadModule mime_module modules/mod_mime.so
LoadModule dav_module modules/mod_dav.so
LoadModule dav_fs_module modules/mod_dav_fs.so
LoadModule negotiation_module modules/mod_negotiation.so
LoadModule rewrite_module modules/mod_rewrite.so
LoadModule headers_module modules/mod_headers.so
LoadModule deflate_module modules/mod_deflate.so
LoadModule filter_module modules/mod_filter.so
AddOutputFilterByType DEFLATE text/cache-manifest text/html text/plain text/css application/hal+json application/json application/x-javascript text/xml application/xml application/rss+xml text/javascript image/svg+xml
PidFile "{{ parameter_dict['pid-file'] }}"
ServerAdmin admin@
TypesConfig conf/mime.types
AddType application/x-compress .Z
AddType application/x-gzip .gz .tgz
ServerTokens Prod
ServerSignature Off
TraceEnable Off
TimeOut {{ parameter_dict['timeout'] }}
SSLCertificateFile {{ parameter_dict['cert'] }}
SSLCertificateKeyFile {{ parameter_dict['key'] }}
SSLRandomSeed startup builtin
SSLRandomSeed connect builtin
SSLProtocol All -SSLv2
#SSLHonorCipherOrder on
{% if parameter_dict['cipher'] -%}
SSLCipherSuite {{ parameter_dict['cipher'] }}
{%- endif %}
SSLSessionCache shmcb:{{ parameter_dict['ssl-session-cache'] }}(512000)
SSLProxyEngine On
# As the backend trusts the REMOTE_USER header, always unset it
RequestHeader unset REMOTE_USER
{% if parameter_dict['ca-cert'] -%}
SSLVerifyClient require
RequestHeader set REMOTE_USER %{SSL_CLIENT_S_DN_CN}s
SSLCACertificateFile {{ parameter_dict['ca-cert'] }}
SSLCARevocationCheck chain
SSLCARevocationFile {{ parameter_dict['crl'] }}
{%- endif %}
ErrorLog "{{ parameter_dict['error-log'] }}"
# Default apache log format with the request time in microseconds at the end
LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %D" combined
CustomLog "{{ parameter_dict['access-log'] }}" combined
<Directory />
Options FollowSymLinks
AllowOverride None
Allow from all
</Directory>
RewriteEngine On
{% for port, _, backend in parameter_dict['backend-list'] -%}
{% for ip in parameter_dict['ip-list'] -%}
Listen {{ ip }}:{{ port }}
{% endfor -%}
<VirtualHost *:{{ port }}>
SSLEngine on
RewriteRule ^/(.*) {{ backend }}/$1 [L,P]
</VirtualHost>
{% endfor -%}
{% set server_check_path = parameter_dict['server-check-path'] -%}
global
maxconn 4096
stats socket {{ parameter_dict['socket-path'] }} level admin
defaults
log global
mode http
option httplog
option dontlognull
retries 1
option redispatch
maxconn 2000
cookie SERVERID rewrite
http-send-name-header X-Balancer-Current-Server
balance roundrobin
stats uri /haproxy
stats realm Global\ statistics
# It is useless to have a timeout much bigger than apache's.
# By default apache uses 300s, so we set slightly more in order to
# make sure that apache closes the connection first.
timeout server 305s
# Stop waiting in queue for a zope to become available.
# If no zope can be reached after one minute, consider the request will
# never succeed.
timeout queue 60s
# The connection should be immediate on a LAN,
# so we should not set more than 5 seconds, and even that may already be too much.
timeout connect 5s
# As requested in haproxy doc, make this "at least equal to timeout server".
timeout client 305s
# Use "option forceclose" to not preserve client & server persistent connections
# while handling every incoming request individually, dispatching them one after
# another to servers, in HTTP close mode. This is really needed when haproxy
# is configured with maxconn to 1, without this option browsers are unable
# to render a page
option forceclose
{% for name, (port, backend_list) in parameter_dict['backend-dict'].items() -%}
listen {{ name }} {{ parameter_dict['ip'] }}:{{ port }}
http-request set-header X-Balancer-Current-Cookie SERVERID
{% set has_webdav = [] -%}
{% for address, connection_count, webdav in backend_list -%}
{% if webdav %}{% do has_webdav.append(None) %}{% endif -%}
{% set server_name = name ~ '-' ~ loop.index0 -%}
server {{ server_name }} {{ address }} cookie {{ server_name }} check inter 3s rise 1 fall 2 maxqueue 5 maxconn {{ connection_count }}
{% endfor -%}
{%- if not has_webdav and server_check_path %}
option httpchk GET {{ server_check_path }}
{% endif -%}
{% endfor %}
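{# For a hypothetical family "default" served on 10.0.0.1:2150 by a single zope
at 10.0.0.1:2200 with maxconn 1 and server-check-path /erp5/getId, the loop
above renders roughly:
listen default 10.0.0.1:2150
http-request set-header X-Balancer-Current-Cookie SERVERID
server default-0 10.0.0.1:2200 cookie default-0 check inter 3s rise 1 fall 2 maxqueue 5 maxconn 1
option httpchk GET /erp5/getId
-#}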
{% set part_list = [] -%}
{% set ssl_parameter_dict = slapparameter_dict.get('ssl', {}) %}
{% macro section(name) %}{% do part_list.append(name) %}{{ name }}{% endmacro -%}
{% set use_ipv6 = slapparameter_dict.get('use-ipv6', False) -%}
{#
XXX: This template only supports exactly one IPv4 and (if ipv6 is used) one IPv6
per partition. No more (undefined result), no less (IndexError).
-#}
# TODO: insert varnish between apache & haproxy.
# And think of a way to specify which URLs go through varnish and which go
# directly to haproxy (maybe just by passing a literal configuration file chunk).
{% set ipv4 = (ipv4_set | list)[0] -%}
{% set apache_ip_list = [ipv4] -%}
{% if ipv6_set -%}
{% set ipv6 = (ipv6_set | list)[0] -%}
{% do apache_ip_list.append('[' ~ ipv6 ~ ']') -%}
{% endif -%}
[simplefile]
recipe = slapos.recipe.template:jinja2
template = inline:{{ '{{ content }}' }}
{% macro simplefile(section_name, file_path, content, mode='') -%}
{% set content_section_name = section_name ~ '-content' -%}
[{{ content_section_name }}]
content = {{ dumps(content) }}
[{{ section(section_name) }}]
< = simplefile
rendered = {{ file_path }}
context = key content {{content_section_name}}:content
mode = {{ mode }}
{%- endmacro %}
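{# Illustration only (not part of the profile): under the assumptions of this
# template, a call such as simplefile('apache-ssl-key', '${apache-conf-ssl:key}', ssl_parameter_dict['key'])
# expands to roughly:
#   [apache-ssl-key-content]
#   content = <dumped key material>
#   [apache-ssl-key]
#   < = simplefile
#   rendered = ${apache-conf-ssl:key}
#   context = key content apache-ssl-key-content:content
#   mode =
# and, via the section() macro, also appends "apache-ssl-key" to part_list.
-#}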
{% if use_ipv6 -%}
[zope-tunnel-base]
recipe = slapos.cookbook:ipv4toipv6
runner-path = ${directory:services}/${:base-name}
6tunnel-path = {{ parameter_dict['6tunnel'] }}/bin/6tunnel
shell-path = {{ parameter_dict['dash'] }}/bin/dash
ipv4 = {{ ipv4 }}
{% endif -%}
{% set haproxy_dict = {} -%}
{% set apache_dict = {} -%}
{% set next_port = slapparameter_dict['tcpv4-port'] -%}
{% for family_name, parameter_id_list in slapparameter_dict['zope-family-dict'].items() -%}
{% set zope_family_address_list = [] -%}
{% set has_webdav = [] -%}
{% for parameter_id in parameter_id_list -%}
{% set zope_address_list = slapparameter_dict[parameter_id] -%}
{% for zope_address, maxconn, webdav in zope_address_list -%}
{% if webdav -%}
{% do has_webdav.append(None) %}
{% endif -%}
{% if use_ipv6 -%}
[{{ section('zope-tunnel-' ~ next_port) }}]
< = zope-tunnel-base
base-name = {{ 'zeo-tunnel-' ~ next_port }}
ipv4-port = {{ next_port }}
ipv6-port = {{ zope_address.split(']:')[1] }}
ipv6 = {{ zope_address.split(']:')[0][1:] }}
{% set zope_effective_address = ipv4 ~ ":" ~ next_port -%}
{% set next_port = next_port + 1 -%}
{% else -%}
{% set zope_effective_address = zope_address -%}
{% endif -%}
{% do zope_family_address_list.append((zope_effective_address, maxconn, webdav)) -%}
{% endfor -%}
{% endfor -%}
{# Make rendering fail artificially if any family has no known backend.
# This is useful as haproxy's hot-reconfiguration mechanism is
# supervisord-incompatible.
# As jinja2 postpones KeyError until place-holder value is actually used,
# do a no-op getitem.
-#}
{% do zope_family_address_list[0][0] -%}
{% set haproxy_port = next_port -%}
{% set next_port = next_port + 1 -%}
{% do haproxy_dict.__setitem__(family_name, (haproxy_port, zope_family_address_list)) -%}
{% if has_webdav -%}
{% set internal_scheme = 'http' -%}{# mod_rewrite does not recognise webdav scheme -#}
{% set external_scheme = 'webdavs' -%}
{% else %}
{% set internal_scheme = 'http' -%}
{% set external_scheme = 'https' -%}
{% endif -%}
{% do apache_dict.__setitem__(family_name, (next_port, external_scheme, internal_scheme ~ '://' ~ ipv4 ~ ':' ~ haproxy_port ~ slapparameter_dict['backend-path'])) -%}
{% set next_port = next_port + 1 -%}
{% endfor -%}
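{# Illustration only, with made-up values: assuming use-ipv6 is false, the
# partition ipv4 is 10.0.0.1, tcpv4-port is 2150 and the family "default" has
# a single non-webdav zope backend at 10.0.0.2:2200 with maxconn 4, the loop
# above leaves roughly:
#   haproxy_dict = {'default': (2150, [('10.0.0.2:2200', 4, False)])}
#   apache_dict  = {'default': (2151, 'https', 'http://10.0.0.1:2150<backend-path>')}
# i.e. each family gets one haproxy port and one apache (SSL) port.
-#}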
[haproxy-cfg-parameter-dict]
socket-path = ${directory:run}/haproxy.sock
server-check-path = {{ dumps(slapparameter_dict['haproxy-server-check-path']) }}
backend-dict = {{ dumps(haproxy_dict) }}
ip = {{ ipv4 }}
[haproxy-cfg]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['template-haproxy-cfg'] }}
rendered = ${directory:etc}/haproxy.cfg
context = section parameter_dict haproxy-cfg-parameter-dict
extensions = jinja2.ext.do
[{{ section('haproxy') }}]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/haproxy
command-line = "{{ parameter_dict['haproxy'] }}/sbin/haproxy" -f "${haproxy-cfg:rendered}"
{# TODO: build socat and wrap it as "${directory:bin}/haproxy-ctl" to connect to "${haproxy-cfg-parameter-dict:socket-path}" #}
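{# Untested sketch, assuming socat gets built: such a wrapper could simply run
# "socat readline unix-connect:${haproxy-cfg-parameter-dict:socket-path}"
# to open an interactive session on haproxy's admin socket. -#}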
[apache-conf-ssl]
cert = ${directory:apache-conf}/apache.crt
key = ${directory:apache-conf}/apache.pem
ca-cert = ${directory:apache-conf}/ca.crt
crl = ${directory:apache-conf}/crl.pem
[apache-conf-parameter-dict]
backend-list = {{ dumps(apache_dict.values()) }}
ip-list = {{ dumps(apache_ip_list) }}
pid-file = ${directory:run}/apache.pid
error-log = ${directory:log}/apache-error.log
access-log = ${directory:log}/apache-access.log
# Apache 2.4's default value (60 seconds) can be a bit too short
timeout = 300
# Basic SSL server configuration
cert = ${apache-ssl:cert}
key = ${apache-ssl:key}
cipher =
ssl-session-cache = ${directory:log}/apache-ssl-session-cache
# Client x509 auth
ca-cert = ${apache-ssl-client:cert}
crl = ${apache-ssl-client:crl}
[apache-conf]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['template-apache-conf'] }}
rendered = ${directory:apache-conf}/apache.conf
context = section parameter_dict apache-conf-parameter-dict
[{{ section('apache') }}]
recipe = slapos.cookbook:wrapper
wrapper-path = ${directory:services}/apache
command-line = "{{ parameter_dict['apache'] }}/bin/httpd" -f "${apache-conf:rendered}" -DFOREGROUND
[{{ section('apache-promise') }}]
# Check one of the apache ports on ipv4, and expect the other ports and ipv6 to behave consistently
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/apache
hostname = {{ ipv4 }}
port = {{ apache_dict.values()[0][0] }}
[publish]
recipe = slapos.cookbook:publish.serialised
{% for family_name, (apache_port, scheme, _) in apache_dict.items() -%}
{{ family_name ~ '-v6' }} = {% if ipv6_set %}{{ scheme ~ '://[' ~ ipv6 ~ ']:' ~ apache_port }}{% endif %}
{{ family_name }} = {{ scheme ~ '://' ~ ipv4 ~ ':' ~ apache_port }}
{% endfor -%}
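{# Continuing the made-up example above (no ipv6): this would publish
# default-v6 = (empty) and default = https://10.0.0.1:2151 -#}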
[apache-ssl]
{% if ssl_parameter_dict.get('key') -%}
key = ${apache-ssl-key:rendered}
cert = ${apache-ssl-cert:rendered}
{{ simplefile('apache-ssl-key', '${apache-conf-ssl:key}', ssl_parameter_dict['key']) }}
{{ simplefile('apache-ssl-cert', '${apache-conf-ssl:cert}', ssl_parameter_dict['cert']) }}
{% else %}
recipe = plone.recipe.command
command = "{{ parameter_dict['openssl'] }}/bin/openssl" req -newkey rsa -batch -new -x509 -days 3650 -nodes -keyout "${:key}" -out "${:cert}"
key = ${apache-conf-ssl:key}
cert = ${apache-conf-ssl:cert}
{%- endif %}
[apache-ssl-client]
{% if ssl_parameter_dict.get('ca-cert') -%}
cert = ${apache-ssl-ca:rendered}
crl = ${apache-ssl-crl:rendered}
{{ simplefile('apache-ssl-ca', '${apache-conf-ssl:ca-cert}', ssl_parameter_dict['ca-cert']) }}
{{ simplefile('apache-ssl-crl', '${apache-conf-ssl:crl}', ssl_parameter_dict['crl']) }}
{% else %}
cert =
crl =
{%- endif %}
[logrotate-apache]
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
name = apache
log = ${apache-conf-parameter-dict:error-log} ${apache-conf-parameter-dict:access-log}
post = {{ parameter_dict['bin-directory'] }}/slapos-kill --pidfile ${apache-conf-parameter-dict:pid-file} -s USR1
[directory]
recipe = slapos.cookbook:mkdirectory
apache-conf = ${:etc}/apache
bin = ${buildout:directory}/bin
etc = ${buildout:directory}/etc
promise = ${directory:etc}/promise
services = ${:etc}/run
var = ${buildout:directory}/var
run = ${:var}/run
log = ${:var}/log
ca-dir = ${buildout:directory}/srv/ssl
requests = ${:ca-dir}/requests
private = ${:ca-dir}/private
certs = ${:ca-dir}/certs
newcerts = ${:ca-dir}/newcerts
crl = ${:ca-dir}/crl
[buildout]
extends = {{ logrotate_cfg }}
parts +=
publish
logrotate-apache
{{ part_list | join('\n ') }}
{% if software_type == slap_software_type -%}
{% set json = json_module.loads(parameter_dict.get('cloudooo-json', '{}')) -%}
{% set bin_directory = parameter_dict['buildout-bin-directory'] -%}
{% set use_ipv6 = slapparameter_dict.get('use-ipv6', False) -%}
[buildout]
parts =
publish-cloudooo-connection-information
......@@ -8,21 +7,24 @@ parts =
resiliency-exclude-file
promise
promise-openoffice
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
{% if use_ipv6 %}promise-tunnel{% endif %}
[publish-cloudooo-connection-information]
recipe = slapos.cookbook:publishurl
recipe = slapos.cookbook:publish.serialised
{% if use_ipv6 -%}
url = cloudooo://[${ipv6toipv4:ipv6}]:${ipv6toipv4:ipv6-port}/
{% else -%}
url = cloudooo://${cloudooo-instance:ip}:${cloudooo-instance:port}/
{% endif -%}
[cloudooo-instance]
recipe = slapos.cookbook:generic.cloudooo
# Network options
ip = ${slap-network-information:local-ipv4}
port = 23000
openoffice-port = 23060
ip = {{ (ipv4_set | list)[0] }}
{% set tcpv4_port = slapparameter_dict['tcpv4-port'] -%}
port = {{ tcpv4_port }}
openoffice-port = {{ tcpv4_port + 1 }}
# Paths
configuration-file = ${directory:etc}/cloudooo.cfg
......@@ -46,7 +48,8 @@ recipe = slapos.cookbook:fontconfig
conf-path = ${directory:etc}/font.conf
font-system-folder = {{ parameter_dict['fonts'] }}
font-folder = ${directory:font}
url-list = {{ json.get('font_url_list', []) | join(' ') }}
{# XXX: violates the "instantiation happens offline" rule -#}
url-list = {{ dumps(slapparameter_dict.get('font-url-list', []) | join(' ')) }}
service-folder = ${directory:service}
onetimedownload_path = {{ bin_directory }}/onetimedownload
......@@ -68,6 +71,25 @@ path = ${directory:promise}/openoffice
hostname = ${cloudooo-instance:ip}
port = ${cloudooo-instance:openoffice-port}
{% if use_ipv6 -%}
[promise-tunnel]
recipe = slapos.cookbook:check_port_listening
path = ${directory:promise}/tunnel
hostname = ${ipv6toipv4:ipv6}
port = ${ipv6toipv4:ipv6-port}
[ipv6toipv4]
recipe = slapos.cookbook:ipv6toipv4
runner-path = ${directory:service}/${:base-name}
6tunnel-path = {{ parameter_dict['6tunnel'] }}/bin/6tunnel
shell-path = {{ parameter_dict['dash'] }}/bin/dash
ipv4 = ${cloudooo-instance:ip}
ipv6 = {{ (ipv6_set | list)[0] }}
ipv6-port = ${cloudooo-instance:port}
ipv4-port = ${cloudooo-instance:port}
base-name = cloudooo-tunnel
{% endif -%}
# rest of parts are candidates for some generic stuff
[directory]
recipe = slapos.cookbook:mkdirectory
......@@ -77,4 +99,3 @@ service = ${:etc}/run
promise = ${:etc}/promise
cloudooo-data = ${:srv}/cloudooo
font = ${:srv}/font
{% endif %}
[directory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
services = ${:etc}/run
promise = ${:etc}/promise
[erp5-bootstrap]
recipe = slapos.cookbook:erp5.bootstrap
runner-path = ${directory:services}/erp5-bootstrap
{# Note: a random domain name will be picked if several point to the same IP -#}
{% set reverse_hosts = {} -%}
{% for x, y in publish['hosts-dict'].iteritems() -%}
{% do reverse_hosts.__setitem__(y, x) -%}
{% endfor -%}
{# XXX: Expect the first database to be the one to use for catalog. -#}
{% set mysql_parsed = urlparse.urlparse(publish['mariadb-database-list'][0]) -%}
mysql-url = {{ dumps(urlparse.urlunparse(mysql_parsed[:1] + (mysql_parsed.username + ":" + mysql_parsed.password + "@" + reverse_hosts.get(mysql_parsed.hostname, mysql_parsed.hostname) + ':' ~ mysql_parsed.port, ) + mysql_parsed[2:])) }}
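{# Illustration only, with made-up values: if the first entry of
# 'mariadb-database-list' is mysql://user:insecure@10.0.0.2:2099/erp5 and
# 'hosts-dict' maps mariadb1 to 10.0.0.2, the line above publishes
# mysql-url = mysql://user:insecure@mariadb1:2099/erp5 -#}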
{# Pick the first http[s] family found, they should be all equivalent anyway. -#}
{% set family_list = [] -%}
{% for key, value in publish.items() -%}
{% if key.startswith('family-') and value.startswith('http') -%}
{% do family_list.append(value.split('://', 1)) -%}
{% endif -%}
{% endfor -%}
zope-url = {{ dumps(family_list[0][0] + '://' + publish['inituser-login'] + ':' + publish['inituser-password'] + '@' + family_list[0][1] + '/' + publish['site-id']) }}
[promise-erp5-site]
recipe = slapos.cookbook:check_url_available
url = ${erp5-bootstrap:zope-url}
path = ${directory:promise}/erp5-site
dash_path = {{ parameter_dict['dash-location'] }}/bin/dash
curl_path = {{ parameter_dict['curl-location'] }}/bin/curl
[buildout]
parts = promise-erp5-site
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
{# To create the script (wrapper) which creates the ERP5Site object, pieces
# of what is published by the extended profile are required. They are not
# available at the time the file you are reading is rendered, and because
# those values are composite (lists, dicts...) whose individual items are
# needed, they cannot be accessed here. Instead, make buildout provide these
# values to a second template, rendered at a more convenient time.
-#}
[instance-create-erp5-site-real-parameters]
dash-location = {{ parameter_dict['dash-location'] }}
curl-location = {{ parameter_dict['curl-location'] }}
[instance-create-erp5-site-real]
recipe = slapos.recipe.template:jinja2
template = {{ parameter_dict['template-create-erp5-site-real'] }}
rendered = ${buildout:directory}/instance-create-erp5-site-real.cfg
extensions = jinja2.ext.do
context =
import urlparse urlparse
section publish publish
section parameter_dict instance-create-erp5-site-real-parameters
key eggs_directory buildout:eggs-directory
key develop_eggs_directory buildout:develop-eggs-directory
[instance-create-erp5-site-real-run]
recipe = slapos.recipe.build
script =
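  # Note: sys.argv[0] is the buildout being executed; this re-runs it against
  # the second-stage profile rendered above, using its own .installed file.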
import subprocess, sys
subprocess.check_call([
sys.argv[0],
"buildout:directory=${buildout:directory}",
"buildout:installed=.installed-${:_buildout_section_name_}.cfg",
"-Uoc", self.options['run'],
])
run = ${instance-create-erp5-site-real:rendered}
slapos_promise =
[buildout]
extends = {{ parameter_dict['instance-erp5'] }}
parts +=
instance-create-erp5-site-real-run
{% if slap_software_type == software_type -%}
#############################
#
# Request erp5 production environment
#
#############################
[buildout]
parts =
request-tidstorage
basedirectory
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
versions = versions
[request-common]
recipe = slapos.cookbook:request
software-url = ${slap-connection:software-release-url}
return = url
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
[request-mariadb]
<=request-common
name = MariaDB DataBase
software-type = mariadb
config-mariadb-json = ${slap-parameter:mariadb-json}
sla-computer_guid = ${slap-parameter:mariadb-computer-guid}
[request-cloudooo]
<=request-common
name = Cloudooo
config-cloudooo-json = ${slap-parameter:cloudooo-json}
software-type = cloudooo
sla-computer_guid = ${slap-parameter:cloudooo-computer-guid}
[request-memcached]
<=request-common
name = Memcached
software-type = memcached
sla-computer_guid = ${slap-parameter:memcached-computer-guid}
[request-kumofs]
<=request-common
name = KumoFS
software-type = kumofs
sla-computer_guid = ${slap-parameter:kumofs-computer-guid}
[request-tidstorage]
<=request-common
name = TidStorage
return = url-login
config-json = ${slap-parameter:json}
config-mysql-url = ${request-mariadb:connection-url}
config-memcached-url = ${request-memcached:connection-url}
config-cloudooo-url = ${request-cloudooo:connection-url}
config-kumofs-url = ${request-kumofs:connection-url}
config-bt5 = ${slap-parameter:bt5}
config-bt5-repository-url = ${slap-parameter:bt5-repository-url}
config-smtp-url = ${slap-parameter:smtp-url}
software-type = tidstorage
sla-computer_guid = ${slap-parameter:tidstorage-computer-guid}
[request-varnish]
<=request-common
name = Varnish
config-tidstorage-url = ${request-tidstorage:connection-url-login}
config-web-checker-mail-address = ${slap-parameter:web-checker-mail-address}
config-web-checker-smtp-host = ${slap-parameter:web-checker-smtp-host}
software-type = varnish
sla-computer_guid = ${slap-parameter:varnish-computer-guid}
[slap-parameter]
# Default value if no computer_guid is specified for each type
mariadb-computer-guid = ${slap-connection:computer-id}
cloudooo-computer-guid = ${slap-connection:computer-id}
memcached-computer-guid = ${slap-connection:computer-id}
kumofs-computer-guid = ${slap-connection:computer-id}
tidstorage-computer-guid = ${slap-connection:computer-id}
varnish-computer-guid = ${slap-connection:computer-id}
cloudooo-json =
bt5 = {{ bt5_list }}
bt5-repository-url = {{ local_bt5_repository }}
smtp-url = smtp://localhost:25/
[basedirectory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc/run
{% endif %}
[versions]
slapos.core = {{ slapos_core_version }}
#############################
#
# Request erp5 development environment
#
#############################
[buildout]
extends = {{ template_zope }}
parts +=
request-mariadb
request-cloudooo
request-memcached
request-kumofs
basedirectory
test-runner
erp5-bootstrap
erp5-promise
promise-erp5-site
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
versions = versions
[slap-parameter]
# default site id
site-id = erp5
cloudooo-json =
promise-path = ${rootdirectory:etc}/erp5promise.cfg
bt5 = {{ bt5_list }}
bt5-repository-url = {{ local_bt5_repository }}
smtp-url = smtp://localhost:25/
[test-runner]
recipe = slapos.cookbook:erp5.test
certificate-authority-path = ${test-certificate-authority:ca-dir}
mysql-url = ${request-mariadb:connection-url}
kumofs-url = ${request-kumofs:connection-url}
memcached-url = ${request-memcached:connection-url}
cloudooo-url = ${request-cloudooo:connection-url}
test-instance-path = ${directory:unit-test-path}
prepend-path = ${buildout:bin-directory}
run-unit-test = ${buildout:bin-directory}/runUnitTest
run-test-suite = ${buildout:bin-directory}/runTestSuite
openssl-binary = ${test-certificate-authority:openssl-binary}
run-unit-test-binary = {{ bin_directory }}/runUnitTest
run-test-suite-binary = {{ bin_directory }}/runTestSuite
[test-certificate-authority]
recipe = slapos.cookbook:certificate_authority
openssl-binary = {{ openssl_location }}/bin/openssl
ca-dir = ${directory:test-ca-dir}
requests-directory = ${test-cadirectory:requests}
wrapper = ${basedirectory:services}/test-ca
ca-private = ${test-cadirectory:private}
ca-certs = ${test-cadirectory:certs}
ca-newcerts = ${test-cadirectory:newcerts}
ca-crl = ${test-cadirectory:crl}
[test-cadirectory]
recipe = slapos.cookbook:mkdirectory
requests = ${directory:test-ca-dir}/requests
private = ${directory:test-ca-dir}/private
certs = ${directory:test-ca-dir}/certs
newcerts = ${directory:test-ca-dir}/newcerts
crl = ${directory:test-ca-dir}/crl
[erp5-bootstrap]
recipe = slapos.cookbook:erp5.bootstrap
runner-path = ${basedirectory:services}/erp5-bootstrap
mysql-url = ${request-mariadb:connection-url}
zope-url = http://${zope-instance:user}:${zope-instance:password}@${zope-instance:ip}:${zope-instance:port}/${slap-parameter:site-id}
[erp5-promise]
recipe = slapos.cookbook:erp5.promise
promise-path = ${slap-parameter:promise-path}
kumofs-url = ${request-kumofs:connection-url}
memcached-url = ${request-memcached:connection-url}
cloudooo-url = ${request-cloudooo:connection-url}
smtp-url = ${slap-parameter:smtp-url}
bt5 = ${slap-parameter:bt5}
bt5-repository-url = ${slap-parameter:bt5-repository-url}
[request-common]
recipe = slapos.cookbook:request
software-url = ${slap-connection:software-release-url}
sla-computer_guid = ${slap-connection:computer-id}
return = url
server-url = ${slap-connection:server-url}
key-file = ${slap-connection:key-file}
cert-file = ${slap-connection:cert-file}
computer-id = ${slap-connection:computer-id}
partition-id = ${slap-connection:partition-id}
[request-mariadb]
<=request-common
name = MariaDB DataBase
software-type = mariadb
[request-cloudooo]
<=request-common
name = Cloudooo
config-cloudooo-json = ${slap-parameter:cloudooo-json}
software-type = cloudooo
[request-memcached]
<=request-common
name = Memcached
software-type = memcached
[request-kumofs]
<=request-common
name = KumoFS
software-type = kumofs
[zope-instance]
promise-path = ${slap-parameter:promise-path}
site-id = ${slap-parameter:site-id}
[directory]
test-ca-dir = ${rootdirectory:srv}/test-ca
test-instance-path = ${rootdirectory:srv}/test-instance
unit-test-path = ${:test-instance-path}/unit_test
[promise-erp5-site]
recipe = slapos.cookbook:check_url_available
path = ${basedirectory:promises}/erp5site
url = http://${zope-instance:ip}:${zope-instance:port}/${slap-parameter:site-id}
dash_path = {{ dash_location }}/bin/dash
curl_path = {{ curl_location }}/bin/curl
[versions]
slapos.core = {{ slapos_core_version }}
{% set use_ipv6 = slapparameter_dict.get('use-ipv6', False) -%}
[buildout]
parts =
extends = {{ logrotate_cfg }}
parts +=
publish-kumofs-connection-information
kumofs-instance
logrotate
logrotate-entry-kumofs
cron
cron-entry-logrotate
resiliency-exclude-file
promise-kumofs-server
promise-kumofs-server-listen
promise-kumofs-gateway
promise-kumofs-manager
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
[publish-kumofs-connection-information]
recipe = slapos.cookbook:publishurl
recipe = slapos.cookbook:publish.serialised
{% if use_ipv6 -%}
url = memcached://[${kumofs-instance:ip}]:${kumofs-instance:gateway-port}/
{% else -%}
url = memcached://${kumofs-instance:ip}:${kumofs-instance:gateway-port}/
{% endif -%}
[kumofs-instance]
recipe = slapos.cookbook:generic.kumofs
# Network options
ip = ${slap-network-information:local-ipv4}
manager-port = 13101
server-port = 13201
server-listen-port = 13202
gateway-port = 13301
{% if use_ipv6 -%}
ip = {{ (ipv6_set | list)[0] }}
address-family = inet6
{% else -%}
ip = {{ (ipv4_set | list)[0] }}
address-family = inet4
{% endif -%}
{% set tcpv4_port = slapparameter_dict['tcpv4-port'] -%}
manager-port = {{ tcpv4_port }}
server-port = {{ tcpv4_port + 1 }}
server-listen-port = {{ tcpv4_port + 2 }}
gateway-port = {{ tcpv4_port + 3 }}
# Paths: Data
{% set ram_storage_size = slapparameter_dict.get('ram-storage-size') -%}
{% if ram_storage_size -%}
data-path = *#capsiz={{ ram_storage_size }}m
{% else -%}
# (with 10M buckets and HDBTLARGE option)
data-path = ${directory:kumofs-data}/kumodb.tch#bnum=10485760#opts=l
{% endif -%}
# Paths: Running wrappers
gateway-wrapper = ${basedirectory:services}/kumofs_gateway
manager-wrapper = ${basedirectory:services}/kumofs_manager
server-wrapper = ${basedirectory:services}/kumofs_server
# Paths: Data (with 10M buckets and HDBTLARGE option)
data-path = ${directory:kumofs-data}/kumodb.tch#bnum=10485760#opts=l
gateway-wrapper = ${directory:services}/kumofs_gateway
manager-wrapper = ${directory:services}/kumofs_manager
server-wrapper = ${directory:services}/kumofs_server
# Paths: Logs
kumo-gateway-log = ${basedirectory:log}/kumo-gateway.log
kumo-manager-log = ${basedirectory:log}/kumo-manager.log
kumo-server-log = ${basedirectory:log}/kumo-server.log
kumo-gateway-log = ${directory:log}/kumo-gateway.log
kumo-manager-log = ${directory:log}/kumo-manager.log
kumo-server-log = ${directory:log}/kumo-server.log
# Binary information
kumo-gateway-binary = {{ kumo_location }}/bin/kumo-gateway
kumo-manager-binary = {{ kumo_location }}/bin/kumo-manager
kumo-server-binary = {{ kumo_location }}/bin/kumo-server
shell-path = {{ dash_location }}/bin/dash
kumo-gateway-binary = {{ parameter_dict['kumo-location'] }}/bin/kumo-gateway
kumo-manager-binary = {{ parameter_dict['kumo-location'] }}/bin/kumo-manager
kumo-server-binary = {{ parameter_dict['kumo-location'] }}/bin/kumo-server
shell-path = {{ parameter_dict['dash-location'] }}/bin/dash
[logrotate-entry-kumofs]
<= logrotate
recipe = slapos.cookbook:logrotate.d
logrotate-entries = ${logrotate:logrotate-entries}
backup = ${logrotate:backup}
name = kumofs
log = ${kumofs-instance:kumo-gateway-log} ${kumofs-instance:kumo-manager-log}
${kumofs-instance:kumo-server-log}
# rest of parts are candidates for some generic stuff
[logrotate]
recipe = slapos.cookbook:logrotate
# Binaries
logrotate-binary = {{ logrotate_location }}/usr/sbin/logrotate
gzip-binary = {{ gzip_location }}/bin/gzip
gunzip-binary = {{ gzip_location }}/bin/gunzip
# Directories
wrapper = ${rootdirectory:bin}/logrotate
conf = ${rootdirectory:etc}/logrotate.conf
logrotate-entries = ${directory:logrotate-entries}
backup = ${directory:logrotate-backup}
state-file = ${rootdirectory:srv}/logrotate.status
[basedirectory]
recipe = slapos.cookbook:mkdirectory
log = ${rootdirectory:var}/log
services = ${rootdirectory:etc}/run
promise = ${rootdirectory:etc}/promise
run = ${rootdirectory:var}/run
backup = ${rootdirectory:srv}/backup
log = ${kumofs-instance:kumo-gateway-log} ${kumofs-instance:kumo-manager-log} ${kumofs-instance:kumo-server-log}
[directory]
recipe = slapos.cookbook:mkdirectory
cron-entries = ${rootdirectory:etc}/cron.d
crontabs = ${rootdirectory:etc}/crontabs
cronstamps = ${rootdirectory:etc}/cronstamps
logrotate-backup = ${basedirectory:backup}/logrotate
logrotate-entries = ${rootdirectory:etc}/logrotate.d
kumofs-data = ${rootdirectory:srv}/kumofs
[rootdirectory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
var = ${buildout:directory}/var
log = ${buildout:directory}/var/log
services = ${buildout:directory}/etc/run
promise = ${buildout:directory}/etc/promise
srv = ${buildout:directory}/srv
bin = ${buildout:directory}/bin
[cron]
recipe = slapos.cookbook:cron
dcrond-binary = {{ dcron_location }}/sbin/crond
cron-entries = ${directory:cron-entries}
crontabs = ${directory:crontabs}
cronstamps = ${directory:cronstamps}
catcher = ${cron-simplelogger:wrapper}
binary = ${basedirectory:services}/crond
[cron-simplelogger]
recipe = slapos.cookbook:simplelogger
wrapper = ${rootdirectory:bin}/cron_simplelogger
log = ${basedirectory:log}/cron.log
[cron-entry-logrotate]
<= cron
recipe = slapos.cookbook:cron.d
name = logrotate
frequency = 0 0 * * *
command = ${logrotate:wrapper}
kumofs-data = ${:srv}/kumofs
[resiliency-exclude-file]
# Generate rdiff exclude file in case of resiliency
recipe = collective.recipe.template
input = inline: **
output = ${rootdirectory:srv}/exporter.exclude
output = ${directory:srv}/exporter.exclude
# Deploy zope promises scripts
[promise-template]
......@@ -130,20 +89,20 @@ port = ${kumofs-instance:server-listen-port}
[promise-kumofs-server]
<= promise-template
path = ${basedirectory:promise}/kumofs-server
path = ${directory:promise}/kumofs-server
port = ${kumofs-instance:server-port}
[promise-kumofs-server-listen]
<= promise-template
path = ${basedirectory:promise}/kumofs-server-listen
path = ${directory:promise}/kumofs-server-listen
port = ${kumofs-instance:server-listen-port}
[promise-kumofs-gateway]
<= promise-template
path = ${basedirectory:promise}/kumofs-gateway
path = ${directory:promise}/kumofs-gateway
port = ${kumofs-instance:gateway-port}
[promise-kumofs-manager]
<= promise-template
path = ${basedirectory:promise}/kumofs-manager
path = ${directory:promise}/kumofs-manager
port = ${kumofs-instance:manager-port}
# memcached-compatible volatile cache using kumofs,
# which has no limitation on key length or data size
[buildout]
parts =
publish-kumofs-connection-information
kumofs-instance
logrotate
logrotate-entry-kumofs
cron
cron-entry-logrotate
resiliency-exclude-file
promise-kumofs-server
promise-kumofs-server-listen
promise-kumofs-gateway
promise-kumofs-manager
eggs-directory = {{ eggs_directory }}
develop-eggs-directory = {{ develop_eggs_directory }}
offline = true
[publish-kumofs-connection-information]
recipe = slapos.cookbook:publishurl
url = memcached://${kumofs-instance:ip}:${kumofs-instance:gateway-port}/
[kumofs-instance]
recipe = slapos.cookbook:generic.kumofs
# Network options
ip = ${slap-network-information:local-ipv4}
manager-port = 13401
server-port = 13501
server-listen-port = 13502
# previous memcached configuration
gateway-port = 11000
# previous memcached configuration
storage-size = 64m
# Paths: Running wrappers
gateway-wrapper = ${basedirectory:services}/volatile_kumofs_gateway
manager-wrapper = ${basedirectory:services}/volatile_kumofs_manager
server-wrapper = ${basedirectory:services}/volatile_kumofs_server
# Paths: Data
data-path = *#capsiz=${:storage-size}
# Paths: Logs
kumo-gateway-log = ${basedirectory:log}/kumo-gateway.log
kumo-manager-log = ${basedirectory:log}/kumo-manager.log
kumo-server-log = ${basedirectory:log}/kumo-server.log
# Binary information
kumo-gateway-binary = {{ kumo_location }}/bin/kumo-gateway
kumo-manager-binary = {{ kumo_location }}/bin/kumo-manager
kumo-server-binary = {{ kumo_location }}/bin/kumo-server
shell-path = {{ dash_location }}/bin/dash
[logrotate-entry-kumofs]
<= logrotate
recipe = slapos.cookbook:logrotate.d
name = kumofs
log = ${kumofs-instance:kumo-gateway-log} ${kumofs-instance:kumo-manager-log}
${kumofs-instance:kumo-server-log}
# rest of parts are candidates for some generic stuff
[logrotate]
recipe = slapos.cookbook:logrotate
# Binaries
logrotate-binary = {{ logrotate_location }}/usr/sbin/logrotate
gzip-binary = {{ gzip_location }}/bin/gzip
gunzip-binary = {{ gzip_location }}/bin/gunzip
# Directories
wrapper = ${rootdirectory:bin}/logrotate
conf = ${rootdirectory:etc}/logrotate.conf
logrotate-entries = ${directory:logrotate-entries}
backup = ${directory:logrotate-backup}
state-file = ${rootdirectory:srv}/logrotate.status
[basedirectory]
recipe = slapos.cookbook:mkdirectory
log = ${rootdirectory:var}/log
services = ${rootdirectory:etc}/run
promise = ${rootdirectory:etc}/promise
run = ${rootdirectory:var}/run
backup = ${rootdirectory:srv}/backup
[directory]
recipe = slapos.cookbook:mkdirectory
cron-entries = ${rootdirectory:etc}/cron.d
crontabs = ${rootdirectory:etc}/crontabs
cronstamps = ${rootdirectory:etc}/cronstamps
logrotate-backup = ${basedirectory:backup}/logrotate
logrotate-entries = ${rootdirectory:etc}/logrotate.d
[rootdirectory]
recipe = slapos.cookbook:mkdirectory
etc = ${buildout:directory}/etc
var = ${buildout:directory}/var
srv = ${buildout:directory}/srv
bin = ${buildout:directory}/bin
[cron]
recipe = slapos.cookbook:cron
dcrond-binary = {{ dcron_location }}/sbin/crond
cron-entries = ${directory:cron-entries}
crontabs = ${directory:crontabs}
cronstamps = ${directory:cronstamps}
catcher = ${cron-simplelogger:wrapper}
binary = ${basedirectory:services}/crond
[cron-simplelogger]
recipe = slapos.cookbook:simplelogger
wrapper = ${rootdirectory:bin}/cron_simplelogger
log = ${basedirectory:log}/cron.log
[cron-entry-logrotate]
<= cron
recipe = slapos.cookbook:cron.d
name = logrotate
frequency = 0 0 * * *
command = ${logrotate:wrapper}
[resiliency-exclude-file]
# Generate rdiff exclude file in case of resiliency
recipe = collective.recipe.template
input = inline: **
output = ${rootdirectory:srv}/exporter.exclude
# Deploy zope promises scripts
[promise-template]
recipe = slapos.cookbook:check_port_listening
hostname = ${kumofs-instance:ip}
port = ${kumofs-instance:server-listen-port}
[promise-kumofs-server]
<= promise-template
path = ${basedirectory:promise}/kumofs-server
port = ${kumofs-instance:server-port}
[promise-kumofs-server-listen]
<= promise-template
path = ${basedirectory:promise}/kumofs-server-listen
port = ${kumofs-instance:server-listen-port}
[promise-kumofs-gateway]
<= promise-template
path = ${basedirectory:promise}/kumofs-gateway
port = ${kumofs-instance:gateway-port}
[promise-kumofs-manager]
<= promise-template
path = ${basedirectory:promise}/kumofs-manager
port = ${kumofs-instance:manager-port}
USE mysql;
DROP FUNCTION IF EXISTS last_insert_grn_id;
{% set mroonga = parameter_dict.get('mroonga', 'ha_mroonga.so') -%}
{% if mroonga %}CREATE FUNCTION last_insert_grn_id RETURNS INTEGER SONAME '{{ mroonga }}';{% endif %}
DROP FUNCTION IF EXISTS sphinx_snippets;
#CREATE FUNCTION sphinx_snippets RETURNS STRING SONAME 'ha_sphinx.so';
{% macro database(name, user, password) -%}
CREATE DATABASE IF NOT EXISTS {{ name }};
{% if user -%}
GRANT ALL PRIVILEGES ON {{ name }}.* TO {{ user }}@'%' IDENTIFIED BY '{{ password }}';
GRANT ALL PRIVILEGES ON {{ name }}.* TO {{ user }}@'localhost' IDENTIFIED BY '{{ password }}';
{%- endif %}
{% endmacro -%}
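{# Illustration only, with made-up values: an entry
# {'name': 'erp5', 'user': 'user', 'password': 'insecure'} in 'database-list'
# renders as:
#   CREATE DATABASE IF NOT EXISTS erp5;
#   GRANT ALL PRIVILEGES ON erp5.* TO user@'%' IDENTIFIED BY 'insecure';
#   GRANT ALL PRIVILEGES ON erp5.* TO user@'localhost' IDENTIFIED BY 'insecure';
# while an entry without 'user' only creates the database. -#}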
{% for entry in parameter_dict['database-list'] -%}
{{ database(entry['name'], entry.get('user'), entry.get('password')) }}
{% endfor -%}
#!{{ parameter_dict['shell-path'] }}
HOSTALIASES='{{ parameter_dict['hostaliases'] }}' HOSTS='{{ parameter_dict['hosts'] }}' exec '{{ parameter_dict['userhosts'] }}' '{{ parameter_dict['runzope-binary'] }}' "$@"
......@@ -12,13 +12,13 @@
set -e
mysql_executable="${mariadb-instance:mysql-binary}"
mysql_executable="${binary-wrap-mysql:wrapper-path}"
mysqldump_executable="${binary-wrap-mysqldump:wrapper-path}"
mariadb_data_directory="${directory:mariadb-data}"
mariadb_backup_directory="${directory:mariadb-backup-full}"
instance_directory="${buildout:directory}"
pid_file="${mariadb-instance:pid-file}"
binlog_path="${mariadb-instance:binlog-path}"
pid_file="${my-cnf-parameters:pid-file}"
binlog_path="${my-cnf-parameters:binlog-path}"
# Make sure mariadb is not already running
if [ -e "$pid_file" ]; then
......